/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>
MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int  nfe_shutdown(device_t);
static int  nfe_can_use_msix(struct nfe_softc *);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int  nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int  nfe_newbuf(struct nfe_softc *, int);
static int  nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int, int *);
static int  nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_start(struct ifnet *);
static void nfe_start_locked(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int  sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int  sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
static void nfe_set_linkspeed(struct nfe_softc *);
static void nfe_set_wol(struct nfe_softc *);
#ifdef NFE_DEBUG
static int nfedebug = 0;
#define	DPRINTF(sc, ...)	do {				\
	if (nfedebug)						\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#define	DPRINTFN(sc, n, ...)	do {				\
	if (nfedebug >= (n))					\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#else
#define	DPRINTF(sc, ...)
#define	DPRINTFN(sc, n, ...)
#endif
#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
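
/*
 * The tunables above may be set from loader.conf(5), e.g.:
 *
 *	hw.nfe.msi_disable="1"
 *
 * forces INTx interrupts for all nfe(4) devices; msix_disable and
 * jumbo_disable work the same way.
 */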
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ NULL, NULL }
};
static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
static struct nfe_type nfe_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	    "NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	    "NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	    "NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	    "NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{0, 0, NULL}
};
/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
	struct nfe_type *t;

	t = nfe_devs;
	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
	int rid;

	rid = PCIR_BAR(2);
	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->nfe_msix_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX table resource\n");
		return;
	}
	rid = PCIR_BAR(3);
	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->nfe_msix_pba_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX PBA resource\n");
		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
		return;
	}

	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
		if (count == NFE_MSI_MESSAGES) {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "Using %d MSIX messages\n", count);
			sc->nfe_msix = count;
		} else {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "couldn't allocate MSIX\n");
			pci_release_msi(sc->nfe_dev);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(3), sc->nfe_msix_pba_res);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(2), sc->nfe_msix_res);
			sc->nfe_msix_pba_res = NULL;
			sc->nfe_msix_res = NULL;
		}
	}
}
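
/*
 * Note that the MSI-X table and pending-bit array live in their own BARs
 * (BAR 2 and BAR 3 above); both memory resources must be mapped before
 * the vectors can be allocated, and both are released again on failure.
 */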
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	bus_addr_t dma_addr_max;
	int error = 0, i, msic, reg, rid;
	sc = device_get_softc(dev);
	sc->nfe_dev = dev;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

	pci_enable_busmaster(dev);
	rid = PCIR_BAR(0);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->nfe_res[0] == NULL) {
		device_printf(dev, "couldn't map memory resources\n");
		mtx_destroy(&sc->nfe_mtx);
		return (ENXIO);
	}
	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t v, width;

		v = pci_read_config(dev, reg + 0x08, 2);
		/* Change max. read request size to 4096. */
		v &= ~(7 << 12);
		v |= (5 << 12);
		pci_write_config(dev, reg + 0x08, v, 2);

		v = pci_read_config(dev, reg + 0x0c, 2);
		/* link capability */
		v = (v >> 4) & 0x0f;
		width = pci_read_config(dev, reg + 0x12, 2);
		/* negotiated link width */
		width = (width >> 4) & 0x3f;
		if (v != width)
			device_printf(sc->nfe_dev,
			    "warning, negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}
	if (nfe_can_use_msix(sc) == 0) {
		device_printf(sc->nfe_dev,
		    "MSI/MSI-X capability black-listed, will use INTx\n");
		msix_disable = 1;
		msi_disable = 1;
	}
	/* Allocate interrupt */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 &&
		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
			nfe_alloc_msix(sc, msic);
		if (msi_disable == 0 && sc->nfe_msix == 0 &&
		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == NFE_MSI_MESSAGES) {
				if (bootverbose)
					device_printf(dev,
					    "Using %d MSI messages\n", msic);
				sc->nfe_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}
	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
		rid = 0;
		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->nfe_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->nfe_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
		/* Map interrupts to vector 0. */
		if (sc->nfe_msix != 0) {
			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
		} else if (sc->nfe_msi != 0) {
			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
		}
	}
	/* Set IRQ status/mask register. */
	sc->nfe_irq_status = NFE_IRQ_STATUS;
	sc->nfe_irq_mask = NFE_IRQ_MASK;
	sc->nfe_intrs = NFE_IRQ_WANTED;
	sc->nfe_nointrs = 0;
	if (sc->nfe_msix != 0) {
		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
		sc->nfe_nointrs = NFE_IRQ_WANTED;
	} else if (sc->nfe_msi != 0) {
		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
	}
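
	/*
	 * The status/mask register pair and the interrupt bits differ
	 * between INTx, MSI and MSI-X modes, so the offsets are cached in
	 * the softc; every later access goes through sc->nfe_irq_status
	 * and sc->nfe_irq_mask instead of the fixed NFE_IRQ_* registers.
	 */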
	sc->nfe_devid = pci_get_device(dev);
	sc->nfe_revid = pci_get_revid(dev);
	sc->nfe_flags = 0;

	switch (sc->nfe_devid) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
		    NFE_MIB_V2;
		break;
	}
	nfe_power(sc);
	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->nfe_flags |= NFE_CORRECT_MACADDR;
	nfe_get_macaddr(sc, sc->eaddr);
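	/*
	 * Chips that store the station address in natural order set
	 * NFE_MAC_ADDR_INORDER here; nfe_detach() relies on the same
	 * NFE_CORRECT_MACADDR flag to write the address back in the byte
	 * order the hardware expects.
	 */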
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
		dma_addr_max = NFE_DMA_MAXADDR;
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    dma_addr_max,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error)
		goto fail;
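
	/*
	 * Devices without NFE_40BIT_ADDR can only DMA within the low 4GB,
	 * hence the 32-bit lowaddr default above; NFE_DMA_MAXADDR widens
	 * the window for chips that implement 40-bit descriptors.
	 */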
	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	/*
	 * Allocate Tx and Rx rings.
	 */
	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
		goto fail;

	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
		goto fail;

	nfe_alloc_jrx_ring(sc, &sc->jrxq);
	/* Create sysctl node. */
	nfe_sysctl_node(sc);
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_hwassist = 0;
	ifp->if_capabilities = 0;
	ifp->if_init = nfe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	if (sc->nfe_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
	}
	ifp->if_capenable = ifp->if_capabilities;
	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
			    IFCAP_VLAN_HWTSO;
	}

	if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/* Do MII setup */
	error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
	    nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}
	ether_ifattach(ifp, sc->eaddr);
	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->nfe_tq);
	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->nfe_dev));
	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		error = bus_setup_intr(dev, sc->nfe_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
		    &sc->nfe_intrhand[0]);
	} else {
		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->nfe_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
			    &sc->nfe_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);

fail:
	nfe_detach(dev);

	return (error);
}
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int i, rid;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;
#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		NFE_LOCK(sc);
		nfe_stop(ifp);
		ifp->if_flags &= ~IFF_UP;
		NFE_UNLOCK(sc);
		callout_drain(&sc->nfe_stat_ch);
		ether_ifdetach(ifp);
	}
	if (ifp != NULL) {
		/* restore ethernet address */
		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				eaddr[i] = sc->eaddr[5 - i];
			}
		} else
			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
		nfe_set_macaddr(sc, eaddr);
		if_free(ifp);
	}
	device_delete_child(dev, sc->nfe_miibus);
	bus_generic_detach(dev);
	if (sc->nfe_tq != NULL) {
		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
	}
	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
		if (sc->nfe_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->nfe_irq[i],
			    sc->nfe_intrhand[i]);
			sc->nfe_intrhand[i] = NULL;
		}
	}

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->nfe_msix_pba_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
		    sc->nfe_msix_pba_res);
		sc->nfe_msix_pba_res = NULL;
	}
	if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
	}
	if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
	}
	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	nfe_free_jrx_ring(sc, &sc->jrxq);

	if (sc->nfe_parent_tag) {
		bus_dma_tag_destroy(sc->nfe_parent_tag);
		sc->nfe_parent_tag = NULL;
	}

	mtx_destroy(&sc->nfe_mtx);

	return (0);
}
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_stop(sc->nfe_ifp);
	nfe_set_wol(sc);
	sc->nfe_suspended = 1;
	NFE_UNLOCK(sc);

	return (0);
}
static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_power(sc);
	ifp = sc->nfe_ifp;
	if (ifp->if_flags & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
	NFE_UNLOCK(sc);

	return (0);
}
static int
nfe_can_use_msix(struct nfe_softc *sc)
{
	static struct msix_blacklist {
		char	*maker;
		char	*product;
	} msix_blacklists[] = {
		{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
	};

	struct msix_blacklist *mblp;
	char *maker, *product;
	int count, n, use_msix;

	/*
	 * Search base board manufacturer and product name table
	 * to see if this system has a known MSI/MSI-X issue.
	 */
	maker = getenv("smbios.planar.maker");
	product = getenv("smbios.planar.product");
	use_msix = 1;
	if (maker != NULL && product != NULL) {
		count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
		mblp = msix_blacklists;
		for (n = 0; n < count; n++) {
			if (strcmp(maker, mblp->maker) == 0 &&
			    strcmp(product, mblp->product) == 0) {
				use_msix = 0;
				break;
			}
			mblp++;
		}
	}
	if (maker != NULL)
		freeenv(maker);
	if (product != NULL)
		freeenv(product);

	return (use_msix);
}
/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
	uint32_t pwr;

	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
		return;
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
	DELAY(100);
	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
	pwr = NFE_READ(sc, NFE_PWR2_CTL);
	pwr &= ~NFE_PWR2_WAKEUP_MASK;
	if (sc->nfe_revid >= 0xa3 &&
	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
		pwr |= NFE_PWR2_REVA3;
	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t rxctl, txctl;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->nfe_miibus);
	ifp = sc->nfe_ifp;

	sc->nfe_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->nfe_link = 1;
			break;
		default:
			break;
		}
	}

	nfe_mac_config(sc, mii);
	txctl = NFE_READ(sc, NFE_TX_CTL);
	rxctl = NFE_READ(sc, NFE_RX_CTL);
	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		txctl |= NFE_TX_START;
		rxctl |= NFE_RX_START;
	} else {
		txctl &= ~NFE_TX_START;
		rxctl &= ~NFE_RX_START;
	}
	NFE_WRITE(sc, NFE_TX_CTL, txctl);
	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}
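
/*
 * Program the MAC/PHY interface registers to match the media the PHY
 * negotiated.  The pause-frame handling at the bottom applies only when
 * the link is full-duplex.
 */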
static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
	uint32_t link, misc, phy, seed;
	uint32_t val;

	NFE_LOCK_ASSERT(sc);

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	misc = NFE_MISC1_MAGIC;
	link = NFE_MEDIA_SET;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	if ((phy & 0x10000000) != 0) {
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			val = NFE_R1_MAGIC_1000;
		else
			val = NFE_R1_MAGIC_10_100;
	} else
		val = NFE_R1_MAGIC_DEFAULT;
	NFE_WRITE(sc, NFE_SETUP_R1, val);

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* It seems all hardware supports Rx pause frames. */
		val = NFE_READ(sc, NFE_RXFILTER);
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			val |= NFE_PFF_RX_PAUSE;
		else
			val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			val = NFE_READ(sc, NFE_MISC1);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_ENABLE);
				val |= NFE_MISC1_TX_PAUSE;
			} else {
				val &= ~NFE_MISC1_TX_PAUSE;
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_DISABLE);
			}
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	} else {
		/* disable rx/tx pause frames */
		val = NFE_READ(sc, NFE_RXFILTER);
		val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
			    NFE_TX_PAUSE_FRAME_DISABLE);
			val = NFE_READ(sc, NFE_MISC1);
			val &= ~NFE_MISC1_TX_PAUSE;
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	}
}
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == NFE_TIMEOUT) {
		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
		return (0);
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY\n");
		return (0);
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return (val);
}
static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
		device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
	return (0);
}
struct nfe_dmamap_arg {
	bus_addr_t nfe_busaddr;
};
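
/*
 * nfe_dmamap_arg is handed to bus_dmamap_load() as the callback argument;
 * the nfe_dma_map_segs() callback (declared above) records the single
 * segment's bus address in nfe_busaddr.  A minimal sketch of that callback,
 * assuming the conventional one-segment pattern used by the ring loaders
 * below (the shipped implementation may differ in detail):
 *
 *	static void
 *	nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg,
 *	    int error)
 *	{
 *		struct nfe_dmamap_arg *ctx;
 *
 *		if (error != 0)
 *			return;
 *		ctx = (struct nfe_dmamap_arg *)arg;
 *		ctx->nfe_busaddr = segs[0].ds_addr;
 *	}
 */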
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	/* allocate memory to desc */
	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	/* map desc to device visible address space */
	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
		goto fail;
	}

	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create Rx DMA spare map\n");
		goto fail;
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->rx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Rx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;
	if (jumbo_disable != 0) {
		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
		sc->nfe_jumbo_disable = 1;
		return;
	}

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->jcur = ring->jnext = 0;

	/* Create DMA tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
	    1,					/* nsegments */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo ring DMA tag\n");
		goto fail;
	}

	/* Create DMA tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MJUM9BYTES,				/* maxsize */
	    1,					/* nsegments */
	    MJUM9BYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx buffer DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not allocate DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->jdesc64 = desc;
	else
		ring->jdesc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not load DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	ring->jphysaddr = ctx.nfe_busaddr;

	/* Create DMA maps for jumbo Rx buffers. */
	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx DMA spare map\n");
		goto fail;
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &sc->jrxq.jdata[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->jrx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create jumbo Rx DMA map\n");
			goto fail;
		}
	}

	return;

fail:
	/*
	 * Running without jumbo frame support is ok for most cases
	 * so don't fail on creating dma tag/map for jumbo frame.
	 */
	nfe_free_jrx_ring(sc, ring);
	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
	    "resource shortage\n");
	sc->nfe_jumbo_disable = 1;
}
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_RX_RING_COUNT);
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (nfe_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	void *desc;
	size_t descsize;
	int i;

	ring->jcur = ring->jnext = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		if (nfe_jnewbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->rx_data_tag != NULL) {
		if (ring->rx_spare_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    ring->rx_spare_map);
			ring->rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->rx_data_tag);
		ring->rx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		ring->rx_desc_map = NULL;
	}
	if (ring->rx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->rx_desc_tag);
		ring->rx_desc_tag = NULL;
	}
}
static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &ring->jdata[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->jrx_data_tag != NULL) {
		if (ring->jrx_spare_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    ring->jrx_spare_map);
			ring->jrx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->jrx_data_tag);
		ring->jrx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
		ring->jdesc64 = NULL;
		ring->jdesc32 = NULL;
		ring->jrx_desc_map = NULL;
	}

	if (ring->jrx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->jrx_desc_tag);
		ring->jrx_desc_tag = NULL;
	}
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TSO_MAXSIZE,			/* maxsize */
	    NFE_MAX_SCATTER,			/* nsegments */
	    NFE_TSO_MAXSGSIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
		goto fail;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->tx_data_tag, 0,
		    &ring->data[i].tx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Tx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	size_t descsize;

	sc->nfe_force_tx = 0;
	ring->queued = 0;
	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_TX_RING_COUNT);

	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->tx_data_map != NULL) {
			bus_dmamap_destroy(ring->tx_data_tag,
			    data->tx_data_map);
			data->tx_data_map = NULL;
		}
	}

	if (ring->tx_data_tag != NULL) {
		bus_dma_tag_destroy(ring->tx_data_tag);
		ring->tx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		ring->tx_desc_map = NULL;
		bus_dma_tag_destroy(ring->tx_desc_tag);
		ring->tx_desc_tag = NULL;
	}
}
#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;

static int
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t r;
	int rx_npkts = 0;

	NFE_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		NFE_UNLOCK(sc);
		return (rx_npkts);
	}

	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		nfe_jrxeof(sc, count, &rx_npkts);
	else
		nfe_rxeof(sc, count, &rx_npkts);
	nfe_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nfe_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
			NFE_UNLOCK(sc);
			return (rx_npkts);
		}
		NFE_WRITE(sc, sc->nfe_irq_status, r);

		if (r & NFE_IRQ_LINK) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(sc, "link state changed\n");
		}
	}
	NFE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
static void
nfe_set_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msi != 0)
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}
/* In MSIX, a write to a mask register behaves as XOR. */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to enable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}

static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to disable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, init, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;
	init = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
			    (sc->nfe_jumbo_disable != 0)) &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				NFE_LOCK(sc);
				ifp->if_mtu = ifr->ifr_mtu;
				if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					nfe_init_locked(sc);
				}
				NFE_UNLOCK(sc);
			}
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
			    ((ifp->if_flags ^ sc->nfe_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				nfe_stop(ifp);
		}
		sc->nfe_if_flags = ifp->if_flags;
		NFE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			NFE_LOCK(sc);
			nfe_setmulti(sc);
			NFE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->nfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(nfe_poll, ifp);
				if (error)
					break;
				NFE_LOCK(sc);
				nfe_disable_intr(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				NFE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				NFE_LOCK(sc);
				nfe_enable_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NFE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			init++;
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
			init++;
		}
		/*
		 * XXX
		 * It seems that VLAN stripping requires Rx checksum offload.
		 * Unfortunately FreeBSD has no way to disable only Rx side
		 * VLAN stripping.  So when we know Rx checksum offload is
		 * disabled turn entire hardware VLAN assist off.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
				init++;
			ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING |
			    IFCAP_VLAN_HWTSO);
		}
		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nfe_init(sc);
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static int
nfe_intr(void *arg)
{
	struct nfe_softc *sc;
	uint32_t status;

	sc = (struct nfe_softc *)arg;

	status = NFE_READ(sc, sc->nfe_irq_status);
	if (status == 0 || status == 0xffffffff)
		return (FILTER_STRAY);
	nfe_disable_intr(sc);
	taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);

	return (FILTER_HANDLED);
}
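
/*
 * Deferred interrupt work.  nfe_intr() above runs as a filter handler:
 * it only reads the status register to reject stray interrupts, masks
 * the device and enqueues this task, which does the real Rx/Tx
 * processing with the softc lock held and re-enables interrupts once
 * the rings have been drained.
 */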
static void
nfe_int_task(void *arg, int pending)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = sc->nfe_ifp;
	uint32_t r;
	int domore;

	NFE_LOCK(sc);

	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
		nfe_enable_intr(sc);
		NFE_UNLOCK(sc);
		return;	/* not for us */
	}
	NFE_WRITE(sc, sc->nfe_irq_status, r);

	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		NFE_UNLOCK(sc);
		return;
	}
#endif

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed\n");
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		NFE_UNLOCK(sc);
		nfe_enable_intr(sc);
		return;
	}

	domore = 0;
	/* check Rx ring */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
	else
		domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
	/* check Tx ring */
	nfe_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nfe_start_locked(ifp);

	NFE_UNLOCK(sc);

	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
		return;
	}

	/* Reenable interrupts. */
	nfe_enable_intr(sc);
}
static __inline void
nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->rxq.data[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->rxq.desc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
static __inline void
nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->jrxq.jdata[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
static int
nfe_newbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	data = &sc->rxq.data[idx];
	if (data->m != NULL) {
		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
	}
	map = data->rx_data_map;
	data->rx_data_map = sc->rxq.rx_spare_map;
	sc->rxq.rx_spare_map = map;
	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->rxq.desc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}
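
/*
 * nfe_newbuf()/nfe_jnewbuf() use a preallocated spare DMA map: the new
 * mbuf is loaded into the spare map first, so if the load fails the ring
 * slot still holds its old, valid buffer; on success the two maps are
 * simply swapped.
 */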
static int
nfe_jnewbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	data = &sc->jrxq.jdata[idx];
	if (data->m != NULL) {
		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
	}
	map = data->rx_data_map;
	data->rx_data_map = sc->jrxq.jrx_spare_map;
	sc->jrxq.jrx_spare_map = map;
	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}
static int
nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		m = data->m;
		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		ifp->if_ipackets++;

		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}
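
/*
 * Like nfe_rxeof(), the jumbo variant below returns 0 while the budget
 * lasts and EAGAIN once count is exhausted; nfe_int_task() uses that
 * value ("domore") to decide whether to reschedule itself instead of
 * unmasking the interrupt.
 */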
static int
nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
	    vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->jrxq.jdata[sc->jrxq.jcur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		m = data->m;
		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		ifp->if_ipackets++;

		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;
	int cons, prog;

	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->txq.next; cons != sc->txq.cur;
	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[cons];
			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[cons];
			flags = le16toh(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		prog++;
		sc->txq.queued--;
		data = &sc->txq.data[cons];

		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V1) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V2) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		/* last fragment of the mbuf chain transmitted */
		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
		m_freem(data->m);
		data->m = NULL;
	}

	if (prog > 0) {
		sc->nfe_force_tx = 0;
		sc->txq.next = cons;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->txq.queued == 0)
			sc->nfe_watchdog_timer = 0;
	}
}
2391 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2393 struct nfe_desc32 *desc32 = NULL;
2394 struct nfe_desc64 *desc64 = NULL;
2396 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2397 int error, i, nsegs, prod, si;
2399 uint16_t cflags, flags;
2402 prod = si = sc->txq.cur;
2403 map = sc->txq.data[prod].tx_data_map;
2405 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2406 &nsegs, BUS_DMA_NOWAIT);
2407 if (error == EFBIG) {
2408 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2415 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2416 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2422 } else if (error != 0)
2430 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2431 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2438 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2439 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2441 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2442 cflags |= NFE_TX_TSO;
2443 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2444 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2445 cflags |= NFE_TX_IP_CSUM;
2446 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2447 cflags |= NFE_TX_TCP_UDP_CSUM;
2448 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2449 cflags |= NFE_TX_TCP_UDP_CSUM;
2452 for (i = 0; i < nsegs; i++) {
2453 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2454 desc64 = &sc->txq.desc64[prod];
2455 desc64->physaddr[0] =
2456 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2457 desc64->physaddr[1] =
2458 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2460 desc64->length = htole16(segs[i].ds_len - 1);
2461 desc64->flags = htole16(flags);
2463 desc32 = &sc->txq.desc32[prod];
2465 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2466 desc32->length = htole16(segs[i].ds_len - 1);
2467 desc32->flags = htole16(flags);
2471 * Setting of the valid bit in the first descriptor is
2472 * deferred until the whole chain is fully set up.
2474 flags |= NFE_TX_VALID;
2477 NFE_INC(prod, NFE_TX_RING_COUNT);
2481 * The whole mbuf chain has been DMA mapped; fix the last/first descriptors.
2482 * Csum flags, vtag and TSO belong to the first fragment only.
2484 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2485 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2486 desc64 = &sc->txq.desc64[si];
2487 if ((m->m_flags & M_VLANTAG) != 0)
2488 desc64->vtag = htole32(NFE_TX_VTAG |
2489 m->m_pkthdr.ether_vtag);
2490 if (tso_segsz != 0) {
2493 * The 32-bit TSO segment size is split across the 16-bit
2494 * length and flags words of the first descriptor.
2496 desc64->length |= htole16((uint16_t)tso_segsz);
2497 desc64->flags |= htole16(tso_segsz >> 16);
2500 * Finally, set the valid/checksum/TSO bits in the first descriptor.
2503 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2505 if (sc->nfe_flags & NFE_JUMBO_SUP)
2506 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2508 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2509 desc32 = &sc->txq.desc32[si];
2510 if (tso_segsz != 0) {
2513 * The 32-bit TSO segment size is split across the 16-bit
2514 * length and flags words of the first descriptor.
2516 desc32->length |= htole16((uint16_t)tso_segsz);
2517 desc32->flags |= htole16(tso_segsz >> 16);
2520 * Finally, set the valid/checksum/TSO bits in the first descriptor.
2523 desc32->flags |= htole16(NFE_TX_VALID | cflags);
2527 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2528 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2529 sc->txq.data[prod].tx_data_map = map;
2530 sc->txq.data[prod].m = m;
2532 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
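/*
 * Tx descriptor hand-off in nfe_encap(), sketched for an n-fragment
 * chain starting at ring slot si:
 *
 *	slots si+1 .. si+n-1	written with NFE_TX_VALID already set
 *	slot si+n-1		additionally gets NFE_TX_LASTFRAG_*
 *	slot si			NFE_TX_VALID | cflags written last
 *
 * Deferring the first descriptor's valid bit until the rest of the
 * chain is complete ensures the chip can never fetch a half-built
 * chain.  The dmamap loaded for the mbuf (borrowed from slot si) is
 * parked on the last slot together with the mbuf pointer, and slot si
 * inherits that slot's spare map, so nfe_txeof() finds the map and the
 * mbuf together on the last fragment.
 */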
2539 nfe_setmulti(struct nfe_softc *sc)
2541 struct ifnet *ifp = sc->nfe_ifp;
2542 struct ifmultiaddr *ifma;
2545 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2546 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2547 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2550 NFE_LOCK_ASSERT(sc);
2552 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2553 bzero(addr, ETHER_ADDR_LEN);
2554 bzero(mask, ETHER_ADDR_LEN);
2558 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2559 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2561 if_maddr_rlock(ifp);
2562 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2565 if (ifma->ifma_addr->sa_family != AF_LINK)
2568 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2569 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2570 u_int8_t mcaddr = addrp[i];
2575 if_maddr_runlock(ifp);
2577 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2582 addr[0] |= 0x01; /* make sure multicast bit is set */
2584 NFE_WRITE(sc, NFE_MULTIADDR_HI,
2585 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2586 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2587 addr[5] << 8 | addr[4]);
2588 NFE_WRITE(sc, NFE_MULTIMASK_HI,
2589 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2590 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2591 mask[5] << 8 | mask[4]);
2593 filter = NFE_READ(sc, NFE_RXFILTER);
2594 filter &= NFE_PFF_RX_PAUSE;
2595 filter |= NFE_RXFILTER_MAGIC;
2596 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2597 NFE_WRITE(sc, NFE_RXFILTER, filter);
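/*
 * Worked example of the address/mask derivation above, assuming the
 * elided fold loop is "mask[i] |= addr[i]; mask[i] ^= 0xff;" as in the
 * OpenBSD driver this code descends from.  For the two groups
 * 01:00:5e:00:00:01 and 01:00:5e:00:00:02, byte 5 becomes:
 *
 *	addr = 0xff & 0x01 & 0x02	= 0x00
 *	mask = 0xff & ~0x01 & ~0x02	= 0xfc
 *	fold: (0xfc | 0x00) ^ 0xff	= 0x03
 *
 * addr keeps the bits common to every group address, and the folded
 * mask flags exactly the bits on which they differ, which the filter
 * hardware then appears to treat as don't-care.
 */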
2602 nfe_start(struct ifnet *ifp)
2604 struct nfe_softc *sc = ifp->if_softc;
2607 nfe_start_locked(ifp);
2612 nfe_start_locked(struct ifnet *ifp)
2614 struct nfe_softc *sc = ifp->if_softc;
2618 NFE_LOCK_ASSERT(sc);
2620 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2621 IFF_DRV_RUNNING || sc->nfe_link == 0)
2624 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2625 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2629 if (nfe_encap(sc, &m0) != 0) {
2632 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2633 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2637 ETHER_BPF_MTAP(ifp, m0);
2641 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2642 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2645 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2648 * Set a timeout in case the chip goes out to lunch.
2650 sc->nfe_watchdog_timer = 5;
2656 nfe_watchdog(struct ifnet *ifp)
2658 struct nfe_softc *sc = ifp->if_softc;
2660 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2663 /* Check if we've lost a Tx completion interrupt. */
2665 if (sc->txq.queued == 0) {
2666 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2668 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2669 nfe_start_locked(ifp);
2672 /* Check if we've lost the start Tx command. */
2674 if (sc->nfe_force_tx <= 3) {
2676 * If lost start Tx commands turn out to be the common cause of
2677 * watchdog timeouts, this recovery code should move to nfe_txeof().
2679 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2682 sc->nfe_force_tx = 0;
2684 if_printf(ifp, "watchdog timeout\n");
2686 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2688 nfe_init_locked(sc);
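	/*
	 * The watchdog thus recovers in three escalating steps: reclaim
	 * completed descriptors in case a Tx completion interrupt was
	 * lost, re-issue the Tx kick up to three times in case the
	 * start command was lost, and only then reset and reinitialize
	 * the whole controller.
	 */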
2695 struct nfe_softc *sc = xsc;
2698 nfe_init_locked(sc);
2704 nfe_init_locked(void *xsc)
2706 struct nfe_softc *sc = xsc;
2707 struct ifnet *ifp = sc->nfe_ifp;
2708 struct mii_data *mii;
2712 NFE_LOCK_ASSERT(sc);
2714 mii = device_get_softc(sc->nfe_miibus);
2716 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2721 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
2723 nfe_init_tx_ring(sc, &sc->txq);
2724 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2725 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2727 error = nfe_init_rx_ring(sc, &sc->rxq);
2729 device_printf(sc->nfe_dev,
2730 "initialization failed: no memory for rx buffers\n");
2736 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2737 val |= NFE_MAC_ADDR_INORDER;
2738 NFE_WRITE(sc, NFE_TX_UNK, val);
2739 NFE_WRITE(sc, NFE_STATUS, 0);
2741 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2742 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2744 sc->rxtxctl = NFE_RXTX_BIT2;
2745 if (sc->nfe_flags & NFE_40BIT_ADDR)
2746 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2747 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2748 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2750 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2751 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2752 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2753 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2755 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2757 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2759 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2760 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2762 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2764 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2766 /* set MAC address */
2767 nfe_set_macaddr(sc, IF_LLADDR(ifp));
2769 /* tell MAC where rings are in memory */
2770 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2771 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2772 NFE_ADDR_HI(sc->jrxq.jphysaddr));
2773 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2774 NFE_ADDR_LO(sc->jrxq.jphysaddr));
2776 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2777 NFE_ADDR_HI(sc->rxq.physaddr));
2778 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2779 NFE_ADDR_LO(sc->rxq.physaddr));
2781 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2782 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2784 NFE_WRITE(sc, NFE_RING_SIZE,
2785 (NFE_RX_RING_COUNT - 1) << 16 |
2786 (NFE_TX_RING_COUNT - 1));
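	/*
	 * NFE_RING_SIZE packs both ring sizes, minus one, into a single
	 * register: Rx in the upper 16 bits, Tx in the lower 16.  With
	 * 256-entry rings, for instance, this writes 0x00ff00ff.
	 */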
2788 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2790 /* force MAC to wake up */
2791 val = NFE_READ(sc, NFE_PWR_STATE);
2792 if ((val & NFE_PWR_WAKEUP) == 0)
2793 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2795 val = NFE_READ(sc, NFE_PWR_STATE);
2796 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2799 /* configure interrupt coalescing/mitigation */
2800 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2802 /* no interrupt mitigation: one interrupt per packet */
2803 NFE_WRITE(sc, NFE_IMTIMER, 970);
2806 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2807 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2808 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2810 /* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
2811 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2813 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2815 NFE_WRITE(sc, NFE_WOL_CTL, 0);
2817 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2818 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2820 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2826 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2829 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2831 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2833 /* Clear hardware stats. */
2834 nfe_stats_clear(sc);
2836 #ifdef DEVICE_POLLING
2837 if (ifp->if_capenable & IFCAP_POLLING)
2838 nfe_disable_intr(sc);
2842 nfe_enable_intr(sc); /* enable interrupts */
2844 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2845 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2850 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2855 nfe_stop(struct ifnet *ifp)
2857 struct nfe_softc *sc = ifp->if_softc;
2858 struct nfe_rx_ring *rx_ring;
2859 struct nfe_jrx_ring *jrx_ring;
2860 struct nfe_tx_ring *tx_ring;
2861 struct nfe_rx_data *rdata;
2862 struct nfe_tx_data *tdata;
2865 NFE_LOCK_ASSERT(sc);
2867 sc->nfe_watchdog_timer = 0;
2868 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2870 callout_stop(&sc->nfe_stat_ch);
2873 NFE_WRITE(sc, NFE_TX_CTL, 0);
2876 NFE_WRITE(sc, NFE_RX_CTL, 0);
2878 /* disable interrupts */
2879 nfe_disable_intr(sc);
2883 /* free Rx and Tx mbufs still in the queues. */
2885 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2886 rdata = &rx_ring->data[i];
2887 if (rdata->m != NULL) {
2888 bus_dmamap_sync(rx_ring->rx_data_tag,
2889 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2890 bus_dmamap_unload(rx_ring->rx_data_tag,
2891 rdata->rx_data_map);
2897 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2898 jrx_ring = &sc->jrxq;
2899 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2900 rdata = &jrx_ring->jdata[i];
2901 if (rdata->m != NULL) {
2902 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2903 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2904 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2905 rdata->rx_data_map);
2913 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2914 tdata = &tx_ring->data[i];
2915 if (tdata->m != NULL) {
2916 bus_dmamap_sync(tx_ring->tx_data_tag,
2917 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2918 bus_dmamap_unload(tx_ring->tx_data_tag,
2919 tdata->tx_data_map);
2924 /* Update hardware stats. */
2925 nfe_stats_update(sc);
2930 nfe_ifmedia_upd(struct ifnet *ifp)
2932 struct nfe_softc *sc = ifp->if_softc;
2933 struct mii_data *mii;
2936 mii = device_get_softc(sc->nfe_miibus);
2945 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2947 struct nfe_softc *sc;
2948 struct mii_data *mii;
2953 mii = device_get_softc(sc->nfe_miibus);
2957 ifmr->ifm_active = mii->mii_media_active;
2958 ifmr->ifm_status = mii->mii_media_status;
2965 struct nfe_softc *sc;
2966 struct mii_data *mii;
2969 sc = (struct nfe_softc *)xsc;
2971 NFE_LOCK_ASSERT(sc);
2975 mii = device_get_softc(sc->nfe_miibus);
2977 nfe_stats_update(sc);
2979 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2984 nfe_shutdown(device_t dev)
2987 return (nfe_suspend(dev));
2992 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2996 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2997 val = NFE_READ(sc, NFE_MACADDR_LO);
2998 addr[0] = (val >> 8) & 0xff;
2999 addr[1] = (val & 0xff);
3001 val = NFE_READ(sc, NFE_MACADDR_HI);
3002 addr[2] = (val >> 24) & 0xff;
3003 addr[3] = (val >> 16) & 0xff;
3004 addr[4] = (val >> 8) & 0xff;
3005 addr[5] = (val & 0xff);
3007 val = NFE_READ(sc, NFE_MACADDR_LO);
3008 addr[5] = (val >> 8) & 0xff;
3009 addr[4] = (val & 0xff);
3011 val = NFE_READ(sc, NFE_MACADDR_HI);
3012 addr[3] = (val >> 24) & 0xff;
3013 addr[2] = (val >> 16) & 0xff;
3014 addr[1] = (val >> 8) & 0xff;
3015 addr[0] = (val & 0xff);
3021 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
3024 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
3025 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3026 addr[1] << 8 | addr[0]);
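/*
 * Register layout, worked through for 00:11:22:33:44:55 as written by
 * nfe_set_macaddr() above:
 *
 *	NFE_MACADDR_LO = 0x00005544	NFE_MACADDR_HI = 0x33221100
 *
 * nfe_get_macaddr() decodes that same layout in its NFE_CORRECT_MACADDR
 * branch; the other branch expects the factory address in the opposite
 * byte order (LO = 0x00000011, HI = 0x22334455 for the address above),
 * which is how the older chips provide it.
 */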
3031 * Map a single buffer address.
3035 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3037 struct nfe_dmamap_arg *ctx;
3042 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3044 ctx = (struct nfe_dmamap_arg *)arg;
3045 ctx->nfe_busaddr = segs[0].ds_addr;
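/*
 * Minimal usage sketch for the callback above ("tag", "map", "ring" and
 * "size" are placeholders, not driver names):
 *
 *	struct nfe_dmamap_arg ctx;
 *
 *	ctx.nfe_busaddr = 0;
 *	error = bus_dmamap_load(tag, map, ring, size,
 *	    nfe_dma_map_segs, &ctx, BUS_DMA_NOWAIT);
 *	if (error == 0)
 *		physaddr = ctx.nfe_busaddr;
 *
 * The callback records only the single segment's bus address; the
 * KASSERT enforces that the tag was created with nsegments == 1.
 */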
3050 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3056 value = *(int *)arg1;
3057 error = sysctl_handle_int(oidp, &value, 0, req);
3058 if (error || !req->newptr)
3060 if (value < low || value > high)
3062 *(int *)arg1 = value;
3069 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3072 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3077 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3078 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3079 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3080 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3083 nfe_sysctl_node(struct nfe_softc *sc)
3085 struct sysctl_ctx_list *ctx;
3086 struct sysctl_oid_list *child, *parent;
3087 struct sysctl_oid *tree;
3088 struct nfe_hw_stats *stats;
3091 stats = &sc->nfe_stats;
3092 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3093 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3094 SYSCTL_ADD_PROC(ctx, child,
3095 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3096 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3097 "max number of Rx events to process");
3099 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3100 error = resource_int_value(device_get_name(sc->nfe_dev),
3101 device_get_unit(sc->nfe_dev), "process_limit",
3102 &sc->nfe_process_limit);
3104 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3105 sc->nfe_process_limit > NFE_PROC_MAX) {
3106 device_printf(sc->nfe_dev,
3107 "process_limit value out of range; "
3108 "using default: %d\n", NFE_PROC_DEFAULT);
3109 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3113 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3116 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3117 NULL, "NFE statistics");
3118 parent = SYSCTL_CHILDREN(tree);
3120 /* Rx statistics. */
3121 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3122 NULL, "Rx MAC statistics");
3123 child = SYSCTL_CHILDREN(tree);
3125 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3126 &stats->rx_frame_errors, "Framing Errors");
3127 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3128 &stats->rx_extra_bytes, "Extra Bytes");
3129 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3130 &stats->rx_late_cols, "Late Collisions");
3131 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3132 &stats->rx_runts, "Runts");
3133 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3134 &stats->rx_jumbos, "Jumbos");
3135 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3136 &stats->rx_fifo_overuns, "FIFO Overruns");
3137 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3138 &stats->rx_crc_errors, "CRC Errors");
3139 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3140 &stats->rx_fae, "Frame Alignment Errors");
3141 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3142 &stats->rx_len_errors, "Length Errors");
3143 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3144 &stats->rx_unicast, "Unicast Frames");
3145 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3146 &stats->rx_multicast, "Multicast Frames");
3147 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3148 &stats->rx_broadcast, "Broadcast Frames");
3149 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3150 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3151 &stats->rx_octets, "Octets");
3152 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3153 &stats->rx_pause, "Pause frames");
3154 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3155 &stats->rx_drops, "Drop frames");
3158 /* Tx statistics. */
3159 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3160 NULL, "Tx MAC statistics");
3161 child = SYSCTL_CHILDREN(tree);
3162 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3163 &stats->tx_octets, "Octets");
3164 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3165 &stats->tx_zero_rexmits, "Zero Retransmits");
3166 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3167 &stats->tx_one_rexmits, "One Retransmits");
3168 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3169 &stats->tx_multi_rexmits, "Multiple Retransmits");
3170 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3171 &stats->tx_late_cols, "Late Collisions");
3172 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3173 &stats->tx_fifo_underuns, "FIFO Underruns");
3174 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3175 &stats->tx_carrier_losts, "Carrier Losses");
3176 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3177 &stats->tx_excess_deferals, "Excess Deferrals");
3178 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3179 &stats->tx_retry_errors, "Retry Errors");
3180 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3181 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3182 &stats->tx_deferals, "Deferrals");
3183 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3184 &stats->tx_frames, "Frames");
3185 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3186 &stats->tx_pause, "Pause Frames");
3188 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3189 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3190 &stats->tx_unicast, "Unicast Frames");
3191 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3192 &stats->tx_multicast, "Multicast Frames");
3193 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3194 &stats->tx_broadcast, "Broadcast Frames");
3198 #undef NFE_SYSCTL_STAT_ADD32
3199 #undef NFE_SYSCTL_STAT_ADD64
3202 nfe_stats_clear(struct nfe_softc *sc)
3206 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3207 mib_cnt = NFE_NUM_MIB_STATV1;
3208 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3209 mib_cnt = NFE_NUM_MIB_STATV2;
3213 for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
3214 NFE_READ(sc, NFE_TX_OCTET + i);
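	/*
	 * The hardware MIB counters are cleared by the act of reading
	 * them, so the values read above are simply discarded; the same
	 * reads in nfe_stats_update() accumulate into the softc instead.
	 */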
3216 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3217 NFE_READ(sc, NFE_TX_UNICAST);
3218 NFE_READ(sc, NFE_TX_MULTICAST);
3219 NFE_READ(sc, NFE_TX_BROADCAST);
3224 nfe_stats_update(struct nfe_softc *sc)
3226 struct nfe_hw_stats *stats;
3228 NFE_LOCK_ASSERT(sc);
3230 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3233 stats = &sc->nfe_stats;
3234 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3235 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3236 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3237 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3238 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3239 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3240 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3241 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3242 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3243 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3244 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3245 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3246 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3247 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3248 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3249 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3250 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3251 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3252 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3253 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3254 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3256 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3257 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3258 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3259 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3260 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3261 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3262 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3265 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3266 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3267 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3268 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3274 nfe_set_linkspeed(struct nfe_softc *sc)
3276 struct mii_softc *miisc;
3277 struct mii_data *mii;
3280 NFE_LOCK_ASSERT(sc);
3282 mii = device_get_softc(sc->nfe_miibus);
3285 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3286 (IFM_ACTIVE | IFM_AVALID)) {
3287 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3298 miisc = LIST_FIRST(&mii->mii_phys);
3299 phyno = miisc->mii_phy;
3300 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3302 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3303 nfe_miibus_writereg(sc->nfe_dev, phyno,
3304 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3305 nfe_miibus_writereg(sc->nfe_dev, phyno,
3306 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
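	/*
	 * Gigabit advertisement is withdrawn (MII_100T2CR cleared)
	 * before autonegotiation is restarted: WOL generally requires
	 * falling back to 10/100, since the PHY cannot sustain a
	 * 1000baseT link on the power available in low-power states.
	 */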
3310 * Poll link state until nfe(4) gets a 10/100Mbps link.
3312 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3314 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3315 == (IFM_ACTIVE | IFM_AVALID)) {
3316 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3319 nfe_mac_config(sc, mii);
3326 pause("nfelnk", hz);
3329 if (i == MII_ANEGTICKS_GIGE)
3330 device_printf(sc->nfe_dev,
3331 "establishing a link failed, WOL may not work!\n");
3334 * No link; force the MAC to a 100Mbps, full-duplex link.
3335 * This is a last resort and may or may not work.
3337 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3338 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3339 nfe_mac_config(sc, mii);
3344 nfe_set_wol(struct nfe_softc *sc)
3351 NFE_LOCK_ASSERT(sc);
3353 if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3356 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
3357 wolctl = NFE_WOL_MAGIC;
3360 NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3361 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
3362 nfe_set_linkspeed(sc);
3363 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3364 NFE_WRITE(sc, NFE_PWR2_CTL,
3365 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3367 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3368 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3369 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3372 /* Request PME if WOL is requested. */
3373 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3374 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3375 if ((ifp->if_capenable & IFCAP_WOL) != 0)
3376 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3377 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);