/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>
MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int  nfe_shutdown(device_t);
static int  nfe_can_use_msix(struct nfe_softc *);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int  nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int  nfe_newbuf(struct nfe_softc *, int);
static int  nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int, int *);
static int  nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_start(struct ifnet *);
static void nfe_start_locked(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
static void nfe_set_linkspeed(struct nfe_softc *);
static void nfe_set_wol(struct nfe_softc *);
#ifdef NFE_DEBUG
static int nfedebug = 0;
#define DPRINTF(sc, ...)	do {				\
	if (nfedebug)						\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#define DPRINTFN(sc, n, ...)	do {				\
	if (nfedebug >= (n))					\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#else
#define DPRINTF(sc, ...)
#define DPRINTFN(sc, n, ...)
#endif
#define NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)

static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
static device_method_t nfe_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	nfe_probe),
    DEVMETHOD(device_attach,	nfe_attach),
    DEVMETHOD(device_detach,	nfe_detach),
    DEVMETHOD(device_suspend,	nfe_suspend),
    DEVMETHOD(device_resume,	nfe_resume),
    DEVMETHOD(device_shutdown,	nfe_shutdown),

    /* MII interface */
    DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
    DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
    DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

    DEVMETHOD_END
};
static driver_t nfe_driver = {
    "nfe",
    nfe_methods,
    sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
static struct nfe_type nfe_devs[] = {
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
    "NVIDIA nForce MCP Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
    "NVIDIA nForce2 MCP2 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
    "NVIDIA nForce3 MCP3 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
    "NVIDIA nForce3 MCP7 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
    "NVIDIA nForce 430 MCP12 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
    "NVIDIA nForce 430 MCP13 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
    "NVIDIA nForce MCP55 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
    "NVIDIA nForce MCP55 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
    "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
    "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
    "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
    "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
    "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
    "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
    "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
    "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
    "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
    "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
    "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
    "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
    "NVIDIA nForce MCP73 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
    "NVIDIA nForce MCP73 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
    "NVIDIA nForce MCP73 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
    "NVIDIA nForce MCP73 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
    "NVIDIA nForce MCP77 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
    "NVIDIA nForce MCP77 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
    "NVIDIA nForce MCP77 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
    "NVIDIA nForce MCP77 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
    "NVIDIA nForce MCP79 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
    "NVIDIA nForce MCP79 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
    "NVIDIA nForce MCP79 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
    "NVIDIA nForce MCP79 Networking Adapter"},
    {0, 0, NULL}
};
/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
    struct nfe_type *t = nfe_devs;

    /* Check for matching PCI device IDs */
    while (t->name != NULL) {
        if ((pci_get_vendor(dev) == t->vid_id) &&
            (pci_get_device(dev) == t->dev_id)) {
            device_set_desc(dev, t->name);
            return (BUS_PROBE_DEFAULT);
        }
        t++;
    }
    return (ENXIO);
}
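/*
 * Map the MSI-X table (BAR 2) and pending-bit array (BAR 3) and try to
 * allocate NFE_MSI_MESSAGES vectors. On any failure the resources are
 * released again and the driver falls back to plain MSI or INTx.
 */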
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
    int rid;

    rid = PCIR_BAR(2);
    sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);
    if (sc->nfe_msix_res == NULL) {
        device_printf(sc->nfe_dev,
            "couldn't allocate MSIX table resource\n");
        return;
    }
    rid = PCIR_BAR(3);
    sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (sc->nfe_msix_pba_res == NULL) {
        device_printf(sc->nfe_dev,
            "couldn't allocate MSIX PBA resource\n");
        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
            sc->nfe_msix_res);
        sc->nfe_msix_res = NULL;
        return;
    }

    if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
        if (count == NFE_MSI_MESSAGES) {
            if (bootverbose)
                device_printf(sc->nfe_dev,
                    "Using %d MSIX messages\n", count);
            sc->nfe_msix = 1;
        } else {
            device_printf(sc->nfe_dev,
                "couldn't allocate MSIX\n");
            pci_release_msi(sc->nfe_dev);
            bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                PCIR_BAR(3), sc->nfe_msix_pba_res);
            bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                PCIR_BAR(2), sc->nfe_msix_res);
            sc->nfe_msix_pba_res = NULL;
            sc->nfe_msix_res = NULL;
        }
    }
}
static int
nfe_attach(device_t dev)
{
    struct nfe_softc *sc;
    struct ifnet *ifp;
    bus_addr_t dma_addr_max;
    int error = 0, i, msic, reg, rid;

    sc = device_get_softc(dev);
    sc->nfe_dev = dev;

    mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);
    callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

    pci_enable_busmaster(dev);

    rid = PCIR_BAR(0);
    sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);
    if (sc->nfe_res[0] == NULL) {
        device_printf(dev, "couldn't map memory resources\n");
        mtx_destroy(&sc->nfe_mtx);
        return (ENXIO);
    }
    if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
        uint16_t v, width;

        v = pci_read_config(dev, reg + 0x08, 2);
        /* Change max. read request size to 4096. */
        v &= ~(7 << 12);
        v |= (5 << 12);
        pci_write_config(dev, reg + 0x08, v, 2);

        v = pci_read_config(dev, reg + 0x0c, 2);
        /* link capability */
        v = (v >> 4) & 0x0f;
        width = pci_read_config(dev, reg + 0x12, 2);
        /* negotiated link width */
        width = (width >> 4) & 0x3f;
        if (v != width)
            device_printf(sc->nfe_dev,
                "warning, negotiated width of link(x%d) != "
                "max. width of link(x%d)\n", width, v);
    }
    if (nfe_can_use_msix(sc) == 0) {
        device_printf(sc->nfe_dev,
            "MSI/MSI-X capability black-listed, will use INTx\n");
        msix_disable = 1;
        msi_disable = 1;
    }

    /* Allocate interrupt */
    if (msix_disable == 0 || msi_disable == 0) {
        if (msix_disable == 0 &&
            (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
            nfe_alloc_msix(sc, msic);
        if (msi_disable == 0 && sc->nfe_msix == 0 &&
            (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
            pci_alloc_msi(dev, &msic) == 0) {
            if (msic == NFE_MSI_MESSAGES) {
                if (bootverbose)
                    device_printf(dev,
                        "Using %d MSI messages\n", msic);
                sc->nfe_msi = 1;
            } else
                pci_release_msi(dev);
        }
    }
    if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
        rid = 0;
        sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->nfe_irq[0] == NULL) {
            device_printf(dev, "couldn't allocate IRQ resources\n");
            error = ENXIO;
            goto fail;
        }
    } else {
        for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
            sc->nfe_irq[i] = bus_alloc_resource_any(dev,
                SYS_RES_IRQ, &rid, RF_ACTIVE);
            if (sc->nfe_irq[i] == NULL) {
                device_printf(dev,
                    "couldn't allocate IRQ resources for "
                    "message %d\n", rid);
                error = ENXIO;
                goto fail;
            }
        }
        /* Map interrupts to vector 0. */
        if (sc->nfe_msix != 0) {
            NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
            NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
        } else if (sc->nfe_msi != 0) {
            NFE_WRITE(sc, NFE_MSI_MAP0, 0);
            NFE_WRITE(sc, NFE_MSI_MAP1, 0);
        }
    }
    /* Set IRQ status/mask register. */
    sc->nfe_irq_status = NFE_IRQ_STATUS;
    sc->nfe_irq_mask = NFE_IRQ_MASK;
    sc->nfe_intrs = NFE_IRQ_WANTED;
    sc->nfe_nointrs = 0;
    if (sc->nfe_msix != 0) {
        sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
        sc->nfe_nointrs = NFE_IRQ_WANTED;
    } else if (sc->nfe_msi != 0) {
        sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
        sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
    }

    sc->nfe_devid = pci_get_device(dev);
    sc->nfe_revid = pci_get_revid(dev);
    sc->nfe_flags = 0;
    switch (sc->nfe_devid) {
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
        break;
    case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
        sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
        break;
    case PCI_PRODUCT_NVIDIA_CK804_LAN1:
    case PCI_PRODUCT_NVIDIA_CK804_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
            NFE_MIB_V1;
        break;
    case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
            NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
        break;
    case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
    case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
        sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
            NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
        break;
    case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
        /* XXX flow control */
        sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
            NFE_CORRECT_MACADDR | NFE_MIB_V3;
        break;
    case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
        /* XXX flow control */
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
            NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
        break;
    case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
            NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
            NFE_MIB_V2;
        break;
    }

    nfe_power(sc);
    /* Check for reversed ethernet address */
    if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
        sc->nfe_flags |= NFE_CORRECT_MACADDR;
    nfe_get_macaddr(sc, sc->eaddr);
    /*
     * Allocate the parent bus DMA tag appropriate for PCI.
     */
    dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
    if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
        dma_addr_max = NFE_DMA_MAXADDR;
    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->nfe_dev),	/* parent */
        1, 0,				/* alignment, boundary */
        dma_addr_max,			/* lowaddr */
        BUS_SPACE_MAXADDR,		/* highaddr */
        NULL, NULL,			/* filter, filterarg */
        BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
        BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
        0,				/* flags */
        NULL, NULL,			/* lockfunc, lockarg */
        &sc->nfe_parent_tag);
    if (error)
        goto fail;
    ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "can not if_alloc()\n");
        error = ENOSPC;
        goto fail;
    }

    /*
     * Allocate Tx and Rx rings.
     */
    if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
        goto fail;

    if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
        goto fail;

    nfe_alloc_jrx_ring(sc, &sc->jrxq);
    /* Create sysctl node. */
    nfe_sysctl_node(sc);

    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = nfe_ioctl;
    ifp->if_start = nfe_start;
    ifp->if_hwassist = 0;
    ifp->if_capabilities = 0;
    ifp->if_init = nfe_init;
    IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
    ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
    IFQ_SET_READY(&ifp->if_snd);

    if (sc->nfe_flags & NFE_HW_CSUM) {
        ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
        ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
    }
    ifp->if_capenable = ifp->if_capabilities;

    sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
    /* VLAN capability setup. */
    ifp->if_capabilities |= IFCAP_VLAN_MTU;
    if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
        ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
        if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
            ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
                IFCAP_VLAN_HWTSO;
    }

    if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
        ifp->if_capabilities |= IFCAP_WOL_MAGIC;
    ifp->if_capenable = ifp->if_capabilities;

    /*
     * Tell the upper layer(s) we support long frames.
     * Must appear after the call to ether_ifattach() because
     * ether_ifattach() sets ifi_hdrlen to the default value.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#ifdef DEVICE_POLLING
    ifp->if_capabilities |= IFCAP_POLLING;
#endif

    /* Do MII setup */
    error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
        nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
        MIIF_DOPAUSE);
    if (error != 0) {
        device_printf(dev, "attaching PHYs failed\n");
        goto fail;
    }
    ether_ifattach(ifp, sc->eaddr);

    TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
    sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
        taskqueue_thread_enqueue, &sc->nfe_tq);
    taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
        device_get_nameunit(sc->nfe_dev));
    if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
        error = bus_setup_intr(dev, sc->nfe_irq[0],
            INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
            &sc->nfe_intrhand[0]);
    } else {
        for (i = 0; i < NFE_MSI_MESSAGES; i++) {
            error = bus_setup_intr(dev, sc->nfe_irq[i],
                INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                &sc->nfe_intrhand[i]);
            if (error != 0)
                break;
        }
    }
    if (error) {
        device_printf(dev, "couldn't set up irq\n");
        taskqueue_free(sc->nfe_tq);
        sc->nfe_tq = NULL;
        ether_ifdetach(ifp);
        goto fail;
    }

fail:
    if (error)
        nfe_detach(dev);

    return (error);
}
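/*
 * Teardown runs roughly in reverse order of attach: stop the chip and
 * detach the ifnet, restore the factory MAC address byte order, then
 * release interrupts, bus resources, the rings, and finally the mutex.
 */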
static int
nfe_detach(device_t dev)
{
    struct nfe_softc *sc;
    struct ifnet *ifp;
    uint8_t eaddr[ETHER_ADDR_LEN];
    int i, rid;

    sc = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
    ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
    if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
        ether_poll_deregister(ifp);
#endif
    if (device_is_attached(dev)) {
        NFE_LOCK(sc);
        nfe_stop(ifp);
        ifp->if_flags &= ~IFF_UP;
        NFE_UNLOCK(sc);
        callout_drain(&sc->nfe_stat_ch);
        ether_ifdetach(ifp);
    }

    if (ifp) {
        /* restore ethernet address */
        if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
            for (i = 0; i < ETHER_ADDR_LEN; i++) {
                eaddr[i] = sc->eaddr[5 - i];
            }
        } else
            bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
        nfe_set_macaddr(sc, eaddr);
        if_free(ifp);
    }
    if (sc->nfe_miibus)
        device_delete_child(dev, sc->nfe_miibus);
    bus_generic_detach(dev);
    if (sc->nfe_tq != NULL) {
        taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
        taskqueue_free(sc->nfe_tq);
        sc->nfe_tq = NULL;
    }

    for (i = 0; i < NFE_MSI_MESSAGES; i++) {
        if (sc->nfe_intrhand[i] != NULL) {
            bus_teardown_intr(dev, sc->nfe_irq[i],
                sc->nfe_intrhand[i]);
            sc->nfe_intrhand[i] = NULL;
        }
    }

    if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
        if (sc->nfe_irq[0] != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, 0,
                sc->nfe_irq[0]);
    } else {
        for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
            if (sc->nfe_irq[i] != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ, rid,
                    sc->nfe_irq[i]);
                sc->nfe_irq[i] = NULL;
            }
        }
        pci_release_msi(dev);
    }
    if (sc->nfe_msix_pba_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
            sc->nfe_msix_pba_res);
        sc->nfe_msix_pba_res = NULL;
    }
    if (sc->nfe_msix_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
            sc->nfe_msix_res);
        sc->nfe_msix_res = NULL;
    }
    if (sc->nfe_res[0] != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
            sc->nfe_res[0]);
        sc->nfe_res[0] = NULL;
    }

    nfe_free_tx_ring(sc, &sc->txq);
    nfe_free_rx_ring(sc, &sc->rxq);
    nfe_free_jrx_ring(sc, &sc->jrxq);

    if (sc->nfe_parent_tag) {
        bus_dma_tag_destroy(sc->nfe_parent_tag);
        sc->nfe_parent_tag = NULL;
    }

    mtx_destroy(&sc->nfe_mtx);

    return (0);
}
static int
nfe_suspend(device_t dev)
{
    struct nfe_softc *sc;

    sc = device_get_softc(dev);

    NFE_LOCK(sc);
    nfe_stop(sc->nfe_ifp);
    nfe_set_wol(sc);
    sc->nfe_suspended = 1;
    NFE_UNLOCK(sc);

    return (0);
}

static int
nfe_resume(device_t dev)
{
    struct nfe_softc *sc;
    struct ifnet *ifp;

    sc = device_get_softc(dev);

    NFE_LOCK(sc);
    nfe_power(sc);
    ifp = sc->nfe_ifp;
    if (ifp->if_flags & IFF_UP)
        nfe_init_locked(sc);
    sc->nfe_suspended = 0;
    NFE_UNLOCK(sc);

    return (0);
}
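/*
 * Some boards ship with broken MSI/MSI-X support. Compare the SMBIOS
 * base-board maker/product strings against a small blacklist and return
 * zero when MSI/MSI-X should be avoided on this system.
 */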
static int
nfe_can_use_msix(struct nfe_softc *sc)
{
    static struct msix_blacklist {
        char *maker;
        char *product;
    } msix_blacklists[] = {
        { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
    };

    struct msix_blacklist *mblp;
    char *maker, *product;
    int count, n, use_msix;

    /*
     * Search base board manufacturer and product name table
     * to see if this system has a known MSI/MSI-X issue.
     */
    maker = getenv("smbios.planar.maker");
    product = getenv("smbios.planar.product");
    use_msix = 1;
    if (maker != NULL && product != NULL) {
        count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
        mblp = msix_blacklists;
        for (n = 0; n < count; n++) {
            if (strcmp(maker, mblp->maker) == 0 &&
                strcmp(product, mblp->product) == 0) {
                use_msix = 0;
                break;
            }
            mblp++;
        }
    }

    if (maker != NULL)
        freeenv(maker);
    if (product != NULL)
        freeenv(product);

    return (use_msix);
}
/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
    uint32_t pwr;

    if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
        return;
    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
    NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
    DELAY(100);
    NFE_WRITE(sc, NFE_MAC_RESET, 0);
    DELAY(100);
    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
    pwr = NFE_READ(sc, NFE_PWR2_CTL);
    pwr &= ~NFE_PWR2_WAKEUP_MASK;
    if (sc->nfe_revid >= 0xa3 &&
        (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
        sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
        pwr |= NFE_PWR2_REVA3;
    NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}
static void
nfe_miibus_statchg(device_t dev)
{
    struct nfe_softc *sc;
    struct mii_data *mii;
    struct ifnet *ifp;
    uint32_t rxctl, txctl;

    sc = device_get_softc(dev);

    mii = device_get_softc(sc->nfe_miibus);
    ifp = sc->nfe_ifp;

    sc->nfe_link = 0;
    if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
        (IFM_ACTIVE | IFM_AVALID)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
        case IFM_1000_T:
            sc->nfe_link = 1;
            break;
        default:
            break;
        }
    }

    nfe_mac_config(sc, mii);
    txctl = NFE_READ(sc, NFE_TX_CTL);
    rxctl = NFE_READ(sc, NFE_RX_CTL);
    if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
        txctl |= NFE_TX_START;
        rxctl |= NFE_RX_START;
    } else {
        txctl &= ~NFE_TX_START;
        rxctl &= ~NFE_RX_START;
    }
    NFE_WRITE(sc, NFE_TX_CTL, txctl);
    NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}
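/*
 * Program MAC speed/duplex and pause-frame handling to match the media
 * word negotiated by the PHY. Called with the softc lock held.
 */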
static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
    uint32_t link, misc, phy, seed;
    uint32_t val;

    NFE_LOCK_ASSERT(sc);

    phy = NFE_READ(sc, NFE_PHY_IFACE);
    phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

    seed = NFE_READ(sc, NFE_RNDSEED);
    seed &= ~NFE_SEED_MASK;

    misc = NFE_MISC1_MAGIC;
    link = NFE_MEDIA_SET;

    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
        phy |= NFE_PHY_HDX;	/* half-duplex */
        misc |= NFE_MISC1_HDX;
    }

    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_1000_T:	/* full-duplex only */
        link |= NFE_MEDIA_1000T;
        seed |= NFE_SEED_1000T;
        phy |= NFE_PHY_1000T;
        break;
    case IFM_100_TX:
        link |= NFE_MEDIA_100TX;
        seed |= NFE_SEED_100TX;
        phy |= NFE_PHY_100TX;
        break;
    case IFM_10_T:
        link |= NFE_MEDIA_10T;
        seed |= NFE_SEED_10T;
        break;
    }

    if ((phy & 0x10000000) != 0) {
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
            val = NFE_R1_MAGIC_1000;
        else
            val = NFE_R1_MAGIC_10_100;
    } else
        val = NFE_R1_MAGIC_DEFAULT;
    NFE_WRITE(sc, NFE_SETUP_R1, val);

    NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

    NFE_WRITE(sc, NFE_PHY_IFACE, phy);
    NFE_WRITE(sc, NFE_MISC1, misc);
    NFE_WRITE(sc, NFE_LINKSPEED, link);

    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
        /* It seems all hardware supports Rx pause frames. */
        val = NFE_READ(sc, NFE_RXFILTER);
        if ((IFM_OPTIONS(mii->mii_media_active) &
            IFM_ETH_RXPAUSE) != 0)
            val |= NFE_PFF_RX_PAUSE;
        else
            val &= ~NFE_PFF_RX_PAUSE;
        NFE_WRITE(sc, NFE_RXFILTER, val);
        if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
            val = NFE_READ(sc, NFE_MISC1);
            if ((IFM_OPTIONS(mii->mii_media_active) &
                IFM_ETH_TXPAUSE) != 0) {
                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                    NFE_TX_PAUSE_FRAME_ENABLE);
                val |= NFE_MISC1_TX_PAUSE;
            } else {
                val &= ~NFE_MISC1_TX_PAUSE;
                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                    NFE_TX_PAUSE_FRAME_DISABLE);
            }
            NFE_WRITE(sc, NFE_MISC1, val);
        }
    } else {
        /* disable rx/tx pause frames */
        val = NFE_READ(sc, NFE_RXFILTER);
        val &= ~NFE_PFF_RX_PAUSE;
        NFE_WRITE(sc, NFE_RXFILTER, val);
        if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
            NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                NFE_TX_PAUSE_FRAME_DISABLE);
            val = NFE_READ(sc, NFE_MISC1);
            val &= ~NFE_MISC1_TX_PAUSE;
            NFE_WRITE(sc, NFE_MISC1, val);
        }
    }
}
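/*
 * PHY registers are accessed indirectly: write the (phy, reg) selector to
 * NFE_PHY_CTL, poll until the busy bit clears, then read the result from
 * NFE_PHY_DATA. A zero return signals a timeout or PHY error to miibus.
 */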
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
    struct nfe_softc *sc = device_get_softc(dev);
    uint32_t val;
    int ntries;

    NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

    if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
        NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
        DELAY(100);
    }

    NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

    for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
        DELAY(100);
        if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
            break;
    }
    if (ntries == NFE_TIMEOUT) {
        DPRINTFN(sc, 2, "timeout waiting for PHY\n");
        return (0);
    }

    if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
        DPRINTFN(sc, 2, "could not read PHY\n");
        return (0);
    }

    val = NFE_READ(sc, NFE_PHY_DATA);
    if (val != 0xffffffff && val != 0)
        sc->mii_phyaddr = phy;

    DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

    return (val);
}
static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
    struct nfe_softc *sc = device_get_softc(dev);
    uint32_t ctl;
    int ntries;

    NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

    if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
        NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
        DELAY(100);
    }

    NFE_WRITE(sc, NFE_PHY_DATA, val);
    ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
    NFE_WRITE(sc, NFE_PHY_CTL, ctl);

    for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
        DELAY(100);
        if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
            break;
    }
#ifdef NFE_DEBUG
    if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
        device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
    return (0);
}
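/*
 * bus_dmamap_load() callback argument; nfe_dma_map_segs() stores the bus
 * address of the single segment here so the ring allocators below can
 * record the physical address of their descriptor memory.
 */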
struct nfe_dmamap_arg {
    bus_addr_t nfe_busaddr;
};
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
    struct nfe_dmamap_arg ctx;
    struct nfe_rx_data *data;
    void *desc;
    int i, error, descsize;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }

    ring->cur = ring->next = 0;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        NFE_RING_ALIGN, 0,			/* alignment, boundary */
        BUS_SPACE_MAXADDR,			/* lowaddr */
        BUS_SPACE_MAXADDR,			/* highaddr */
        NULL, NULL,				/* filter, filterarg */
        NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
        NFE_RX_RING_COUNT * descsize,		/* maxsegsize */
        0,					/* flags */
        NULL, NULL,				/* lockfunc, lockarg */
        &ring->rx_desc_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
        goto fail;
    }

    /* allocate memory to desc */
    error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA map\n");
        goto fail;
    }
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        ring->desc64 = desc;
    else
        ring->desc32 = desc;

    /* map desc to device visible address space */
    ctx.nfe_busaddr = 0;
    error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
        NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not load desc DMA map\n");
        goto fail;
    }
    ring->physaddr = ctx.nfe_busaddr;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        1, 0,			/* alignment, boundary */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        MCLBYTES, 1,		/* maxsize, nsegments */
        MCLBYTES,		/* maxsegsize */
        0,			/* flags */
        NULL, NULL,		/* lockfunc, lockarg */
        &ring->rx_data_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
        goto fail;
    }

    error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create Rx DMA spare map\n");
        goto fail;
    }

    /*
     * Pre-allocate Rx buffers and populate Rx ring.
     */
    for (i = 0; i < NFE_RX_RING_COUNT; i++) {
        data = &sc->rxq.data[i];
        data->rx_data_map = NULL;
        data->m = NULL;
        error = bus_dmamap_create(ring->rx_data_tag, 0,
            &data->rx_data_map);
        if (error != 0) {
            device_printf(sc->nfe_dev,
                "could not create Rx DMA map\n");
            goto fail;
        }
    }

fail:
    return (error);
}
static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
    struct nfe_dmamap_arg ctx;
    struct nfe_rx_data *data;
    void *desc;
    int i, error, descsize;

    if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
        return;
    if (jumbo_disable != 0) {
        device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
        sc->nfe_jumbo_disable = 1;
        return;
    }

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->jdesc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->jdesc32;
        descsize = sizeof (struct nfe_desc32);
    }

    ring->jcur = ring->jnext = 0;

    /* Create DMA tag for jumbo Rx ring. */
    error = bus_dma_tag_create(sc->nfe_parent_tag,
        NFE_RING_ALIGN, 0,			/* alignment, boundary */
        BUS_SPACE_MAXADDR,			/* lowaddr */
        BUS_SPACE_MAXADDR,			/* highaddr */
        NULL, NULL,				/* filter, filterarg */
        NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
        1,					/* nsegments */
        NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
        0,					/* flags */
        NULL, NULL,				/* lockfunc, lockarg */
        &ring->jrx_desc_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create jumbo ring DMA tag\n");
        goto fail;
    }

    /* Create DMA tag for jumbo Rx buffers. */
    error = bus_dma_tag_create(sc->nfe_parent_tag,
        1, 0,			/* alignment, boundary */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        MJUM9BYTES,		/* maxsize */
        1,			/* nsegments */
        MJUM9BYTES,		/* maxsegsize */
        0,			/* flags */
        NULL, NULL,		/* lockfunc, lockarg */
        &ring->jrx_data_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create jumbo Rx buffer DMA tag\n");
        goto fail;
    }

    /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
    error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not allocate DMA'able memory for jumbo Rx ring\n");
        goto fail;
    }
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        ring->jdesc64 = desc;
    else
        ring->jdesc32 = desc;

    ctx.nfe_busaddr = 0;
    error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
        NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not load DMA'able memory for jumbo Rx ring\n");
        goto fail;
    }
    ring->jphysaddr = ctx.nfe_busaddr;

    /* Create DMA maps for jumbo Rx buffers. */
    error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create jumbo Rx DMA spare map\n");
        goto fail;
    }

    for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
        data = &sc->jrxq.jdata[i];
        data->rx_data_map = NULL;
        data->m = NULL;
        error = bus_dmamap_create(ring->jrx_data_tag, 0,
            &data->rx_data_map);
        if (error != 0) {
            device_printf(sc->nfe_dev,
                "could not create jumbo Rx DMA map\n");
            goto fail;
        }
    }

    return;

fail:
    /*
     * Running without jumbo frame support is ok for most cases
     * so don't fail on creating dma tag/map for jumbo frame.
     */
    nfe_free_jrx_ring(sc, ring);
    device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
        "resource shortage\n");
    sc->nfe_jumbo_disable = 1;
}
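/*
 * Ring initialization resets the ring indices, zeroes the descriptors,
 * attaches a fresh mbuf to every slot, and pushes the descriptors to the
 * device with bus_dmamap_sync().
 */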
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
    void *desc;
    size_t descsize;
    int i;

    ring->cur = ring->next = 0;
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }
    bzero(desc, descsize * NFE_RX_RING_COUNT);
    for (i = 0; i < NFE_RX_RING_COUNT; i++) {
        if (nfe_newbuf(sc, i) != 0)
            return (ENOBUFS);
    }

    bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);
}

static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
    void *desc;
    size_t descsize;
    int i;

    ring->jcur = ring->jnext = 0;
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->jdesc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->jdesc32;
        descsize = sizeof (struct nfe_desc32);
    }
    bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
    for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
        if (nfe_jnewbuf(sc, i) != 0)
            return (ENOBUFS);
    }

    bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
    struct nfe_rx_data *data;
    void *desc;
    int i, descsize;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }

    for (i = 0; i < NFE_RX_RING_COUNT; i++) {
        data = &ring->data[i];
        if (data->rx_data_map != NULL) {
            bus_dmamap_destroy(ring->rx_data_tag,
                data->rx_data_map);
            data->rx_data_map = NULL;
        }
        if (data->m != NULL) {
            m_freem(data->m);
            data->m = NULL;
        }
    }
    if (ring->rx_data_tag != NULL) {
        if (ring->rx_spare_map != NULL) {
            bus_dmamap_destroy(ring->rx_data_tag,
                ring->rx_spare_map);
            ring->rx_spare_map = NULL;
        }
        bus_dma_tag_destroy(ring->rx_data_tag);
        ring->rx_data_tag = NULL;
    }

    if (desc != NULL) {
        bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
        bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
        ring->desc64 = NULL;
        ring->desc32 = NULL;
        ring->rx_desc_map = NULL;
    }
    if (ring->rx_desc_tag != NULL) {
        bus_dma_tag_destroy(ring->rx_desc_tag);
        ring->rx_desc_tag = NULL;
    }
}

static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
    struct nfe_rx_data *data;
    void *desc;
    int i, descsize;

    if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
        return;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->jdesc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->jdesc32;
        descsize = sizeof (struct nfe_desc32);
    }

    for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
        data = &ring->jdata[i];
        if (data->rx_data_map != NULL) {
            bus_dmamap_destroy(ring->jrx_data_tag,
                data->rx_data_map);
            data->rx_data_map = NULL;
        }
        if (data->m != NULL) {
            m_freem(data->m);
            data->m = NULL;
        }
    }
    if (ring->jrx_data_tag != NULL) {
        if (ring->jrx_spare_map != NULL) {
            bus_dmamap_destroy(ring->jrx_data_tag,
                ring->jrx_spare_map);
            ring->jrx_spare_map = NULL;
        }
        bus_dma_tag_destroy(ring->jrx_data_tag);
        ring->jrx_data_tag = NULL;
    }

    if (desc != NULL) {
        bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
        bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
        ring->jdesc64 = NULL;
        ring->jdesc32 = NULL;
        ring->jrx_desc_map = NULL;
    }

    if (ring->jrx_desc_tag != NULL) {
        bus_dma_tag_destroy(ring->jrx_desc_tag);
        ring->jrx_desc_tag = NULL;
    }
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
    struct nfe_dmamap_arg ctx;
    void *desc;
    int i, error, descsize;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }

    ring->queued = 0;
    ring->cur = ring->next = 0;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        NFE_RING_ALIGN, 0,			/* alignment, boundary */
        BUS_SPACE_MAXADDR,			/* lowaddr */
        BUS_SPACE_MAXADDR,			/* highaddr */
        NULL, NULL,				/* filter, filterarg */
        NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
        NFE_TX_RING_COUNT * descsize,		/* maxsegsize */
        0,					/* flags */
        NULL, NULL,				/* lockfunc, lockarg */
        &ring->tx_desc_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
        goto fail;
    }

    error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA map\n");
        goto fail;
    }
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        ring->desc64 = desc;
    else
        ring->desc32 = desc;

    ctx.nfe_busaddr = 0;
    error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
        NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not load desc DMA map\n");
        goto fail;
    }
    ring->physaddr = ctx.nfe_busaddr;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        1, 0,			/* alignment, boundary */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        NFE_TSO_MAXSIZE,	/* maxsize */
        NFE_MAX_SCATTER,	/* nsegments */
        NFE_TSO_MAXSGSIZE,	/* maxsegsize */
        0,			/* flags */
        NULL, NULL,		/* lockfunc, lockarg */
        &ring->tx_data_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
        goto fail;
    }

    for (i = 0; i < NFE_TX_RING_COUNT; i++) {
        error = bus_dmamap_create(ring->tx_data_tag, 0,
            &ring->data[i].tx_data_map);
        if (error != 0) {
            device_printf(sc->nfe_dev,
                "could not create Tx DMA map\n");
            goto fail;
        }
    }

fail:
    return (error);
}
static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
    void *desc;
    size_t descsize;

    sc->nfe_force_tx = 0;
    ring->queued = 0;
    ring->cur = ring->next = 0;
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }
    bzero(desc, descsize * NFE_TX_RING_COUNT);

    bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
    struct nfe_tx_data *data;
    void *desc;
    int i, descsize;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }

    for (i = 0; i < NFE_TX_RING_COUNT; i++) {
        data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->tx_data_map != NULL) {
            bus_dmamap_destroy(ring->tx_data_tag,
                data->tx_data_map);
            data->tx_data_map = NULL;
        }
    }

    if (ring->tx_data_tag != NULL) {
        bus_dma_tag_destroy(ring->tx_data_tag);
        ring->tx_data_tag = NULL;
    }

    if (desc != NULL) {
        bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
        bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
        ring->desc64 = NULL;
        ring->desc32 = NULL;
        ring->tx_desc_map = NULL;
        bus_dma_tag_destroy(ring->tx_desc_tag);
        ring->tx_desc_tag = NULL;
    }
}
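/*
 * DEVICE_POLLING bypasses interrupts entirely: the poll handler reaps the
 * Rx and Tx rings on every tick and only consults the interrupt status
 * register when asked to POLL_AND_CHECK_STATUS.
 */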
#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;

static int
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
    struct nfe_softc *sc = ifp->if_softc;
    uint32_t r;
    int rx_npkts = 0;

    NFE_LOCK(sc);

    if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
        NFE_UNLOCK(sc);
        return (rx_npkts);
    }

    if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
        rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
    else
        rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
    nfe_txeof(sc);
    if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
        nfe_start_locked(ifp);

    if (cmd == POLL_AND_CHECK_STATUS) {
        if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
            NFE_UNLOCK(sc);
            return (rx_npkts);
        }
        NFE_WRITE(sc, sc->nfe_irq_status, r);

        if (r & NFE_IRQ_LINK) {
            NFE_READ(sc, NFE_PHY_STATUS);
            NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
            DPRINTF(sc, "link state changed\n");
        }
    }
    NFE_UNLOCK(sc);
    return (rx_npkts);
}
#endif /* DEVICE_POLLING */
static void
nfe_set_intr(struct nfe_softc *sc)
{

    if (sc->nfe_msi != 0)
        NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}

/* In MSIX, a write to mask registers behaves as XOR. */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

    if (sc->nfe_msix != 0) {
        /* XXX Should have a better way to enable interrupts! */
        if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
            NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
    } else
        NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}

static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

    if (sc->nfe_msix != 0) {
        /* XXX Should have a better way to disable interrupts! */
        if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
            NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
    } else
        NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    struct nfe_softc *sc;
    struct ifreq *ifr;
    struct mii_data *mii;
    int error, init, mask;

    sc = ifp->if_softc;
    ifr = (struct ifreq *) data;
    error = 0;
    init = 0;
    switch (cmd) {
    case SIOCSIFMTU:
        if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
            error = EINVAL;
        else if (ifp->if_mtu != ifr->ifr_mtu) {
            if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
                (sc->nfe_jumbo_disable != 0)) &&
                ifr->ifr_mtu > ETHERMTU)
                error = EINVAL;
            else {
                NFE_LOCK(sc);
                ifp->if_mtu = ifr->ifr_mtu;
                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                    nfe_init_locked(sc);
                }
                NFE_UNLOCK(sc);
            }
        }
        break;
    case SIOCSIFFLAGS:
        NFE_LOCK(sc);
        if (ifp->if_flags & IFF_UP) {
            /*
             * If only the PROMISC or ALLMULTI flag changes, then
             * don't do a full re-init of the chip, just update
             * the Rx filter.
             */
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
                ((ifp->if_flags ^ sc->nfe_if_flags) &
                (IFF_ALLMULTI | IFF_PROMISC)) != 0)
                nfe_setmulti(sc);
            else
                nfe_init_locked(sc);
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                nfe_stop(ifp);
        }
        sc->nfe_if_flags = ifp->if_flags;
        NFE_UNLOCK(sc);
        error = 0;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
            NFE_LOCK(sc);
            nfe_setmulti(sc);
            NFE_UNLOCK(sc);
            error = 0;
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        mii = device_get_softc(sc->nfe_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
        if ((mask & IFCAP_POLLING) != 0) {
            if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
                error = ether_poll_register(nfe_poll, ifp);
                if (error)
                    break;
                NFE_LOCK(sc);
                nfe_disable_intr(sc);
                ifp->if_capenable |= IFCAP_POLLING;
                NFE_UNLOCK(sc);
            } else {
                error = ether_poll_deregister(ifp);
                /* Enable interrupt even in error case */
                NFE_LOCK(sc);
                nfe_enable_intr(sc);
                ifp->if_capenable &= ~IFCAP_POLLING;
                NFE_UNLOCK(sc);
            }
        }
#endif /* DEVICE_POLLING */
        if ((mask & IFCAP_WOL_MAGIC) != 0 &&
            (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
            ifp->if_capenable ^= IFCAP_WOL_MAGIC;
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
                ifp->if_hwassist |= NFE_CSUM_FEATURES;
            else
                ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
        }
        if ((mask & IFCAP_RXCSUM) != 0 &&
            (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
            ifp->if_capenable ^= IFCAP_RXCSUM;
            init++;
        }
        if ((mask & IFCAP_TSO4) != 0 &&
            (ifp->if_capabilities & IFCAP_TSO4) != 0) {
            ifp->if_capenable ^= IFCAP_TSO4;
            if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
                ifp->if_hwassist |= CSUM_TSO;
            else
                ifp->if_hwassist &= ~CSUM_TSO;
        }
        if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
            (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
            ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
        if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
            (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
            ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
            if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
                ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
            init++;
        }
        /*
         * It seems that VLAN stripping requires Rx checksum offload.
         * Unfortunately FreeBSD has no way to disable only Rx side
         * VLAN stripping. So when we know Rx checksum offload is
         * disabled, turn the entire hardware VLAN assist off.
         */
        if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
            if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
                init++;
            ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING |
                IFCAP_VLAN_HWTSO);
        }
        if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
            ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
            nfe_init(sc);
        }
        VLAN_CAPABILITIES(ifp);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}
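/*
 * Interrupt filter: runs in primary interrupt context, so it only checks
 * that the interrupt is ours, masks further interrupts, and defers the
 * real work to nfe_int_task() via the fast taskqueue.
 */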
static int
nfe_intr(void *arg)
{
    struct nfe_softc *sc;
    uint32_t status;

    sc = (struct nfe_softc *)arg;

    status = NFE_READ(sc, sc->nfe_irq_status);
    if (status == 0 || status == 0xffffffff)
        return (FILTER_STRAY);
    nfe_disable_intr(sc);
    taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);

    return (FILTER_HANDLED);
}
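/*
 * Deferred interrupt handler: acknowledges the status bits, handles link
 * change events, reaps the Rx and Tx rings, and either reschedules itself
 * (when more work is pending) or unmasks interrupts again.
 */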
static void
nfe_int_task(void *arg, int pending)
{
    struct nfe_softc *sc = arg;
    struct ifnet *ifp = sc->nfe_ifp;
    uint32_t r;
    int domore;

    NFE_LOCK(sc);

    if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
        nfe_enable_intr(sc);
        NFE_UNLOCK(sc);
        return;	/* not for us */
    }
    NFE_WRITE(sc, sc->nfe_irq_status, r);

    DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);

#ifdef DEVICE_POLLING
    if (ifp->if_capenable & IFCAP_POLLING) {
        NFE_UNLOCK(sc);
        return;
    }
#endif

    if (r & NFE_IRQ_LINK) {
        NFE_READ(sc, NFE_PHY_STATUS);
        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
        DPRINTF(sc, "link state changed\n");
    }

    if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
        NFE_UNLOCK(sc);
        nfe_disable_intr(sc);
        return;
    }

    domore = 0;
    if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
        domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
    else
        domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
    nfe_txeof(sc);

    if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
        nfe_start_locked(ifp);

    NFE_UNLOCK(sc);

    if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
        taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
        return;
    }

    /* Reenable interrupts. */
    nfe_enable_intr(sc);
}
static __inline void
nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;

    data = &sc->rxq.data[idx];
    m = data->m;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->rxq.desc64[idx];
        /* VLAN packet may have overwritten it. */
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
        desc64->length = htole16(m->m_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->rxq.desc32[idx];
        desc32->length = htole16(m->m_len);
        desc32->flags = htole16(NFE_RX_READY);
    }
}

static __inline void
nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;

    data = &sc->jrxq.jdata[idx];
    m = data->m;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->jrxq.jdesc64[idx];
        /* VLAN packet may have overwritten it. */
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
        desc64->length = htole16(m->m_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->jrxq.jdesc32[idx];
        desc32->length = htole16(m->m_len);
        desc32->flags = htole16(NFE_RX_READY);
    }
}
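/*
 * Rx buffer replacement uses a spare DMA map: the new mbuf is loaded into
 * the spare map first, and only on success are the maps swapped, so the
 * old buffer is never lost on an allocation failure.
 */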
static int
nfe_newbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_rx_data *data;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);

    m->m_len = m->m_pkthdr.len = MCLBYTES;
    m_adj(m, ETHER_ALIGN);

    if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
        m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    data = &sc->rxq.data[idx];
    if (data->m != NULL) {
        bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
    }
    map = data->rx_data_map;
    data->rx_data_map = sc->rxq.rx_spare_map;
    sc->rxq.rx_spare_map = map;
    bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
        BUS_DMASYNC_PREREAD);
    data->paddr = segs[0].ds_addr;
    data->m = m;
    /* update mapping address in h/w descriptor */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->rxq.desc64[idx];
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc64->length = htole16(segs[0].ds_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->rxq.desc32[idx];
        desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc32->length = htole16(segs[0].ds_len);
        desc32->flags = htole16(NFE_RX_READY);
    }

    return (0);
}
static int
nfe_jnewbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_rx_data *data;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
    if (m == NULL)
        return (ENOBUFS);
    if ((m->m_flags & M_EXT) == 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    m->m_pkthdr.len = m->m_len = MJUM9BYTES;
    m_adj(m, ETHER_ALIGN);

    if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
        sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    data = &sc->jrxq.jdata[idx];
    if (data->m != NULL) {
        bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
    }
    map = data->rx_data_map;
    data->rx_data_map = sc->jrxq.jrx_spare_map;
    sc->jrxq.jrx_spare_map = map;
    bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
        BUS_DMASYNC_PREREAD);
    data->paddr = segs[0].ds_addr;
    data->m = m;
    /* update mapping address in h/w descriptor */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->jrxq.jdesc64[idx];
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc64->length = htole16(segs[0].ds_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->jrxq.jdesc32[idx];
        desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc32->length = htole16(segs[0].ds_len);
        desc32->flags = htole16(NFE_RX_READY);
    }

    return (0);
}
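/*
 * Rx reaping for the standard ring: walk descriptors until one is still
 * owned by the hardware (NFE_RX_READY), validate each frame per the V1/V2
 * descriptor format, replace its buffer, and pass it up the stack with
 * the lock dropped. Returns EAGAIN when the budget was exhausted.
 */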
static int
nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
    struct ifnet *ifp = sc->nfe_ifp;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;
    uint16_t flags;
    int len, prog, rx_npkts;
    uint32_t vtag = 0;

    rx_npkts = 0;
    NFE_LOCK_ASSERT(sc);

    bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
        BUS_DMASYNC_POSTREAD);

    for (prog = 0; ; NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
        if (count <= 0)
            break;
        count--;

        data = &sc->rxq.data[sc->rxq.cur];

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
            desc64 = &sc->rxq.desc64[sc->rxq.cur];
            vtag = le32toh(desc64->physaddr[1]);
            flags = le16toh(desc64->flags);
            len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
        } else {
            desc32 = &sc->rxq.desc32[sc->rxq.cur];
            flags = le16toh(desc32->flags);
            len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
        }

        if (flags & NFE_RX_READY)
            break;
        prog++;
        if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
            if (!(flags & NFE_RX_VALID_V1)) {
                ifp->if_ierrors++;
                nfe_discard_rxbuf(sc, sc->rxq.cur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
                flags &= ~NFE_RX_ERROR;
                len--;	/* fix buffer length */
            }
        } else {
            if (!(flags & NFE_RX_VALID_V2)) {
                ifp->if_ierrors++;
                nfe_discard_rxbuf(sc, sc->rxq.cur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
                flags &= ~NFE_RX_ERROR;
                len--;	/* fix buffer length */
            }
        }

        if (flags & NFE_RX_ERROR) {
            ifp->if_ierrors++;
            nfe_discard_rxbuf(sc, sc->rxq.cur);
            continue;
        }

        m = data->m;
        if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
            ifp->if_iqdrops++;
            nfe_discard_rxbuf(sc, sc->rxq.cur);
            continue;
        }

        if ((vtag & NFE_RX_VTAG) != 0 &&
            (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
            m->m_pkthdr.ether_vtag = vtag & 0xffff;
            m->m_flags |= M_VLANTAG;
        }

        m->m_pkthdr.len = m->m_len = len;
        m->m_pkthdr.rcvif = ifp;

        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
            if ((flags & NFE_RX_IP_CSUMOK) != 0) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
                    (flags & NFE_RX_UDP_CSUMOK) != 0) {
                    m->m_pkthdr.csum_flags |=
                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                    m->m_pkthdr.csum_data = 0xffff;
                }
            }
        }

        ifp->if_ipackets++;

        NFE_UNLOCK(sc);
        (*ifp->if_input)(ifp, m);
        NFE_LOCK(sc);
        rx_npkts++;
    }

    if (prog > 0)
        bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    if (rx_npktsp != NULL)
        *rx_npktsp = rx_npkts;
    return (count > 0 ? 0 : EAGAIN);
}
static int
nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
    struct ifnet *ifp = sc->nfe_ifp;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;
    uint16_t flags;
    int len, prog, rx_npkts;
    uint32_t vtag = 0;

    rx_npkts = 0;
    NFE_LOCK_ASSERT(sc);

    bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
        BUS_DMASYNC_POSTREAD);

    for (prog = 0; ; NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
        vtag = 0) {
        if (count <= 0)
            break;
        count--;

        data = &sc->jrxq.jdata[sc->jrxq.jcur];

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
            desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
            vtag = le32toh(desc64->physaddr[1]);
            flags = le16toh(desc64->flags);
            len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
        } else {
            desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
            flags = le16toh(desc32->flags);
            len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
        }

        if (flags & NFE_RX_READY)
            break;
        prog++;
        if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
            if (!(flags & NFE_RX_VALID_V1)) {
                ifp->if_ierrors++;
                nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
                flags &= ~NFE_RX_ERROR;
                len--;	/* fix buffer length */
            }
        } else {
            if (!(flags & NFE_RX_VALID_V2)) {
                ifp->if_ierrors++;
                nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
                flags &= ~NFE_RX_ERROR;
                len--;	/* fix buffer length */
            }
        }

        if (flags & NFE_RX_ERROR) {
            ifp->if_ierrors++;
            nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
            continue;
        }

        m = data->m;
        if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
            ifp->if_iqdrops++;
            nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
            continue;
        }

        if ((vtag & NFE_RX_VTAG) != 0 &&
            (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
            m->m_pkthdr.ether_vtag = vtag & 0xffff;
            m->m_flags |= M_VLANTAG;
        }

        m->m_pkthdr.len = m->m_len = len;
        m->m_pkthdr.rcvif = ifp;

        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
            if ((flags & NFE_RX_IP_CSUMOK) != 0) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
                    (flags & NFE_RX_UDP_CSUMOK) != 0) {
                    m->m_pkthdr.csum_flags |=
                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                    m->m_pkthdr.csum_data = 0xffff;
                }
            }
        }

        ifp->if_ipackets++;

        NFE_UNLOCK(sc);
        (*ifp->if_input)(ifp, m);
        NFE_LOCK(sc);
        rx_npkts++;
    }

    if (prog > 0)
        bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    if (rx_npktsp != NULL)
        *rx_npktsp = rx_npkts;
    return (count > 0 ? 0 : EAGAIN);
}
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;
	int cons, prog;

	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->txq.next; cons != sc->txq.cur;
	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[cons];
			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[cons];
			flags = le16toh(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		prog++;
		sc->txq.queued--;
		data = &sc->txq.data[cons];

		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V1) != 0)
				device_printf(sc->nfe_dev,
				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V2) != 0)
				device_printf(sc->nfe_dev,
				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
		}

		/* last fragment of the mbuf chain transmitted */
		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
		m_freem(data->m);
		data->m = NULL;
	}

	if (prog > 0) {
		sc->nfe_force_tx = 0;
		sc->txq.next = cons;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->txq.queued == 0)
			sc->nfe_watchdog_timer = 0;
	}
}

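/*
 * Map and queue an mbuf chain for transmission.  A chain needing more
 * than NFE_MAX_SCATTER segments is compacted with m_collapse() first.
 * Note that the hardware expects the descriptor length field to hold
 * the segment length minus one, and that the NFE_TX_VALID bit of the
 * first descriptor is written last so the chip cannot start on a
 * partially built chain.
 */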
static int
nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
{
	struct nfe_desc32 *desc32 = NULL;
	struct nfe_desc64 *desc64 = NULL;
	bus_dmamap_t map;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	int error, i, nsegs, prod, si;
	uint32_t tsosegsz;
	uint16_t cflags, flags;
	struct mbuf *m;

	prod = si = sc->txq.cur;
	map = sc->txq.data[prod].tx_data_map;

	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
	} else if (error != 0)
		return (error);

	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
		bus_dmamap_unload(sc->txq.tx_data_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = flags = 0;
	tsosegsz = 0;
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    NFE_TX_TSO_SHIFT;
		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
		cflags |= NFE_TX_TSO;
	} else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= NFE_TX_IP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < nsegs; i++) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[prod];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->vtag = 0;
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[prod];
			desc32->physaddr =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully set up.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		NFE_INC(prod, NFE_TX_RING_COUNT);
	}

	/*
	 * The whole mbuf chain has been DMA mapped; fix last/first descriptor.
	 * csum flags, vtag and TSO belong to the first fragment only.
	 */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
		desc64 = &sc->txq.desc64[si];
		if ((m->m_flags & M_VLANTAG) != 0)
			desc64->vtag = htole32(NFE_TX_VTAG |
			    m->m_pkthdr.ether_vtag);
		if (tsosegsz != 0) {
			/*
			 * The following indicates that the descriptor
			 * element is a 32bit quantity.
			 */
			desc64->length |= htole16((uint16_t)tsosegsz);
			desc64->flags |= htole16(tsosegsz >> 16);
		}
		/*
		 * Finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc64->flags |= htole16(NFE_TX_VALID | cflags);
	} else {
		if (sc->nfe_flags & NFE_JUMBO_SUP)
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
		else
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
		desc32 = &sc->txq.desc32[si];
		if (tsosegsz != 0) {
			/*
			 * The following indicates that the descriptor
			 * element is a 32bit quantity.
			 */
			desc32->length |= htole16((uint16_t)tsosegsz);
			desc32->flags |= htole16(tsosegsz >> 16);
		}
		/*
		 * Finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc32->flags |= htole16(NFE_TX_VALID | cflags);
	}

	sc->txq.cur = prod;
	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
	sc->txq.data[prod].tx_data_map = map;
	sc->txq.data[prod].m = m;

	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);

	return (0);
}

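/*
 * Program the hardware multicast filter.  The chip matches one
 * (address, mask) pair instead of a hash table: the filter address is
 * the AND of all subscribed group addresses, and the mask keeps only
 * the bit positions on which they all agree.  As an illustrative
 * example, the groups 01:00:5e:00:00:01 and 01:00:5e:00:00:05 differ
 * only in bit 2 of the last byte, yielding address 01:00:5e:00:00:01
 * and a final mask byte of 0xfb.
 */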
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct ifmultiaddr *ifma;
	int i;
	uint32_t filter;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	NFE_LOCK_ASSERT(sc);

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		addrp = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			u_int8_t mcaddr = addrp[i];

			addr[i] &= mcaddr;
			mask[i] &= ~mcaddr;
		}
	}
	if_maddr_runlock(ifp);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter = NFE_READ(sc, NFE_RXFILTER);
	filter &= NFE_PFF_RX_PAUSE;
	filter |= NFE_RXFILTER_MAGIC;
	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

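/*
 * if_start entry point; takes the driver lock and defers to
 * nfe_start_locked(), which dequeues packets until the ring fills.  On
 * a full ring the packet is requeued and IFF_DRV_OACTIVE is set.  The
 * Tx kick (NFE_RXTX_KICKTX) is issued once per burst rather than per
 * packet.
 */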
static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	NFE_LOCK(sc);
	nfe_start_locked(ifp);
	NFE_UNLOCK(sc);
}

static void
nfe_start_locked(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	int enq;

	NFE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->nfe_link == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->nfe_watchdog_timer = 5;
	}
}

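/*
 * Watchdog, driven from nfe_tick().  Recovery is staged: first assume a
 * lost Tx completion interrupt and reclaim descriptors; next assume a
 * lost kick command and rewrite NFE_RXTX_KICKTX (up to three times,
 * counted in nfe_force_tx); only then declare a real timeout and
 * reinitialize the chip.
 */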
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
		return;

	/* Check if we've lost Tx completion interrupt. */
	nfe_txeof(sc);
	if (sc->txq.queued == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			nfe_start_locked(ifp);
		return;
	}
	/* Check if we've lost start Tx command. */
	sc->nfe_force_tx++;
	if (sc->nfe_force_tx <= 3) {
		/*
		 * If this is the case for watchdog timeout, the following
		 * code should go to nfe_txeof().
		 */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
		return;
	}
	sc->nfe_force_tx = 0;

	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_oerrors++;
	nfe_init_locked(sc);
}

static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_init_locked(sc);
	NFE_UNLOCK(sc);
}

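/*
 * Chip initialization, called with the driver lock held.  The Rx ring
 * flavor is chosen from the MTU: frames that do not fit a standard
 * cluster (MCLBYTES) go through the jumbo ring.  Several of the writes
 * below (NFE_SETUP_Rx, NFE_STATUS_MAGIC and friends) are magic values
 * presumably inherited from the reverse-engineered driver this code
 * derives from.
 */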
static void
nfe_init_locked(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = sc->nfe_ifp;
	struct mii_data *mii;
	uint32_t val;
	int error;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nfe_stop(ifp);

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;

	nfe_init_tx_ring(sc, &sc->txq);
	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
		error = nfe_init_jrx_ring(sc, &sc->jrxq);
	else
		error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "initialization failed: no memory for rx buffers\n");
		nfe_stop(ifp);
		return;
	}

	val = 0;
	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
		val |= NFE_MAC_ADDR_INORDER;
	NFE_WRITE(sc, NFE_TX_UNK, val);
	NFE_WRITE(sc, NFE_STATUS, 0);

	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->nfe_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, IF_LLADDR(ifp));

	/* tell MAC where rings are in memory */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
	} else {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->rxq.physaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

	/* force MAC to wakeup */
	val = NFE_READ(sc, NFE_PWR_STATE);
	if ((val & NFE_PWR_WAKEUP) == 0)
		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
	DELAY(10);
	val = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* Clear hardware stats. */
	nfe_stats_clear(sc);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		nfe_disable_intr(sc);
	else
#endif
	nfe_enable_intr(sc); /* enable interrupts */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->nfe_link = 0;
	mii_mediachg(mii);

	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}

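/*
 * Stop the chip and drain both directions: halt Tx and Rx, mask
 * interrupts, then unload and free any mbufs still owned by the Rx,
 * jumbo Rx and Tx rings so a later nfe_init_locked() starts clean.
 */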
static void
nfe_stop(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_rx_ring *rx_ring;
	struct nfe_jrx_ring *jrx_ring;
	struct nfe_tx_ring *tx_ring;
	struct nfe_rx_data *rdata;
	struct nfe_tx_data *tdata;
	int i;

	NFE_LOCK_ASSERT(sc);

	sc->nfe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	nfe_disable_intr(sc);

	sc->nfe_link = 0;

	/* free Rx and Tx mbufs still in the queues. */
	rx_ring = &sc->rxq;
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		rdata = &rx_ring->data[i];
		if (rdata->m != NULL) {
			bus_dmamap_sync(rx_ring->rx_data_tag,
			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->rx_data_tag,
			    rdata->rx_data_map);
			m_freem(rdata->m);
			rdata->m = NULL;
		}
	}

	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
		jrx_ring = &sc->jrxq;
		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
			rdata = &jrx_ring->jdata[i];
			if (rdata->m != NULL) {
				bus_dmamap_sync(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map);
				m_freem(rdata->m);
				rdata->m = NULL;
			}
		}
	}

	tx_ring = &sc->txq;
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		tdata = &tx_ring->data[i];
		if (tdata->m != NULL) {
			bus_dmamap_sync(tx_ring->tx_data_tag,
			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->tx_data_tag,
			    tdata->tx_data_map);
			m_freem(tdata->m);
			tdata->m = NULL;
		}
	}

	/* Update hardware stats. */
	nfe_stats_update(sc);
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_mediachg(mii);
	NFE_UNLOCK(sc);

	return (0);
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NFE_UNLOCK(sc);
}

static void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = (struct nfe_softc *)xsc;

	NFE_LOCK_ASSERT(sc);

	ifp = sc->nfe_ifp;

	mii = device_get_softc(sc->nfe_miibus);
	mii_tick(mii);
	nfe_stats_update(sc);
	nfe_watchdog(ifp);
	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}

static int
nfe_shutdown(device_t dev)
{

	return (nfe_suspend(dev));
}

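/*
 * Read the station address from the chip.  Depending on the chip
 * generation the address registers hold the bytes in either order
 * (NFE_CORRECT_MACADDR), hence the two layouts below.
 */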
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t val;

	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (val >> 8) & 0xff;
		addr[1] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (val >> 24) & 0xff;
		addr[3] = (val >> 16) & 0xff;
		addr[4] = (val >> 8) & 0xff;
		addr[5] = (val & 0xff);
	} else {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[5] = (val >> 8) & 0xff;
		addr[4] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[3] = (val >> 24) & 0xff;
		addr[2] = (val >> 16) & 0xff;
		addr[1] = (val >> 8) & 0xff;
		addr[0] = (val & 0xff);
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
{

	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
	    addr[1] << 8 | addr[0]);
}

/*
 * Map a single buffer address.
 */
static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nfe_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

	ctx = (struct nfe_dmamap_arg *)arg;
	ctx->nfe_busaddr = segs[0].ds_addr;
}

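/*
 * Generic bounded-integer sysctl helper: report the current value and
 * accept a new one only if it lies within [low, high].  It backs the
 * process_limit sysctl below, clamping it to NFE_PROC_MIN..NFE_PROC_MAX.
 */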
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
	    NFE_PROC_MAX));
}

#define	NFE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	NFE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

static void
nfe_sysctl_node(struct nfe_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct nfe_hw_stats *stats;
	int error;

	stats = &sc->nfe_stats;
	ctx = device_get_sysctl_ctx(sc->nfe_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
	SYSCTL_ADD_PROC(ctx, child,
	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
	    "max number of Rx events to process");

	sc->nfe_process_limit = NFE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->nfe_dev),
	    device_get_unit(sc->nfe_dev), "process_limit",
	    &sc->nfe_process_limit);
	if (error == 0) {
		if (sc->nfe_process_limit < NFE_PROC_MIN ||
		    sc->nfe_process_limit > NFE_PROC_MAX) {
			device_printf(sc->nfe_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", NFE_PROC_DEFAULT);
			sc->nfe_process_limit = NFE_PROC_DEFAULT;
		}
	}

	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
		return;

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "NFE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);

	NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
	    &stats->rx_frame_errors, "Framing Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
	    &stats->rx_extra_bytes, "Extra Bytes");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
	    &stats->rx_late_cols, "Late Collisions");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
	    &stats->rx_runts, "Runts");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
	    &stats->rx_jumbos, "Jumbos");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
	    &stats->rx_fifo_overuns, "FIFO Overruns");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
	    &stats->rx_crc_errors, "CRC Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
	    &stats->rx_fae, "Frame Alignment Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
	    &stats->rx_len_errors, "Length Errors");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
	    &stats->rx_unicast, "Unicast Frames");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
	    &stats->rx_multicast, "Multicast Frames");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
	    &stats->rx_broadcast, "Broadcast Frames");
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
		    &stats->rx_octets, "Octets");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
		    &stats->rx_pause, "Pause Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
		    &stats->rx_drops, "Dropped Frames");
	}

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
	    &stats->tx_octets, "Octets");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
	    &stats->tx_zero_rexmits, "Zero Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
	    &stats->tx_one_rexmits, "One Retransmit");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
	    &stats->tx_multi_rexmits, "Multiple Retransmits");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
	    &stats->tx_late_cols, "Late Collisions");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
	    &stats->tx_fifo_underuns, "FIFO Underruns");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
	    &stats->tx_carrier_losts, "Lost Carriers");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
	    &stats->tx_excess_deferals, "Excess Deferrals");
	NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
	    &stats->tx_retry_errors, "Retry Errors");
	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
		    &stats->tx_deferals, "Deferrals");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
		    &stats->tx_frames, "Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
		    &stats->tx_pause, "Pause Frames");
	}
	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
		    &stats->tx_unicast, "Unicast Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
		    &stats->tx_multicast, "Multicast Frames");
		NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
		    &stats->tx_broadcast, "Broadcast Frames");
	}
}

#undef NFE_SYSCTL_STAT_ADD32
#undef NFE_SYSCTL_STAT_ADD64

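/*
 * The hardware MIB counters are cleared by reading them, so sweeping
 * the whole register block discards whatever the chip accumulated and
 * lets nfe_stats_update() start from zero.
 */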
static void
nfe_stats_clear(struct nfe_softc *sc)
{
	int i, mib_cnt;

	if ((sc->nfe_flags & NFE_MIB_V1) != 0)
		mib_cnt = NFE_NUM_MIB_STATV1;
	else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
		mib_cnt = NFE_NUM_MIB_STATV2;
	else
		return;

	for (i = 0; i < mib_cnt; i++)
		NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));

	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		NFE_READ(sc, NFE_TX_UNICAST);
		NFE_READ(sc, NFE_TX_MULTICAST);
		NFE_READ(sc, NFE_TX_BROADCAST);
	}
}

static void
nfe_stats_update(struct nfe_softc *sc)
{
	struct nfe_hw_stats *stats;

	NFE_LOCK_ASSERT(sc);

	if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
		return;

	stats = &sc->nfe_stats;
	stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
	stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
	stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
	stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
	stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
	stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
	stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
	stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
	stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
	stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
	stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
	stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
	stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
	stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
	stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
	stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
	stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
	stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
	stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
	stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
	stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);

	if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
		stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
		stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
		stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
		stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
		stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
		stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
	}

	if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
		stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
		stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
		stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
	}
}

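/*
 * Force the PHY down to 10/100 before suspending with WOL armed; a
 * 1000baseT link is typically not maintainable on the power budget
 * available in low-power states.  Autonegotiation is restarted
 * advertising 10/100 only, then polled until a link comes up, falling
 * back to a forced 100Mbps full-duplex MAC configuration as a last
 * resort.
 */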
static void
nfe_set_linkspeed(struct nfe_softc *sc)
{
	struct mii_softc *miisc;
	struct mii_data *mii;
	int aneg, i, phyno;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);
	mii_pollstat(mii);
	aneg = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			return;
		case IFM_1000_T:
			aneg++;
			break;
		default:
			break;
		}
	}
	miisc = LIST_FIRST(&mii->mii_phys);
	phyno = miisc->mii_phy;
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		mii_phy_reset(miisc);
	nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
	nfe_miibus_writereg(sc->nfe_dev, phyno,
	    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	nfe_miibus_writereg(sc->nfe_dev, phyno,
	    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
	DELAY(1000);
	if (aneg != 0) {
		/*
		 * Poll link state until nfe(4) gets a 10/100Mbps link.
		 */
		for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
			mii_pollstat(mii);
			if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
			    == (IFM_ACTIVE | IFM_AVALID)) {
				switch (IFM_SUBTYPE(mii->mii_media_active)) {
				case IFM_10_T:
				case IFM_100_TX:
					nfe_mac_config(sc, mii);
					return;
				default:
					break;
				}
			}
			NFE_UNLOCK(sc);
			pause("nfelnk", hz);
			NFE_LOCK(sc);
		}
		if (i == MII_ANEGTICKS_GIGE)
			device_printf(sc->nfe_dev,
			    "establishing a link failed, WOL may not work!\n");
	}
	/*
	 * No link; force the MAC to a 100Mbps, full-duplex link.
	 * This is the last resort and may or may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
	nfe_mac_config(sc, mii);
}

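/*
 * Arm Wake-On-LAN: program the chip's WOL control, downshift the link
 * via nfe_set_linkspeed(), keep the receiver running so wakeup frames
 * are seen, and request PME# assertion through the PCI power-management
 * capability.
 */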
static void
nfe_set_wol(struct nfe_softc *sc)
{
	struct ifnet *ifp;
	uint32_t wolctl;
	uint16_t pmstat;
	int pmc;

	NFE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
		return;
	ifp = sc->nfe_ifp;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		wolctl = NFE_WOL_MAGIC;
	else
		wolctl = 0;
	NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		nfe_set_linkspeed(sc);
		if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
			NFE_WRITE(sc, NFE_PWR2_CTL,
			    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
		/* Enable Rx. */
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
		NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
		    NFE_RX_START);
	}
	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);