1 /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
3 /*-
4  * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5  * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6  * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
26 #ifdef HAVE_KERNEL_OPTION_HEADERS
27 #include "opt_device_polling.h"
28 #endif
30 #include <sys/param.h>
31 #include <sys/endian.h>
32 #include <sys/systm.h>
33 #include <sys/sockio.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/kernel.h>
38 #include <sys/queue.h>
39 #include <sys/socket.h>
40 #include <sys/sysctl.h>
41 #include <sys/taskqueue.h>
44 #include <net/if_arp.h>
45 #include <net/ethernet.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 #include <net/if_types.h>
49 #include <net/if_vlan_var.h>
53 #include <machine/bus.h>
54 #include <machine/resource.h>
58 #include <dev/mii/mii.h>
59 #include <dev/mii/miivar.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
64 #include <dev/nfe/if_nfereg.h>
65 #include <dev/nfe/if_nfevar.h>
67 MODULE_DEPEND(nfe, pci, 1, 1, 1);
68 MODULE_DEPEND(nfe, ether, 1, 1, 1);
69 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
71 /* "device miibus" required. See GENERIC if you get errors here. */
72 #include "miibus_if.h"
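/* miibus_if.h is generated at build time from sys/dev/mii/miibus_if.m. */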
74 static int nfe_probe(device_t);
75 static int nfe_attach(device_t);
76 static int nfe_detach(device_t);
77 static int nfe_suspend(device_t);
78 static int nfe_resume(device_t);
79 static int nfe_shutdown(device_t);
80 static int nfe_can_use_msix(struct nfe_softc *);
81 static void nfe_power(struct nfe_softc *);
82 static int nfe_miibus_readreg(device_t, int, int);
83 static int nfe_miibus_writereg(device_t, int, int, int);
84 static void nfe_miibus_statchg(device_t);
85 static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
86 static void nfe_set_intr(struct nfe_softc *);
87 static __inline void nfe_enable_intr(struct nfe_softc *);
88 static __inline void nfe_disable_intr(struct nfe_softc *);
89 static int nfe_ioctl(struct ifnet *, u_long, caddr_t);
90 static void nfe_alloc_msix(struct nfe_softc *, int);
91 static int nfe_intr(void *);
92 static void nfe_int_task(void *, int);
93 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
94 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
95 static int nfe_newbuf(struct nfe_softc *, int);
96 static int nfe_jnewbuf(struct nfe_softc *, int);
97 static int nfe_rxeof(struct nfe_softc *, int, int *);
98 static int nfe_jrxeof(struct nfe_softc *, int, int *);
99 static void nfe_txeof(struct nfe_softc *);
100 static int nfe_encap(struct nfe_softc *, struct mbuf **);
101 static void nfe_setmulti(struct nfe_softc *);
102 static void nfe_start(struct ifnet *);
103 static void nfe_start_locked(struct ifnet *);
104 static void nfe_watchdog(struct ifnet *);
105 static void nfe_init(void *);
106 static void nfe_init_locked(void *);
107 static void nfe_stop(struct ifnet *);
108 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
109 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
110 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
111 static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
112 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
113 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
114 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
115 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117 static int nfe_ifmedia_upd(struct ifnet *);
118 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
119 static void nfe_tick(void *);
120 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
121 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
122 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
124 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
125 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
126 static void nfe_sysctl_node(struct nfe_softc *);
127 static void nfe_stats_clear(struct nfe_softc *);
128 static void nfe_stats_update(struct nfe_softc *);
129 static void nfe_set_linkspeed(struct nfe_softc *);
130 static void nfe_set_wol(struct nfe_softc *);
132 #ifdef NFE_DEBUG
133 static int nfedebug = 0;
134 #define DPRINTF(sc, ...) do { \
135 if (nfedebug >= 1) \
136 device_printf((sc)->nfe_dev, __VA_ARGS__); \
137 } while (0)
138 #define DPRINTFN(sc, n, ...) do { \
139 if (nfedebug >= (n)) \
140 device_printf((sc)->nfe_dev, __VA_ARGS__); \
141 } while (0)
142 #else
143 #define DPRINTF(sc, ...)
144 #define DPRINTFN(sc, n, ...)
145 #endif
147 #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx)
148 #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx)
149 #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
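/*
 * The per-softc mutex nfe_mtx, taken through the macros above, serializes
 * register access, the descriptor rings and the stats callout.
 */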
152 static int msi_disable = 0;
153 static int msix_disable = 0;
154 static int jumbo_disable = 0;
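/*
 * Loader tunables: these can be set from loader.conf, e.g.
 * "hw.nfe.msi_disable=1" forces legacy INTx interrupts.
 */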
155 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
156 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
157 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
159 static device_method_t nfe_methods[] = {
160 /* Device interface */
161 DEVMETHOD(device_probe, nfe_probe),
162 DEVMETHOD(device_attach, nfe_attach),
163 DEVMETHOD(device_detach, nfe_detach),
164 DEVMETHOD(device_suspend, nfe_suspend),
165 DEVMETHOD(device_resume, nfe_resume),
166 DEVMETHOD(device_shutdown, nfe_shutdown),
168 /* MII interface */
169 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
170 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
171 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
173 DEVMETHOD_END
174 };
176 static driver_t nfe_driver = {
177 "nfe",
178 nfe_methods,
179 sizeof(struct nfe_softc)
180 };
182 static devclass_t nfe_devclass;
184 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
185 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
187 static struct nfe_type nfe_devs[] = {
188 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
189 "NVIDIA nForce MCP Networking Adapter"},
190 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
191 "NVIDIA nForce2 MCP2 Networking Adapter"},
192 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
193 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
194 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
195 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
196 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
197 "NVIDIA nForce3 MCP3 Networking Adapter"},
198 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
199 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
200 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
201 "NVIDIA nForce3 MCP7 Networking Adapter"},
202 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
203 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
204 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
205 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
206 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
207 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */
208 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
209 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */
210 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
211 "NVIDIA nForce 430 MCP12 Networking Adapter"},
212 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
213 "NVIDIA nForce 430 MCP13 Networking Adapter"},
214 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
215 "NVIDIA nForce MCP55 Networking Adapter"},
216 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
217 "NVIDIA nForce MCP55 Networking Adapter"},
218 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
219 "NVIDIA nForce MCP61 Networking Adapter"},
220 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
221 "NVIDIA nForce MCP61 Networking Adapter"},
222 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
223 "NVIDIA nForce MCP61 Networking Adapter"},
224 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
225 "NVIDIA nForce MCP61 Networking Adapter"},
226 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
227 "NVIDIA nForce MCP65 Networking Adapter"},
228 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
229 "NVIDIA nForce MCP65 Networking Adapter"},
230 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
231 "NVIDIA nForce MCP65 Networking Adapter"},
232 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
233 "NVIDIA nForce MCP65 Networking Adapter"},
234 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
235 "NVIDIA nForce MCP67 Networking Adapter"},
236 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
237 "NVIDIA nForce MCP67 Networking Adapter"},
238 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
239 "NVIDIA nForce MCP67 Networking Adapter"},
240 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
241 "NVIDIA nForce MCP67 Networking Adapter"},
242 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
243 "NVIDIA nForce MCP73 Networking Adapter"},
244 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
245 "NVIDIA nForce MCP73 Networking Adapter"},
246 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
247 "NVIDIA nForce MCP73 Networking Adapter"},
248 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
249 "NVIDIA nForce MCP73 Networking Adapter"},
250 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
251 "NVIDIA nForce MCP77 Networking Adapter"},
252 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
253 "NVIDIA nForce MCP77 Networking Adapter"},
254 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
255 "NVIDIA nForce MCP77 Networking Adapter"},
256 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
257 "NVIDIA nForce MCP77 Networking Adapter"},
258 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
259 "NVIDIA nForce MCP79 Networking Adapter"},
260 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
261 "NVIDIA nForce MCP79 Networking Adapter"},
262 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
263 "NVIDIA nForce MCP79 Networking Adapter"},
264 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
265 "NVIDIA nForce MCP79 Networking Adapter"},
270 /* Probe for supported hardware IDs. */
271 static int
272 nfe_probe(device_t dev)
273 {
274 struct nfe_type *t;
276 t = nfe_devs;
277 /* Check for matching PCI device IDs. */
278 while (t->name != NULL) {
279 if ((pci_get_vendor(dev) == t->vid_id) &&
280 (pci_get_device(dev) == t->dev_id)) {
281 device_set_desc(dev, t->name);
282 return (BUS_PROBE_DEFAULT);
283 }
284 t++;
285 }
287 return (ENXIO);
288 }
290 static void
291 nfe_alloc_msix(struct nfe_softc *sc, int count)
292 {
293 int rid;
295 rid = PCIR_BAR(2);
296 sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
297 &rid, RF_ACTIVE);
298 if (sc->nfe_msix_res == NULL) {
299 device_printf(sc->nfe_dev,
300 "couldn't allocate MSIX table resource\n");
301 return;
302 }
303 rid = PCIR_BAR(3);
304 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
305 SYS_RES_MEMORY, &rid, RF_ACTIVE);
306 if (sc->nfe_msix_pba_res == NULL) {
307 device_printf(sc->nfe_dev,
308 "couldn't allocate MSIX PBA resource\n");
309 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
310 sc->nfe_msix_res);
311 sc->nfe_msix_res = NULL;
312 return;
313 }
315 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
316 if (count == NFE_MSI_MESSAGES) {
317 if (bootverbose)
318 device_printf(sc->nfe_dev,
319 "Using %d MSIX messages\n", count);
320 sc->nfe_msix = count;
321 } else {
322 if (bootverbose)
323 device_printf(sc->nfe_dev,
324 "couldn't allocate MSIX\n");
325 pci_release_msi(sc->nfe_dev);
326 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
327 PCIR_BAR(3), sc->nfe_msix_pba_res);
328 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
329 PCIR_BAR(2), sc->nfe_msix_res);
330 sc->nfe_msix_pba_res = NULL;
331 sc->nfe_msix_res = NULL;
332 }
333 }
334 }
336 static int
337 nfe_attach(device_t dev)
338 {
339 struct nfe_softc *sc;
340 struct ifnet *ifp;
341 bus_addr_t dma_addr_max;
342 int error = 0, i, msic, reg, rid;
344 sc = device_get_softc(dev);
345 sc->nfe_dev = dev;
347 mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
348 MTX_DEF);
349 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
351 pci_enable_busmaster(dev);
353 rid = PCIR_BAR(0);
354 sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
355 RF_ACTIVE);
356 if (sc->nfe_res[0] == NULL) {
357 device_printf(dev, "couldn't map memory resources\n");
358 mtx_destroy(&sc->nfe_mtx);
359 return (ENXIO);
360 }
362 if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
363 uint16_t v, width;
365 v = pci_read_config(dev, reg + 0x08, 2);
366 /* Change max. read request size to 4096. */
367 v &= ~(7 << 12);
368 v |= (5 << 12);
369 pci_write_config(dev, reg + 0x08, v, 2);
371 v = pci_read_config(dev, reg + 0x0c, 2);
372 /* link capability */
373 v = (v >> 4) & 0x0f;
374 width = pci_read_config(dev, reg + 0x12, 2);
375 /* negotiated link width */
376 width = (width >> 4) & 0x3f;
377 if (v != width)
378 device_printf(sc->nfe_dev,
379 "warning, negotiated width of link(x%d) != "
380 "max. width of link(x%d)\n", width, v);
381 }
383 if (nfe_can_use_msix(sc) == 0) {
384 device_printf(sc->nfe_dev,
385 "MSI/MSI-X capability black-listed, will use INTx\n");
390 /* Allocate interrupt */
391 if (msix_disable == 0 || msi_disable == 0) {
392 if (msix_disable == 0 &&
393 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
394 nfe_alloc_msix(sc, msic);
395 if (msi_disable == 0 && sc->nfe_msix == 0 &&
396 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
397 pci_alloc_msi(dev, &msic) == 0) {
398 if (msic == NFE_MSI_MESSAGES) {
399 if (bootverbose)
400 device_printf(dev,
401 "Using %d MSI messages\n", msic);
402 sc->nfe_msi = msic;
403 } else
404 pci_release_msi(dev);
405 }
406 }
408 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
409 rid = 0;
410 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
411 RF_SHAREABLE | RF_ACTIVE);
412 if (sc->nfe_irq[0] == NULL) {
413 device_printf(dev, "couldn't allocate IRQ resources\n");
414 error = ENXIO;
415 goto fail;
416 }
417 } else {
418 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
419 sc->nfe_irq[i] = bus_alloc_resource_any(dev,
420 SYS_RES_IRQ, &rid, RF_ACTIVE);
421 if (sc->nfe_irq[i] == NULL) {
422 device_printf(dev,
423 "couldn't allocate IRQ resources for "
424 "message %d\n", rid);
425 error = ENXIO;
426 goto fail;
427 }
428 }
429 }
429 /* Map interrupts to vector 0. */
430 if (sc->nfe_msix != 0) {
431 NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
432 NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
433 } else if (sc->nfe_msi != 0) {
434 NFE_WRITE(sc, NFE_MSI_MAP0, 0);
435 NFE_WRITE(sc, NFE_MSI_MAP1, 0);
439 /* Set IRQ status/mask register. */
440 sc->nfe_irq_status = NFE_IRQ_STATUS;
441 sc->nfe_irq_mask = NFE_IRQ_MASK;
442 sc->nfe_intrs = NFE_IRQ_WANTED;
444 if (sc->nfe_msix != 0) {
445 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
446 sc->nfe_nointrs = NFE_IRQ_WANTED;
447 } else if (sc->nfe_msi != 0) {
448 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
449 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
452 sc->nfe_devid = pci_get_device(dev);
453 sc->nfe_revid = pci_get_revid(dev);
456 switch (sc->nfe_devid) {
457 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
458 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
459 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
460 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
461 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
462 break;
463 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
464 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
465 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
466 break;
467 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
468 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
469 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
470 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
471 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
472 NFE_PWR_MGMT | NFE_MIB_V1;
473 break;
474 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
475 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
476 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
477 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
478 break;
480 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
481 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
482 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
483 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
484 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
485 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
486 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
487 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
488 case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
489 case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
490 case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
491 case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
492 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
493 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
494 break;
495 case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
496 case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
497 case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
498 case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
499 /* XXX flow control */
500 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
501 NFE_CORRECT_MACADDR | NFE_MIB_V3;
502 break;
503 case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
504 case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
505 case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
506 case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
507 /* XXX flow control */
508 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
509 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
510 break;
511 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
512 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
513 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
514 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
515 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
516 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
517 NFE_MIB_V2;
518 break;
519 }
522 /* Check for a reversed Ethernet address. */
523 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
524 sc->nfe_flags |= NFE_CORRECT_MACADDR;
525 nfe_get_macaddr(sc, sc->eaddr);
526 /*
527 * Allocate the parent bus DMA tag appropriate for PCI.
528 */
529 dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
530 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
531 dma_addr_max = NFE_DMA_MAXADDR;
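/*
 * Controllers with NFE_40BIT_ADDR can DMA above 4GB; everything else
 * is restricted to 32-bit bus addresses.
 */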
532 error = bus_dma_tag_create(
533 bus_get_dma_tag(sc->nfe_dev), /* parent */
534 1, 0, /* alignment, boundary */
535 dma_addr_max, /* lowaddr */
536 BUS_SPACE_MAXADDR, /* highaddr */
537 NULL, NULL, /* filter, filterarg */
538 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
539 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
540 0, /* flags */
541 NULL, NULL, /* lockfunc, lockarg */
542 &sc->nfe_parent_tag);
543 if (error)
544 goto fail;
546 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
547 if (ifp == NULL) {
548 device_printf(dev, "can not if_alloc()\n");
549 error = ENOSPC;
550 goto fail;
551 }
553 /*
554 * Allocate Tx and Rx rings.
555 */
556 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
557 goto fail;
559 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
560 goto fail;
562 nfe_alloc_jrx_ring(sc, &sc->jrxq);
563 /* Create sysctl node. */
564 nfe_sysctl_node(sc);
566 ifp->if_softc = sc;
567 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
568 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
569 ifp->if_ioctl = nfe_ioctl;
570 ifp->if_start = nfe_start;
571 ifp->if_hwassist = 0;
572 ifp->if_capabilities = 0;
573 ifp->if_init = nfe_init;
574 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
575 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
576 IFQ_SET_READY(&ifp->if_snd);
578 if (sc->nfe_flags & NFE_HW_CSUM) {
579 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
580 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
582 ifp->if_capenable = ifp->if_capabilities;
584 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
585 /* VLAN capability setup. */
586 ifp->if_capabilities |= IFCAP_VLAN_MTU;
587 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
588 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
589 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
590 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM |
591 IFCAP_VLAN_HWTSO;
592 }
594 if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
595 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
596 ifp->if_capenable = ifp->if_capabilities;
598 /*
599 * Tell the upper layer(s) we support long frames.
600 * Must appear after the call to ether_ifattach() because
601 * ether_ifattach() sets ifi_hdrlen to the default value.
602 */
603 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
605 #ifdef DEVICE_POLLING
606 ifp->if_capabilities |= IFCAP_POLLING;
607 #endif
610 error = mii_attach(dev, &sc->nfe_miibus, ifp, nfe_ifmedia_upd,
611 nfe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
612 MIIF_DOPAUSE);
613 if (error != 0) {
614 device_printf(dev, "attaching PHYs failed\n");
615 goto fail;
616 }
617 ether_ifattach(ifp, sc->eaddr);
619 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
620 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
621 taskqueue_thread_enqueue, &sc->nfe_tq);
622 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
623 device_get_nameunit(sc->nfe_dev));
625 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
626 error = bus_setup_intr(dev, sc->nfe_irq[0],
627 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
628 &sc->nfe_intrhand[0]);
630 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
631 error = bus_setup_intr(dev, sc->nfe_irq[i],
632 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
633 &sc->nfe_intrhand[i]);
634 if (error != 0)
635 break;
636 }
637 }
638 if (error != 0) {
639 device_printf(dev, "couldn't set up irq\n");
640 taskqueue_free(sc->nfe_tq);
641 sc->nfe_tq = NULL;
642 ether_ifdetach(ifp);
643 goto fail;
644 }
646 fail:
647 if (error)
648 nfe_detach(dev);
650 return (error);
651 }
654 static int
655 nfe_detach(device_t dev)
657 struct nfe_softc *sc;
658 struct ifnet *ifp;
659 uint8_t eaddr[ETHER_ADDR_LEN];
660 int i, rid;
662 sc = device_get_softc(dev);
663 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
664 ifp = sc->nfe_ifp;
666 #ifdef DEVICE_POLLING
667 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
668 ether_poll_deregister(ifp);
669 #endif
670 if (device_is_attached(dev)) {
671 NFE_LOCK(sc);
672 nfe_stop(ifp);
673 ifp->if_flags &= ~IFF_UP;
674 NFE_UNLOCK(sc);
675 callout_drain(&sc->nfe_stat_ch);
676 ether_ifdetach(ifp);
677 }
680 /* restore ethernet address */
681 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
682 for (i = 0; i < ETHER_ADDR_LEN; i++) {
683 eaddr[i] = sc->eaddr[5 - i];
684 }
685 } else
686 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
687 nfe_set_macaddr(sc, eaddr);
691 device_delete_child(dev, sc->nfe_miibus);
692 bus_generic_detach(dev);
693 if (sc->nfe_tq != NULL) {
694 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
695 taskqueue_free(sc->nfe_tq);
699 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
700 if (sc->nfe_intrhand[i] != NULL) {
701 bus_teardown_intr(dev, sc->nfe_irq[i],
702 sc->nfe_intrhand[i]);
703 sc->nfe_intrhand[i] = NULL;
707 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
708 if (sc->nfe_irq[0] != NULL)
709 bus_release_resource(dev, SYS_RES_IRQ, 0,
712 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
713 if (sc->nfe_irq[i] != NULL) {
714 bus_release_resource(dev, SYS_RES_IRQ, rid,
716 sc->nfe_irq[i] = NULL;
719 pci_release_msi(dev);
721 if (sc->nfe_msix_pba_res != NULL) {
722 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
723 sc->nfe_msix_pba_res);
724 sc->nfe_msix_pba_res = NULL;
726 if (sc->nfe_msix_res != NULL) {
727 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
729 sc->nfe_msix_res = NULL;
731 if (sc->nfe_res[0] != NULL) {
732 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
734 sc->nfe_res[0] = NULL;
737 nfe_free_tx_ring(sc, &sc->txq);
738 nfe_free_rx_ring(sc, &sc->rxq);
739 nfe_free_jrx_ring(sc, &sc->jrxq);
741 if (sc->nfe_parent_tag) {
742 bus_dma_tag_destroy(sc->nfe_parent_tag);
743 sc->nfe_parent_tag = NULL;
746 mtx_destroy(&sc->nfe_mtx);
748 return (0);
749 }
752 static int
753 nfe_suspend(device_t dev)
754 {
755 struct nfe_softc *sc;
757 sc = device_get_softc(dev);
759 NFE_LOCK(sc);
760 nfe_stop(sc->nfe_ifp);
761 nfe_set_wol(sc);
762 sc->nfe_suspended = 1;
763 NFE_UNLOCK(sc);
765 return (0);
766 }
769 static int
770 nfe_resume(device_t dev)
771 {
772 struct nfe_softc *sc;
773 struct ifnet *ifp;
775 sc = device_get_softc(dev);
777 NFE_LOCK(sc);
778 nfe_power(sc);
779 ifp = sc->nfe_ifp;
780 if (ifp->if_flags & IFF_UP)
781 nfe_init_locked(sc);
782 sc->nfe_suspended = 0;
783 NFE_UNLOCK(sc);
785 return (0);
786 }
789 static int
790 nfe_can_use_msix(struct nfe_softc *sc)
791 {
792 static struct msix_blacklist {
793 char *maker;
794 char *product;
795 } msix_blacklists[] = {
796 { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
797 };
799 struct msix_blacklist *mblp;
800 char *maker, *product;
801 int count, n, use_msix;
803 /*
804 * Search the base board manufacturer and product name table
805 * to see whether this system has a known MSI/MSI-X issue.
806 */
807 maker = getenv("smbios.planar.maker");
808 product = getenv("smbios.planar.product");
809 use_msix = 1;
810 if (maker != NULL && product != NULL) {
811 count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
812 mblp = msix_blacklists;
813 for (n = 0; n < count; n++) {
814 if (strcmp(maker, mblp->maker) == 0 &&
815 strcmp(product, mblp->product) == 0) {
816 use_msix = 0;
817 break;
818 }
819 mblp++;
820 }
821 }
822 if (maker != NULL)
823 freeenv(maker);
824 if (product != NULL)
825 freeenv(product);
827 return (use_msix);
828 }
831 /* Take PHY/NIC out of powerdown, from Linux */
832 static void
833 nfe_power(struct nfe_softc *sc)
834 {
835 uint32_t pwr;
837 if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
838 return;
839 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
840 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
842 NFE_WRITE(sc, NFE_MAC_RESET, 0);
844 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
845 pwr = NFE_READ(sc, NFE_PWR2_CTL);
846 pwr &= ~NFE_PWR2_WAKEUP_MASK;
847 if (sc->nfe_revid >= 0xa3 &&
848 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
849 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
850 pwr |= NFE_PWR2_REVA3;
851 NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
852 }
855 static void
856 nfe_miibus_statchg(device_t dev)
857 {
858 struct nfe_softc *sc;
859 struct mii_data *mii;
860 struct ifnet *ifp;
861 uint32_t rxctl, txctl;
863 sc = device_get_softc(dev);
865 mii = device_get_softc(sc->nfe_miibus);
866 ifp = sc->nfe_ifp;
868 sc->nfe_link = 0;
869 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
870 (IFM_ACTIVE | IFM_AVALID)) {
871 switch (IFM_SUBTYPE(mii->mii_media_active)) {
872 case IFM_10_T:
873 case IFM_100_TX:
874 case IFM_1000_T:
875 sc->nfe_link = 1;
876 break;
877 default:
878 break;
879 }
880 }
882 nfe_mac_config(sc, mii);
883 txctl = NFE_READ(sc, NFE_TX_CTL);
884 rxctl = NFE_READ(sc, NFE_RX_CTL);
885 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
886 txctl |= NFE_TX_START;
887 rxctl |= NFE_RX_START;
888 } else {
889 txctl &= ~NFE_TX_START;
890 rxctl &= ~NFE_RX_START;
891 }
892 NFE_WRITE(sc, NFE_TX_CTL, txctl);
893 NFE_WRITE(sc, NFE_RX_CTL, rxctl);
897 static void
898 nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
899 {
900 uint32_t link, misc, phy, seed;
901 uint32_t val;
905 phy = NFE_READ(sc, NFE_PHY_IFACE);
906 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
908 seed = NFE_READ(sc, NFE_RNDSEED);
909 seed &= ~NFE_SEED_MASK;
911 misc = NFE_MISC1_MAGIC;
912 link = NFE_MEDIA_SET;
914 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
915 phy |= NFE_PHY_HDX; /* half-duplex */
916 misc |= NFE_MISC1_HDX;
917 }
919 switch (IFM_SUBTYPE(mii->mii_media_active)) {
920 case IFM_1000_T: /* full-duplex only */
921 link |= NFE_MEDIA_1000T;
922 seed |= NFE_SEED_1000T;
923 phy |= NFE_PHY_1000T;
924 break;
925 case IFM_100_TX:
926 link |= NFE_MEDIA_100TX;
927 seed |= NFE_SEED_100TX;
928 phy |= NFE_PHY_100TX;
929 break;
930 case IFM_10_T:
931 link |= NFE_MEDIA_10T;
932 seed |= NFE_SEED_10T;
933 break;
934 }
936 if ((phy & 0x10000000) != 0) {
937 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
938 val = NFE_R1_MAGIC_1000;
939 else
940 val = NFE_R1_MAGIC_10_100;
941 } else
942 val = NFE_R1_MAGIC_DEFAULT;
943 NFE_WRITE(sc, NFE_SETUP_R1, val);
945 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
947 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
948 NFE_WRITE(sc, NFE_MISC1, misc);
949 NFE_WRITE(sc, NFE_LINKSPEED, link);
951 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
952 /* It seems all hardware supports Rx pause frames. */
953 val = NFE_READ(sc, NFE_RXFILTER);
954 if ((IFM_OPTIONS(mii->mii_media_active) &
955 IFM_ETH_RXPAUSE) != 0)
956 val |= NFE_PFF_RX_PAUSE;
957 else
958 val &= ~NFE_PFF_RX_PAUSE;
959 NFE_WRITE(sc, NFE_RXFILTER, val);
960 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
961 val = NFE_READ(sc, NFE_MISC1);
962 if ((IFM_OPTIONS(mii->mii_media_active) &
963 IFM_ETH_TXPAUSE) != 0) {
964 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
965 NFE_TX_PAUSE_FRAME_ENABLE);
966 val |= NFE_MISC1_TX_PAUSE;
967 } else {
968 val &= ~NFE_MISC1_TX_PAUSE;
969 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
970 NFE_TX_PAUSE_FRAME_DISABLE);
971 }
972 NFE_WRITE(sc, NFE_MISC1, val);
973 }
974 } else {
975 /* Disable Rx/Tx pause frames. */
976 val = NFE_READ(sc, NFE_RXFILTER);
977 val &= ~NFE_PFF_RX_PAUSE;
978 NFE_WRITE(sc, NFE_RXFILTER, val);
979 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
980 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
981 NFE_TX_PAUSE_FRAME_DISABLE);
982 val = NFE_READ(sc, NFE_MISC1);
983 val &= ~NFE_MISC1_TX_PAUSE;
984 NFE_WRITE(sc, NFE_MISC1, val);
985 }
986 }
987 }
990 static int
991 nfe_miibus_readreg(device_t dev, int phy, int reg)
992 {
993 struct nfe_softc *sc = device_get_softc(dev);
994 uint32_t val;
995 int ntries;
997 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
999 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1000 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1001 DELAY(100);
1002 }
1004 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
1006 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1007 DELAY(100);
1008 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1009 break;
1010 }
1011 if (ntries == NFE_TIMEOUT) {
1012 DPRINTFN(sc, 2, "timeout waiting for PHY\n");
1013 return (0);
1014 }
1016 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
1017 DPRINTFN(sc, 2, "could not read PHY\n");
1018 return (0);
1019 }
1021 val = NFE_READ(sc, NFE_PHY_DATA);
1022 if (val != 0xffffffff && val != 0)
1023 sc->mii_phyaddr = phy;
1025 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
1027 return (val);
1028 }
1031 static int
1032 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
1033 {
1034 struct nfe_softc *sc = device_get_softc(dev);
1038 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1040 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
1041 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
1042 DELAY(100);
1043 }
1045 NFE_WRITE(sc, NFE_PHY_DATA, val);
1046 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
1047 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
1049 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
1050 DELAY(100);
1051 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
1052 break;
1053 }
1054 #ifdef NFE_DEBUG
1055 if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
1056 device_printf(sc->nfe_dev, "could not write to PHY\n");
1057 #endif
1058 return (0);
1059 }
1061 struct nfe_dmamap_arg {
1062 bus_addr_t nfe_busaddr;
1063 };
1065 static int
1066 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1067 {
1068 struct nfe_dmamap_arg ctx;
1069 struct nfe_rx_data *data;
1070 void *desc;
1071 int i, error, descsize;
1073 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1074 desc = ring->desc64;
1075 descsize = sizeof (struct nfe_desc64);
1077 desc = ring->desc32;
1078 descsize = sizeof (struct nfe_desc32);
1081 ring->cur = ring->next = 0;
1083 error = bus_dma_tag_create(sc->nfe_parent_tag,
1084 NFE_RING_ALIGN, 0, /* alignment, boundary */
1085 BUS_SPACE_MAXADDR, /* lowaddr */
1086 BUS_SPACE_MAXADDR, /* highaddr */
1087 NULL, NULL, /* filter, filterarg */
1088 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1089 NFE_RX_RING_COUNT * descsize, /* maxsegsize */
1091 NULL, NULL, /* lockfunc, lockarg */
1092 &ring->rx_desc_tag);
1094 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1098 /* Allocate memory for the descriptors. */
1099 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1100 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1102 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1105 if (sc->nfe_flags & NFE_40BIT_ADDR)
1106 ring->desc64 = desc;
1108 ring->desc32 = desc;
1110 /* map desc to device visible address space */
1111 ctx.nfe_busaddr = 0;
1112 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1113 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1115 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1118 ring->physaddr = ctx.nfe_busaddr;
1120 error = bus_dma_tag_create(sc->nfe_parent_tag,
1121 1, 0, /* alignment, boundary */
1122 BUS_SPACE_MAXADDR, /* lowaddr */
1123 BUS_SPACE_MAXADDR, /* highaddr */
1124 NULL, NULL, /* filter, filterarg */
1125 MCLBYTES, 1, /* maxsize, nsegments */
1126 MCLBYTES, /* maxsegsize */
1128 NULL, NULL, /* lockfunc, lockarg */
1129 &ring->rx_data_tag);
1131 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1135 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1137 device_printf(sc->nfe_dev,
1138 "could not create Rx DMA spare map\n");
1142 /*
1143 * Pre-allocate Rx buffers and populate Rx ring.
1144 */
1145 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1146 data = &sc->rxq.data[i];
1147 data->rx_data_map = NULL;
1149 error = bus_dmamap_create(ring->rx_data_tag, 0,
1150 &data->rx_data_map);
1152 device_printf(sc->nfe_dev,
1153 "could not create Rx DMA map\n");
1163 static void
1164 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1165 {
1166 struct nfe_dmamap_arg ctx;
1167 struct nfe_rx_data *data;
1168 void *desc;
1169 int i, error, descsize;
1171 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1172 return;
1173 if (jumbo_disable != 0) {
1174 device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1175 sc->nfe_jumbo_disable = 1;
1176 return;
1177 }
1179 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1180 desc = ring->jdesc64;
1181 descsize = sizeof (struct nfe_desc64);
1183 desc = ring->jdesc32;
1184 descsize = sizeof (struct nfe_desc32);
1187 ring->jcur = ring->jnext = 0;
1189 /* Create DMA tag for jumbo Rx ring. */
1190 error = bus_dma_tag_create(sc->nfe_parent_tag,
1191 NFE_RING_ALIGN, 0, /* alignment, boundary */
1192 BUS_SPACE_MAXADDR, /* lowaddr */
1193 BUS_SPACE_MAXADDR, /* highaddr */
1194 NULL, NULL, /* filter, filterarg */
1195 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
1196 1, /* nsegments */
1197 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
1199 NULL, NULL, /* lockfunc, lockarg */
1200 &ring->jrx_desc_tag);
1202 device_printf(sc->nfe_dev,
1203 "could not create jumbo ring DMA tag\n");
1207 /* Create DMA tag for jumbo Rx buffers. */
1208 error = bus_dma_tag_create(sc->nfe_parent_tag,
1209 1, 0, /* alignment, boundary */
1210 BUS_SPACE_MAXADDR, /* lowaddr */
1211 BUS_SPACE_MAXADDR, /* highaddr */
1212 NULL, NULL, /* filter, filterarg */
1213 MJUM9BYTES, /* maxsize */
1214 1, /* nsegments */
1215 MJUM9BYTES, /* maxsegsize */
1217 NULL, NULL, /* lockfunc, lockarg */
1218 &ring->jrx_data_tag);
1220 device_printf(sc->nfe_dev,
1221 "could not create jumbo Rx buffer DMA tag\n");
1225 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1226 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1227 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1229 device_printf(sc->nfe_dev,
1230 "could not allocate DMA'able memory for jumbo Rx ring\n");
1233 if (sc->nfe_flags & NFE_40BIT_ADDR)
1234 ring->jdesc64 = desc;
1236 ring->jdesc32 = desc;
1238 ctx.nfe_busaddr = 0;
1239 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1240 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1242 device_printf(sc->nfe_dev,
1243 "could not load DMA'able memory for jumbo Rx ring\n");
1246 ring->jphysaddr = ctx.nfe_busaddr;
1248 /* Create DMA maps for jumbo Rx buffers. */
1249 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1251 device_printf(sc->nfe_dev,
1252 "could not create jumbo Rx DMA spare map\n");
1256 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1257 data = &sc->jrxq.jdata[i];
1258 data->rx_data_map = NULL;
1260 error = bus_dmamap_create(ring->jrx_data_tag, 0,
1261 &data->rx_data_map);
1263 device_printf(sc->nfe_dev,
1264 "could not create jumbo Rx DMA map\n");
1272 /*
1273 * Running without jumbo frame support is OK for most cases,
1274 * so don't fail on creating the DMA tag/map for jumbo frames.
1275 */
1276 nfe_free_jrx_ring(sc, ring);
1277 device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1278 "resource shortage\n");
1279 sc->nfe_jumbo_disable = 1;
1283 static int
1284 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1285 {
1286 void *desc;
1287 int i, descsize;
1290 ring->cur = ring->next = 0;
1291 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1292 desc = ring->desc64;
1293 descsize = sizeof (struct nfe_desc64);
1295 desc = ring->desc32;
1296 descsize = sizeof (struct nfe_desc32);
1298 bzero(desc, descsize * NFE_RX_RING_COUNT);
1299 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1300 if (nfe_newbuf(sc, i) != 0)
1301 return (ENOBUFS);
1302 }
1304 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1305 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1311 static int
1312 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1313 {
1314 void *desc;
1315 int i, descsize;
1318 ring->jcur = ring->jnext = 0;
1319 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1320 desc = ring->jdesc64;
1321 descsize = sizeof (struct nfe_desc64);
1323 desc = ring->jdesc32;
1324 descsize = sizeof (struct nfe_desc32);
1326 bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1327 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1328 if (nfe_jnewbuf(sc, i) != 0)
1329 return (ENOBUFS);
1330 }
1332 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1333 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1339 static void
1340 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1341 {
1342 struct nfe_rx_data *data;
1343 void *desc;
1344 int i, descsize;
1346 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1347 desc = ring->desc64;
1348 descsize = sizeof (struct nfe_desc64);
1350 desc = ring->desc32;
1351 descsize = sizeof (struct nfe_desc32);
1354 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1355 data = &ring->data[i];
1356 if (data->rx_data_map != NULL) {
1357 bus_dmamap_destroy(ring->rx_data_tag,
1359 data->rx_data_map = NULL;
1361 if (data->m != NULL) {
1362 m_freem(data->m);
1363 data->m = NULL;
1364 }
1365 }
1366 if (ring->rx_data_tag != NULL) {
1367 if (ring->rx_spare_map != NULL) {
1368 bus_dmamap_destroy(ring->rx_data_tag,
1369 ring->rx_spare_map);
1370 ring->rx_spare_map = NULL;
1372 bus_dma_tag_destroy(ring->rx_data_tag);
1373 ring->rx_data_tag = NULL;
1377 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1378 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1379 ring->desc64 = NULL;
1380 ring->desc32 = NULL;
1381 ring->rx_desc_map = NULL;
1383 if (ring->rx_desc_tag != NULL) {
1384 bus_dma_tag_destroy(ring->rx_desc_tag);
1385 ring->rx_desc_tag = NULL;
1390 static void
1391 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1392 {
1393 struct nfe_rx_data *data;
1394 void *desc;
1395 int i, descsize;
1397 if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
1398 return;
1400 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1401 desc = ring->jdesc64;
1402 descsize = sizeof (struct nfe_desc64);
1404 desc = ring->jdesc32;
1405 descsize = sizeof (struct nfe_desc32);
1408 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1409 data = &ring->jdata[i];
1410 if (data->rx_data_map != NULL) {
1411 bus_dmamap_destroy(ring->jrx_data_tag,
1413 data->rx_data_map = NULL;
1415 if (data->m != NULL) {
1416 m_freem(data->m);
1417 data->m = NULL;
1418 }
1419 }
1420 if (ring->jrx_data_tag != NULL) {
1421 if (ring->jrx_spare_map != NULL) {
1422 bus_dmamap_destroy(ring->jrx_data_tag,
1423 ring->jrx_spare_map);
1424 ring->jrx_spare_map = NULL;
1426 bus_dma_tag_destroy(ring->jrx_data_tag);
1427 ring->jrx_data_tag = NULL;
1431 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1432 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1433 ring->jdesc64 = NULL;
1434 ring->jdesc32 = NULL;
1435 ring->jrx_desc_map = NULL;
1438 if (ring->jrx_desc_tag != NULL) {
1439 bus_dma_tag_destroy(ring->jrx_desc_tag);
1440 ring->jrx_desc_tag = NULL;
1445 static int
1446 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1447 {
1448 struct nfe_dmamap_arg ctx;
1449 void *desc;
1450 int i, error, descsize;
1453 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1454 desc = ring->desc64;
1455 descsize = sizeof (struct nfe_desc64);
1457 desc = ring->desc32;
1458 descsize = sizeof (struct nfe_desc32);
1462 ring->cur = ring->next = 0;
1464 error = bus_dma_tag_create(sc->nfe_parent_tag,
1465 NFE_RING_ALIGN, 0, /* alignment, boundary */
1466 BUS_SPACE_MAXADDR, /* lowaddr */
1467 BUS_SPACE_MAXADDR, /* highaddr */
1468 NULL, NULL, /* filter, filterarg */
1469 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1470 NFE_TX_RING_COUNT * descsize, /* maxsegsize */
1472 NULL, NULL, /* lockfunc, lockarg */
1473 &ring->tx_desc_tag);
1475 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1479 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1480 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1482 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1485 if (sc->nfe_flags & NFE_40BIT_ADDR)
1486 ring->desc64 = desc;
1488 ring->desc32 = desc;
1490 ctx.nfe_busaddr = 0;
1491 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1492 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1494 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1497 ring->physaddr = ctx.nfe_busaddr;
1499 error = bus_dma_tag_create(sc->nfe_parent_tag,
1509 &ring->tx_data_tag);
1511 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1515 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1516 error = bus_dmamap_create(ring->tx_data_tag, 0,
1517 &ring->data[i].tx_data_map);
1519 device_printf(sc->nfe_dev,
1520 "could not create Tx DMA map\n");
1530 static void
1531 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1532 {
1533 void *desc;
1534 int descsize;
1536 sc->nfe_force_tx = 0;
1538 ring->cur = ring->next = 0;
1539 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1540 desc = ring->desc64;
1541 descsize = sizeof (struct nfe_desc64);
1543 desc = ring->desc32;
1544 descsize = sizeof (struct nfe_desc32);
1546 bzero(desc, descsize * NFE_TX_RING_COUNT);
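/*
 * A zeroed ring leaves NFE_TX_VALID clear in every descriptor, so the
 * driver initially owns all Tx slots.
 */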
1548 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1549 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1553 static void
1554 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1555 {
1556 struct nfe_tx_data *data;
1557 void *desc;
1558 int i, descsize;
1560 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1561 desc = ring->desc64;
1562 descsize = sizeof (struct nfe_desc64);
1564 desc = ring->desc32;
1565 descsize = sizeof (struct nfe_desc32);
1568 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1569 data = &ring->data[i];
1571 if (data->m != NULL) {
1572 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1573 BUS_DMASYNC_POSTWRITE);
1574 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1575 m_freem(data->m);
1576 data->m = NULL;
1577 }
1578 if (data->tx_data_map != NULL) {
1579 bus_dmamap_destroy(ring->tx_data_tag,
1581 data->tx_data_map = NULL;
1585 if (ring->tx_data_tag != NULL) {
1586 bus_dma_tag_destroy(ring->tx_data_tag);
1587 ring->tx_data_tag = NULL;
1591 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1592 BUS_DMASYNC_POSTWRITE);
1593 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1594 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1595 ring->desc64 = NULL;
1596 ring->desc32 = NULL;
1597 ring->tx_desc_map = NULL;
1598 bus_dma_tag_destroy(ring->tx_desc_tag);
1599 ring->tx_desc_tag = NULL;
1603 #ifdef DEVICE_POLLING
1604 static poll_handler_t nfe_poll;
1607 static int
1608 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1609 {
1610 struct nfe_softc *sc = ifp->if_softc;
1611 uint32_t r;
1612 int rx_npkts = 0;
1614 NFE_LOCK(sc);
1616 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1617 NFE_UNLOCK(sc);
1618 return (rx_npkts);
1619 }
1621 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1622 nfe_jrxeof(sc, count, &rx_npkts);
1623 else
1624 nfe_rxeof(sc, count, &rx_npkts);
1626 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1627 nfe_start_locked(ifp);
1629 if (cmd == POLL_AND_CHECK_STATUS) {
1630 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1631 NFE_UNLOCK(sc);
1632 return (rx_npkts);
1633 }
1634 NFE_WRITE(sc, sc->nfe_irq_status, r);
1636 if (r & NFE_IRQ_LINK) {
1637 NFE_READ(sc, NFE_PHY_STATUS);
1638 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1639 DPRINTF(sc, "link state changed\n");
1640 }
1641 }
1642 NFE_UNLOCK(sc);
1643 return (rx_npkts);
1644 }
1645 #endif /* DEVICE_POLLING */
1647 static void
1648 nfe_set_intr(struct nfe_softc *sc)
1649 {
1651 if (sc->nfe_msi != 0)
1652 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
1653 }
1656 /* In MSI-X, a write to a mask register behaves as XOR. */
1657 static __inline void
1658 nfe_enable_intr(struct nfe_softc *sc)
1661 if (sc->nfe_msix != 0) {
1662 /* XXX Should have a better way to enable interrupts! */
1663 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1664 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1665 } else
1666 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1667 }
1670 static __inline void
1671 nfe_disable_intr(struct nfe_softc *sc)
1674 if (sc->nfe_msix != 0) {
1675 /* XXX Should have a better way to disable interrupts! */
1676 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1677 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1678 } else
1679 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1680 }
1683 static int
1684 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1685 {
1686 struct nfe_softc *sc;
1687 struct ifreq *ifr;
1688 struct mii_data *mii;
1689 int error, init, mask;
1691 sc = ifp->if_softc;
1692 ifr = (struct ifreq *) data;
1693 error = 0;
1694 init = 0;
1695 switch (cmd) {
1696 case SIOCSIFMTU:
1697 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1698 error = EINVAL;
1699 else if (ifp->if_mtu != ifr->ifr_mtu) {
1700 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1701 (sc->nfe_jumbo_disable != 0)) &&
1702 ifr->ifr_mtu > ETHERMTU)
1703 error = EINVAL;
1704 else {
1706 ifp->if_mtu = ifr->ifr_mtu;
1707 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1708 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1709 nfe_init_locked(sc);
1710 }
1711 }
1712 }
1713 break;
1714 case SIOCSIFFLAGS:
1715 NFE_LOCK(sc);
1717 if (ifp->if_flags & IFF_UP) {
1719 * If only the PROMISC or ALLMULTI flag changes, then
1720 * don't do a full re-init of the chip, just update
1723 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1724 ((ifp->if_flags ^ sc->nfe_if_flags) &
1725 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1726 nfe_setmulti(sc);
1727 else
1728 nfe_init_locked(sc);
1729 } else {
1730 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1731 nfe_stop(ifp);
1732 }
1733 sc->nfe_if_flags = ifp->if_flags;
1734 NFE_UNLOCK(sc);
1735 break;
1736 case SIOCADDMULTI:
1737 case SIOCDELMULTI:
1739 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1740 NFE_LOCK(sc);
1741 nfe_setmulti(sc);
1742 NFE_UNLOCK(sc);
1743 }
1744 break;
1745 case SIOCSIFMEDIA:
1746 case SIOCGIFMEDIA:
1748 mii = device_get_softc(sc->nfe_miibus);
1749 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1750 break;
1751 case SIOCSIFCAP:
1752 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1753 #ifdef DEVICE_POLLING
1754 if ((mask & IFCAP_POLLING) != 0) {
1755 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1756 error = ether_poll_register(nfe_poll, ifp);
1757 if (error != 0)
1758 break;
1759 NFE_LOCK(sc);
1760 nfe_disable_intr(sc);
1761 ifp->if_capenable |= IFCAP_POLLING;
1762 NFE_UNLOCK(sc);
1763 } else {
1764 error = ether_poll_deregister(ifp);
1765 /* Enable interrupts even in the error case. */
1766 NFE_LOCK(sc);
1767 nfe_enable_intr(sc);
1768 ifp->if_capenable &= ~IFCAP_POLLING;
1769 NFE_UNLOCK(sc);
1770 }
1771 }
1772 #endif /* DEVICE_POLLING */
1773 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1774 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
1775 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1776 if ((mask & IFCAP_TXCSUM) != 0 &&
1777 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1778 ifp->if_capenable ^= IFCAP_TXCSUM;
1779 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1780 ifp->if_hwassist |= NFE_CSUM_FEATURES;
1782 ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1784 if ((mask & IFCAP_RXCSUM) != 0 &&
1785 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
1786 ifp->if_capenable ^= IFCAP_RXCSUM;
1789 if ((mask & IFCAP_TSO4) != 0 &&
1790 (ifp->if_capabilities & IFCAP_TSO4) != 0) {
1791 ifp->if_capenable ^= IFCAP_TSO4;
1792 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1793 ifp->if_hwassist |= CSUM_TSO;
1795 ifp->if_hwassist &= ~CSUM_TSO;
1797 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
1798 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
1799 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
1800 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1801 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
1802 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1803 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
1804 ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;
1808 /*
1809 * It seems that VLAN stripping requires Rx checksum offload.
1810 * Unfortunately FreeBSD has no way to disable only the Rx side
1811 * of VLAN stripping, so when we know Rx checksum offload is
1812 * disabled, turn the entire hardware VLAN assist off.
1813 */
1814 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) {
1815 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
1817 ifp->if_capenable &= ~(IFCAP_VLAN_HWTAGGING |
1820 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1821 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1822 nfe_init(sc);
1823 }
1824 VLAN_CAPABILITIES(ifp);
1825 break;
1826 default:
1827 error = ether_ioctl(ifp, cmd, data);
1828 break;
1829 }
1831 return (error);
1832 }
1835 static int
1836 nfe_intr(void *arg)
1837 {
1838 struct nfe_softc *sc;
1839 uint32_t status;
1841 sc = (struct nfe_softc *)arg;
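/*
 * Fast interrupt filter: verify the interrupt is ours, mask further
 * interrupts and defer the actual work to the taskqueue.
 */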
1843 status = NFE_READ(sc, sc->nfe_irq_status);
1844 if (status == 0 || status == 0xffffffff)
1845 return (FILTER_STRAY);
1846 nfe_disable_intr(sc);
1847 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1849 return (FILTER_HANDLED);
1853 static void
1854 nfe_int_task(void *arg, int pending)
1855 {
1856 struct nfe_softc *sc = arg;
1857 struct ifnet *ifp = sc->nfe_ifp;
1858 int domore = 0;
1859 uint32_t r;
1861 NFE_LOCK(sc);
1863 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1864 nfe_enable_intr(sc);
1865 NFE_UNLOCK(sc);
1866 return; /* not for us */
1867 }
1868 NFE_WRITE(sc, sc->nfe_irq_status, r);
1870 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1872 #ifdef DEVICE_POLLING
1873 if (ifp->if_capenable & IFCAP_POLLING) {
1874 NFE_UNLOCK(sc);
1875 return;
1876 }
1877 #endif
1879 if (r & NFE_IRQ_LINK) {
1880 NFE_READ(sc, NFE_PHY_STATUS);
1881 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1882 DPRINTF(sc, "link state changed\n");
1885 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1886 NFE_UNLOCK(sc);
1887 nfe_disable_intr(sc);
1888 return;
1889 }
1893 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1894 domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
1895 else
1896 domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
1898 nfe_txeof(sc);
1900 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1901 nfe_start_locked(ifp);
1903 NFE_UNLOCK(sc);
1905 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1906 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1907 return;
1908 }
1910 /* Reenable interrupts. */
1911 nfe_enable_intr(sc);
1912 }
1915 static __inline void
1916 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1918 struct nfe_desc32 *desc32;
1919 struct nfe_desc64 *desc64;
1920 struct nfe_rx_data *data;
1921 struct mbuf *m;
1923 data = &sc->rxq.data[idx];
1924 m = data->m;
1926 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1927 desc64 = &sc->rxq.desc64[idx];
1928 /* VLAN packet may have overwritten it. */
1929 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1930 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1931 desc64->length = htole16(m->m_len);
1932 desc64->flags = htole16(NFE_RX_READY);
1934 desc32 = &sc->rxq.desc32[idx];
1935 desc32->length = htole16(m->m_len);
1936 desc32->flags = htole16(NFE_RX_READY);
1937 }
1938 }
1941 static __inline void
1942 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1944 struct nfe_desc32 *desc32;
1945 struct nfe_desc64 *desc64;
1946 struct nfe_rx_data *data;
1947 struct mbuf *m;
1949 data = &sc->jrxq.jdata[idx];
1950 m = data->m;
1952 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1953 desc64 = &sc->jrxq.jdesc64[idx];
1954 /* VLAN packet may have overwritten it. */
1955 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1956 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1957 desc64->length = htole16(m->m_len);
1958 desc64->flags = htole16(NFE_RX_READY);
1960 desc32 = &sc->jrxq.jdesc32[idx];
1961 desc32->length = htole16(m->m_len);
1962 desc32->flags = htole16(NFE_RX_READY);
1963 }
1964 }
1967 static int
1968 nfe_newbuf(struct nfe_softc *sc, int idx)
1969 {
1970 struct nfe_rx_data *data;
1971 struct nfe_desc32 *desc32;
1972 struct nfe_desc64 *desc64;
1973 struct mbuf *m;
1974 bus_dma_segment_t segs[1];
1975 bus_dmamap_t map;
1976 int nsegs;
1978 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1979 if (m == NULL)
1980 return (ENOBUFS);
1982 m->m_len = m->m_pkthdr.len = MCLBYTES;
1983 m_adj(m, ETHER_ALIGN);
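/* The two-byte adjustment keeps the IP header 32-bit aligned behind the 14-byte Ethernet header. */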
1985 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
1986 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
1987 m_freem(m);
1988 return (ENOBUFS);
1989 }
1990 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1992 data = &sc->rxq.data[idx];
1993 if (data->m != NULL) {
1994 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
1995 BUS_DMASYNC_POSTREAD);
1996 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
1997 }
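/*
 * Swap the just-loaded spare map into this ring slot; the slot's old
 * map becomes the new spare, so a failed load never strands a buffer.
 */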
1998 map = data->rx_data_map;
1999 data->rx_data_map = sc->rxq.rx_spare_map;
2000 sc->rxq.rx_spare_map = map;
2001 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2002 BUS_DMASYNC_PREREAD);
2003 data->paddr = segs[0].ds_addr;
2005 /* update mapping address in h/w descriptor */
2006 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2007 desc64 = &sc->rxq.desc64[idx];
2008 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2009 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2010 desc64->length = htole16(segs[0].ds_len);
2011 desc64->flags = htole16(NFE_RX_READY);
2013 desc32 = &sc->rxq.desc32[idx];
2014 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2015 desc32->length = htole16(segs[0].ds_len);
2016 desc32->flags = htole16(NFE_RX_READY);
2017 }
2019 return (0);
2020 }
2023 static int
2024 nfe_jnewbuf(struct nfe_softc *sc, int idx)
2025 {
2026 struct nfe_rx_data *data;
2027 struct nfe_desc32 *desc32;
2028 struct nfe_desc64 *desc64;
2029 struct mbuf *m;
2030 bus_dma_segment_t segs[1];
2031 bus_dmamap_t map;
2032 int nsegs;
2034 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
2035 if (m == NULL)
2036 return (ENOBUFS);
2037 if ((m->m_flags & M_EXT) == 0) {
2038 m_freem(m);
2039 return (ENOBUFS);
2040 }
2041 m->m_pkthdr.len = m->m_len = MJUM9BYTES;
2042 m_adj(m, ETHER_ALIGN);
2044 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2045 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2046 m_freem(m);
2047 return (ENOBUFS);
2048 }
2049 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2051 data = &sc->jrxq.jdata[idx];
2052 if (data->m != NULL) {
2053 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2054 BUS_DMASYNC_POSTREAD);
2055 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2056 }
2057 map = data->rx_data_map;
2058 data->rx_data_map = sc->jrxq.jrx_spare_map;
2059 sc->jrxq.jrx_spare_map = map;
2060 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2061 BUS_DMASYNC_PREREAD);
2062 data->paddr = segs[0].ds_addr;
2064 /* update mapping address in h/w descriptor */
2065 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2066 desc64 = &sc->jrxq.jdesc64[idx];
2067 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2068 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2069 desc64->length = htole16(segs[0].ds_len);
2070 desc64->flags = htole16(NFE_RX_READY);
2072 desc32 = &sc->jrxq.jdesc32[idx];
2073 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2074 desc32->length = htole16(segs[0].ds_len);
2075 desc32->flags = htole16(NFE_RX_READY);
2076 }
2078 return (0);
2079 }
2082 static int
2083 nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2084 {
2085 struct ifnet *ifp = sc->nfe_ifp;
2086 struct nfe_desc32 *desc32;
2087 struct nfe_desc64 *desc64;
2088 struct nfe_rx_data *data;
2089 struct mbuf *m;
2090 uint16_t flags;
2091 int len, prog, rx_npkts;
2092 uint32_t vtag = 0;
2094 rx_npkts = 0;
2095 NFE_LOCK_ASSERT(sc);
2097 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2098 BUS_DMASYNC_POSTREAD);
2100 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2101 if (count <= 0)
2102 break;
2103 count--;
2105 data = &sc->rxq.data[sc->rxq.cur];
2107 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2108 desc64 = &sc->rxq.desc64[sc->rxq.cur];
2109 vtag = le32toh(desc64->physaddr[1]);
2110 flags = le16toh(desc64->flags);
2111 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2113 desc32 = &sc->rxq.desc32[sc->rxq.cur];
2114 flags = le16toh(desc32->flags);
2115 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2118 if (flags & NFE_RX_READY)
2119 break;
2121 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2122 if (!(flags & NFE_RX_VALID_V1)) {
2124 nfe_discard_rxbuf(sc, sc->rxq.cur);
2125 continue;
2126 }
2127 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2128 flags &= ~NFE_RX_ERROR;
2129 len--; /* fix buffer length */
2130 }
2131 } else {
2132 if (!(flags & NFE_RX_VALID_V2)) {
2134 nfe_discard_rxbuf(sc, sc->rxq.cur);
2135 continue;
2136 }
2138 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2139 flags &= ~NFE_RX_ERROR;
2140 len--; /* fix buffer length */
2141 }
2142 }
2144 if (flags & NFE_RX_ERROR) {
2145 ifp->if_ierrors++;
2146 nfe_discard_rxbuf(sc, sc->rxq.cur);
2147 continue;
2148 }
2150 m = data->m;
2151 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2152 ifp->if_iqdrops++;
2153 nfe_discard_rxbuf(sc, sc->rxq.cur);
2154 continue;
2155 }
2157 if ((vtag & NFE_RX_VTAG) != 0 &&
2158 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2159 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2160 m->m_flags |= M_VLANTAG;
2163 m->m_pkthdr.len = m->m_len = len;
2164 m->m_pkthdr.rcvif = ifp;
2166 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2167 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2168 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2169 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2170 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2171 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2172 m->m_pkthdr.csum_flags |=
2173 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2174 m->m_pkthdr.csum_data = 0xffff;
2175 }
2176 }
2177 }
2178 ifp->if_ipackets++;
2180 NFE_UNLOCK(sc);
2181 (*ifp->if_input)(ifp, m);
2182 NFE_LOCK(sc);
2183 rx_npkts++;
2184 }
2186 if (prog > 0)
2188 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2189 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2191 if (rx_npktsp != NULL)
2192 *rx_npktsp = rx_npkts;
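/* EAGAIN tells the caller the budget ran out and more packets may be pending. */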
2193 return (count > 0 ? 0 : EAGAIN);
2197 static int
2198 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2199 {
2200 struct ifnet *ifp = sc->nfe_ifp;
2201 struct nfe_desc32 *desc32;
2202 struct nfe_desc64 *desc64;
2203 struct nfe_rx_data *data;
2204 struct mbuf *m;
2205 uint16_t flags;
2206 int len, prog, rx_npkts;
2207 uint32_t vtag = 0;
2209 rx_npkts = 0;
2210 NFE_LOCK_ASSERT(sc);
2212 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2213 BUS_DMASYNC_POSTREAD);
2215 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2216 vtag = 0) {
2217 if (count <= 0)
2218 break;
2219 count--;
2221 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2223 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2224 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2225 vtag = le32toh(desc64->physaddr[1]);
2226 flags = le16toh(desc64->flags);
2227 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2229 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2230 flags = le16toh(desc32->flags);
2231 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2234 if (flags & NFE_RX_READY)
2235 break;
2237 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2238 if (!(flags & NFE_RX_VALID_V1)) {
2240 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2241 continue;
2242 }
2243 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2244 flags &= ~NFE_RX_ERROR;
2245 len--; /* fix buffer length */
2246 }
2247 } else {
2248 if (!(flags & NFE_RX_VALID_V2)) {
2250 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2251 continue;
2252 }
2254 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2255 flags &= ~NFE_RX_ERROR;
2256 len--; /* fix buffer length */
2257 }
2258 }
2260 if (flags & NFE_RX_ERROR) {
2261 ifp->if_ierrors++;
2262 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2263 continue;
2264 }
2266 m = data->m;
2267 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2268 ifp->if_iqdrops++;
2269 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2270 continue;
2271 }
2273 if ((vtag & NFE_RX_VTAG) != 0 &&
2274 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2275 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2276 m->m_flags |= M_VLANTAG;
2279 m->m_pkthdr.len = m->m_len = len;
2280 m->m_pkthdr.rcvif = ifp;
2282 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2283 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2284 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2285 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2286 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2287 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2288 m->m_pkthdr.csum_flags |=
2289 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2290 m->m_pkthdr.csum_data = 0xffff;
2291 }
2292 }
2293 }
2294 ifp->if_ipackets++;
2296 NFE_UNLOCK(sc);
2297 (*ifp->if_input)(ifp, m);
2298 NFE_LOCK(sc);
2299 rx_npkts++;
2300 }
2302 if (prog > 0)
2304 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2305 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2307 if (rx_npktsp != NULL)
2308 *rx_npktsp = rx_npkts;
2309 return (count > 0 ? 0 : EAGAIN);
2313 static void
2314 nfe_txeof(struct nfe_softc *sc)
2315 {
2316 struct ifnet *ifp = sc->nfe_ifp;
2317 struct nfe_desc32 *desc32;
2318 struct nfe_desc64 *desc64;
2319 struct nfe_tx_data *data = NULL;
2320 uint16_t flags;
2321 int cons, prog;
2323 NFE_LOCK_ASSERT(sc);
2325 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2326 BUS_DMASYNC_POSTREAD);
2328 prog = 0;
2329 for (cons = sc->txq.next; cons != sc->txq.cur;
2330 NFE_INC(cons, NFE_TX_RING_COUNT)) {
2331 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2332 desc64 = &sc->txq.desc64[cons];
2333 flags = le16toh(desc64->flags);
2335 desc32 = &sc->txq.desc32[cons];
2336 flags = le16toh(desc32->flags);
2339 if (flags & NFE_TX_VALID)
2340 break;
2342 prog++;
2343 sc->txq.queued--;
2344 data = &sc->txq.data[cons];
2346 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2347 if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2348 continue;
2349 if ((flags & NFE_TX_ERROR_V1) != 0) {
2350 device_printf(sc->nfe_dev,
2351 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2357 if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2359 if ((flags & NFE_TX_ERROR_V2) != 0) {
2360 device_printf(sc->nfe_dev,
2361 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2367 /* last fragment of the mbuf chain transmitted */
2368 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2369 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2370 BUS_DMASYNC_POSTWRITE);
2371 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2377 sc->nfe_force_tx = 0;
2378 sc->txq.next = cons;
2379 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2380 if (sc->txq.queued == 0)
2381 sc->nfe_watchdog_timer = 0;
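/*
 * Map an outgoing mbuf chain and build one Tx descriptor per DMA segment.
 * Note the ordering trick below: every descriptor except the first gets
 * NFE_TX_VALID as it is written (flags picks the bit up after the first
 * loop iteration), and the first descriptor is validated only once the
 * whole chain is in place, so the chip never sees a half-built chain.
 */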
nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
    struct nfe_desc32 *desc32 = NULL;
    struct nfe_desc64 *desc64 = NULL;
    bus_dma_segment_t segs[NFE_MAX_SCATTER];
    int error, i, nsegs, prod, si;
    uint16_t cflags, flags;

    prod = si = sc->txq.cur;
    map = sc->txq.data[prod].tx_data_map;

    error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
        &nsegs, BUS_DMA_NOWAIT);
    if (error == EFBIG) {
        m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
        error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
            *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
    } else if (error != 0)

    if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
        bus_dmamap_unload(sc->txq.tx_data_tag, map);

    if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
        tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
        cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
        cflags |= NFE_TX_TSO;
    } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
        if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
            cflags |= NFE_TX_IP_CSUM;
        if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
            cflags |= NFE_TX_TCP_UDP_CSUM;
        if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
            cflags |= NFE_TX_TCP_UDP_CSUM;

    for (i = 0; i < nsegs; i++) {
        if (sc->nfe_flags & NFE_40BIT_ADDR) {
            desc64 = &sc->txq.desc64[prod];
            desc64->physaddr[0] =
                htole32(NFE_ADDR_HI(segs[i].ds_addr));
            desc64->physaddr[1] =
                htole32(NFE_ADDR_LO(segs[i].ds_addr));
            desc64->length = htole16(segs[i].ds_len - 1);
            desc64->flags = htole16(flags);
            desc32 = &sc->txq.desc32[prod];
                htole32(NFE_ADDR_LO(segs[i].ds_addr));
            desc32->length = htole16(segs[i].ds_len - 1);
            desc32->flags = htole16(flags);

        /*
         * Setting of the valid bit in the first descriptor is
         * deferred until the whole chain is fully set up.
         */
        flags |= NFE_TX_VALID;

        NFE_INC(prod, NFE_TX_RING_COUNT);

    /*
     * The whole mbuf chain has been DMA mapped; fix the last and first
     * descriptors.  Checksum flags, vtag and TSO belong to the first
     * fragment only.
     */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
        desc64 = &sc->txq.desc64[si];
        if ((m->m_flags & M_VLANTAG) != 0)
            desc64->vtag = htole32(NFE_TX_VTAG |
                m->m_pkthdr.ether_vtag);
        if (tso_segsz != 0) {
            /*
             * The following indicates the descriptor element
             * is a 32-bit quantity.
             */
            desc64->length |= htole16((uint16_t)tso_segsz);
            desc64->flags |= htole16(tso_segsz >> 16);
        /*
         * Finally, set the valid/checksum/TSO bit in the first
         * descriptor.
         */
        desc64->flags |= htole16(NFE_TX_VALID | cflags);
        if (sc->nfe_flags & NFE_JUMBO_SUP)
            desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
            desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
        desc32 = &sc->txq.desc32[si];
        if (tso_segsz != 0) {
            /*
             * The following indicates the descriptor element
             * is a 32-bit quantity.
             */
            desc32->length |= htole16((uint16_t)tso_segsz);
            desc32->flags |= htole16(tso_segsz >> 16);
        /*
         * Finally, set the valid/checksum/TSO bit in the first
         * descriptor.
         */
        desc32->flags |= htole16(NFE_TX_VALID | cflags);

    prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
    sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
    sc->txq.data[prod].tx_data_map = map;
    sc->txq.data[prod].m = m;

    bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
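/*
 * Program the hardware multicast filter.  The (partially elided) loops
 * below appear to compute the bitwise intersection of all enabled
 * multicast addresses: addr keeps the bit values common to every address
 * and mask keeps the positions where they all agree, so the chip
 * presumably accepts any DA that matches addr on the bits set in mask.
 * ALLMULTI/PROMISC zero both, matching every multicast address.
 */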
nfe_setmulti(struct nfe_softc *sc)
    struct ifnet *ifp = sc->nfe_ifp;
    struct ifmultiaddr *ifma;
    uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
    uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff

    NFE_LOCK_ASSERT(sc);

    if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
        bzero(addr, ETHER_ADDR_LEN);
        bzero(mask, ETHER_ADDR_LEN);

    bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
    bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

    if_maddr_rlock(ifp);
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
        addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
            u_int8_t mcaddr = addrp[i];
    if_maddr_runlock(ifp);

    for (i = 0; i < ETHER_ADDR_LEN; i++) {

    addr[0] |= 0x01;    /* make sure multicast bit is set */

    NFE_WRITE(sc, NFE_MULTIADDR_HI,
        addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
    NFE_WRITE(sc, NFE_MULTIADDR_LO,
        addr[5] << 8 | addr[4]);
    NFE_WRITE(sc, NFE_MULTIMASK_HI,
        mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
    NFE_WRITE(sc, NFE_MULTIMASK_LO,
        mask[5] << 8 | mask[4]);

    filter = NFE_READ(sc, NFE_RXFILTER);
    filter &= NFE_PFF_RX_PAUSE;
    filter |= NFE_RXFILTER_MAGIC;
    filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
    NFE_WRITE(sc, NFE_RXFILTER, filter);
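/*
 * Transmit start.  nfe_start() is the if_start entry point and only
 * takes the driver lock; nfe_start_locked() dequeues packets, maps them
 * with nfe_encap(), taps BPF, and finally rings the Tx doorbell
 * (NFE_RXTX_KICKTX) once for the whole batch.
 */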
nfe_start(struct ifnet *ifp)
    struct nfe_softc *sc = ifp->if_softc;

    nfe_start_locked(ifp);

nfe_start_locked(struct ifnet *ifp)
    struct nfe_softc *sc = ifp->if_softc;

    NFE_LOCK_ASSERT(sc);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || sc->nfe_link == 0)

    for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
        IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
        if (nfe_encap(sc, &m0) != 0) {
            IFQ_DRV_PREPEND(&ifp->if_snd, m0);
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
        ETHER_BPF_MTAP(ifp, m0);

    bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

    /*
     * Set a timeout in case the chip goes out to lunch.
     */
    sc->nfe_watchdog_timer = 5;
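/*
 * Watchdog, presumably driven once a second from nfe_tick().  Before
 * declaring the chip wedged and reinitializing, it attempts two cheap
 * recoveries: a lost Tx completion interrupt (ring already drained) and
 * a lost Tx kick command (retried up to three times).
 */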
nfe_watchdog(struct ifnet *ifp)
    struct nfe_softc *sc = ifp->if_softc;

    if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)

    /* Check if we've lost a Tx completion interrupt. */
    if (sc->txq.queued == 0) {
        if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
            nfe_start_locked(ifp);

    /* Check if we've lost the Tx start command. */
    if (sc->nfe_force_tx <= 3) {
        /*
         * If this turns out to be the usual cause of watchdog
         * timeouts, the kick below should move into nfe_txeof().
         */
        NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
    sc->nfe_force_tx = 0;

    if_printf(ifp, "watchdog timeout\n");

    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    nfe_init_locked(sc);
    struct nfe_softc *sc = xsc;

    nfe_init_locked(sc);

nfe_init_locked(void *xsc)
    struct nfe_softc *sc = xsc;
    struct ifnet *ifp = sc->nfe_ifp;
    struct mii_data *mii;

    NFE_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->nfe_miibus);

    if (ifp->if_drv_flags & IFF_DRV_RUNNING)

    sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;

    nfe_init_tx_ring(sc, &sc->txq);
    if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
        error = nfe_init_jrx_ring(sc, &sc->jrxq);
        error = nfe_init_rx_ring(sc, &sc->rxq);
        device_printf(sc->nfe_dev,
            "initialization failed: no memory for rx buffers\n");

    if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
        val |= NFE_MAC_ADDR_INORDER;
    NFE_WRITE(sc, NFE_TX_UNK, val);
    NFE_WRITE(sc, NFE_STATUS, 0);

    if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
        NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);

    sc->rxtxctl = NFE_RXTX_BIT2;
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        sc->rxtxctl |= NFE_RXTX_V3MAGIC;
    else if (sc->nfe_flags & NFE_JUMBO_SUP)
        sc->rxtxctl |= NFE_RXTX_V2MAGIC;

    if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
        sc->rxtxctl |= NFE_RXTX_RXCSUM;
    if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
        sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
    NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

    if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
        NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
        NFE_WRITE(sc, NFE_VTAG_CTL, 0);

    NFE_WRITE(sc, NFE_SETUP_R6, 0);

    /* set MAC address */
    nfe_set_macaddr(sc, IF_LLADDR(ifp));

    /* tell MAC where rings are in memory */
    if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
        NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
            NFE_ADDR_HI(sc->jrxq.jphysaddr));
        NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
            NFE_ADDR_LO(sc->jrxq.jphysaddr));
        NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
            NFE_ADDR_HI(sc->rxq.physaddr));
        NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
            NFE_ADDR_LO(sc->rxq.physaddr));
    NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
    NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

    NFE_WRITE(sc, NFE_RING_SIZE,
        (NFE_RX_RING_COUNT - 1) << 16 |
        (NFE_TX_RING_COUNT - 1));

    NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

    /* force MAC to wake up */
    val = NFE_READ(sc, NFE_PWR_STATE);
    if ((val & NFE_PWR_WAKEUP) == 0)
        NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
    val = NFE_READ(sc, NFE_PWR_STATE);
    NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

    /* configure interrupt coalescing/mitigation */
    NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
    /* no interrupt mitigation: one interrupt per packet */
    NFE_WRITE(sc, NFE_IMTIMER, 970);

    NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
    NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
    NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

    /* update MAC knowledge of PHY; generates an NFE_IRQ_LINK interrupt */
    NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

    NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
    NFE_WRITE(sc, NFE_WOL_CTL, 0);

    sc->rxtxctl &= ~NFE_RXTX_BIT2;
    NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

    NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
    NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

    NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

    /* Clear hardware stats. */
    nfe_stats_clear(sc);

#ifdef DEVICE_POLLING
    if (ifp->if_capenable & IFCAP_POLLING)
        nfe_disable_intr(sc);

    nfe_enable_intr(sc); /* enable interrupts */

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
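/*
 * Stop the interface: halt the Tx and Rx engines, cancel the tick
 * callout, disable interrupts, and unload/free any mbufs still attached
 * to the standard Rx, jumbo Rx and Tx rings so no DMA remains in flight.
 */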
nfe_stop(struct ifnet *ifp)
    struct nfe_softc *sc = ifp->if_softc;
    struct nfe_rx_ring *rx_ring;
    struct nfe_jrx_ring *jrx_ring;
    struct nfe_tx_ring *tx_ring;
    struct nfe_rx_data *rdata;
    struct nfe_tx_data *tdata;

    NFE_LOCK_ASSERT(sc);

    sc->nfe_watchdog_timer = 0;
    ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

    callout_stop(&sc->nfe_stat_ch);

    NFE_WRITE(sc, NFE_TX_CTL, 0);
    NFE_WRITE(sc, NFE_RX_CTL, 0);

    /* disable interrupts */
    nfe_disable_intr(sc);

    /* free Rx and Tx mbufs still in the queues. */
    for (i = 0; i < NFE_RX_RING_COUNT; i++) {
        rdata = &rx_ring->data[i];
        if (rdata->m != NULL) {
            bus_dmamap_sync(rx_ring->rx_data_tag,
                rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
            bus_dmamap_unload(rx_ring->rx_data_tag,
                rdata->rx_data_map);

    if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
        jrx_ring = &sc->jrxq;
        for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
            rdata = &jrx_ring->jdata[i];
            if (rdata->m != NULL) {
                bus_dmamap_sync(jrx_ring->jrx_data_tag,
                    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(jrx_ring->jrx_data_tag,
                    rdata->rx_data_map);

    for (i = 0; i < NFE_TX_RING_COUNT; i++) {
        tdata = &tx_ring->data[i];
        if (tdata->m != NULL) {
            bus_dmamap_sync(tx_ring->tx_data_tag,
                tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(tx_ring->tx_data_tag,
                tdata->tx_data_map);

    /* Update hardware stats. */
    nfe_stats_update(sc);
nfe_ifmedia_upd(struct ifnet *ifp)
    struct nfe_softc *sc = ifp->if_softc;
    struct mii_data *mii;

    mii = device_get_softc(sc->nfe_miibus);

nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
    struct nfe_softc *sc;
    struct mii_data *mii;

    mii = device_get_softc(sc->nfe_miibus);

    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;

    struct nfe_softc *sc;
    struct mii_data *mii;

    sc = (struct nfe_softc *)xsc;

    NFE_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->nfe_miibus);

    nfe_stats_update(sc);

    callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
nfe_shutdown(device_t dev)

    return (nfe_suspend(dev));
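/*
 * Read the station address from the chip.  Two layouts exist, selected
 * by NFE_CORRECT_MACADDR: chips with the flag set apparently store the
 * address in the conventional byte order (the order nfe_set_macaddr()
 * writes), while the others store it reversed, hence the two mirrored
 * decodings below.
 */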
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)

    if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
        val = NFE_READ(sc, NFE_MACADDR_LO);
        addr[0] = (val >> 8) & 0xff;
        addr[1] = (val & 0xff);

        val = NFE_READ(sc, NFE_MACADDR_HI);
        addr[2] = (val >> 24) & 0xff;
        addr[3] = (val >> 16) & 0xff;
        addr[4] = (val >> 8) & 0xff;
        addr[5] = (val & 0xff);

        val = NFE_READ(sc, NFE_MACADDR_LO);
        addr[5] = (val >> 8) & 0xff;
        addr[4] = (val & 0xff);

        val = NFE_READ(sc, NFE_MACADDR_HI);
        addr[3] = (val >> 24) & 0xff;
        addr[2] = (val >> 16) & 0xff;
        addr[1] = (val >> 8) & 0xff;
        addr[0] = (val & 0xff);

nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)

    NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
    NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
        addr[1] << 8 | addr[0]);
/*
 * Map a single buffer address.
 */
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    struct nfe_dmamap_arg *ctx;

    KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

    ctx = (struct nfe_dmamap_arg *)arg;
    ctx->nfe_busaddr = segs[0].ds_addr;

sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)

    value = *(int *)arg1;
    error = sysctl_handle_int(oidp, &value, 0, req);
    if (error || !req->newptr)
    if (value < low || value > high)
    *(int *)arg1 = value;
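/*
 * Handler for the dev.nfe.N.process_limit sysctl; it simply bounds the
 * value to [NFE_PROC_MIN, NFE_PROC_MAX] via sysctl_int_range() above.
 */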
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)

    return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,

#define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
    SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
nfe_sysctl_node(struct nfe_softc *sc)
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *child, *parent;
    struct sysctl_oid *tree;
    struct nfe_hw_stats *stats;

    stats = &sc->nfe_stats;
    ctx = device_get_sysctl_ctx(sc->nfe_dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
    SYSCTL_ADD_PROC(ctx, child,
        OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
        &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
        "max number of Rx events to process");

    sc->nfe_process_limit = NFE_PROC_DEFAULT;
    error = resource_int_value(device_get_name(sc->nfe_dev),
        device_get_unit(sc->nfe_dev), "process_limit",
        &sc->nfe_process_limit);
    if (sc->nfe_process_limit < NFE_PROC_MIN ||
        sc->nfe_process_limit > NFE_PROC_MAX) {
        device_printf(sc->nfe_dev,
            "process_limit value out of range; "
            "using default: %d\n", NFE_PROC_DEFAULT);
        sc->nfe_process_limit = NFE_PROC_DEFAULT;

    if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)

    tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
        NULL, "NFE statistics");
    parent = SYSCTL_CHILDREN(tree);

    /* Rx statistics. */
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
        NULL, "Rx MAC statistics");
    child = SYSCTL_CHILDREN(tree);

    NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
        &stats->rx_frame_errors, "Framing Errors");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
        &stats->rx_extra_bytes, "Extra Bytes");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
        &stats->rx_late_cols, "Late Collisions");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
        &stats->rx_runts, "Runts");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
        &stats->rx_jumbos, "Jumbos");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
        &stats->rx_fifo_overuns, "FIFO Overruns");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
        &stats->rx_crc_errors, "CRC Errors");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
        &stats->rx_fae, "Frame Alignment Errors");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
        &stats->rx_len_errors, "Length Errors");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
        &stats->rx_unicast, "Unicast Frames");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
        &stats->rx_multicast, "Multicast Frames");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
        &stats->rx_broadcast, "Broadcast Frames");
    if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
        NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
            &stats->rx_octets, "Octets");
        NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
            &stats->rx_pause, "Pause frames");
        NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
            &stats->rx_drops, "Drop frames");

    /* Tx statistics. */
    tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
        NULL, "Tx MAC statistics");
    child = SYSCTL_CHILDREN(tree);
    NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
        &stats->tx_octets, "Octets");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
        &stats->tx_zero_rexmits, "Zero Retransmits");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
        &stats->tx_one_rexmits, "One Retransmits");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
        &stats->tx_multi_rexmits, "Multiple Retransmits");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
        &stats->tx_late_cols, "Late Collisions");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
        &stats->tx_fifo_underuns, "FIFO Underruns");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
        &stats->tx_carrier_losts, "Carrier Losses");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
        &stats->tx_excess_deferals, "Excess Deferrals");
    NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
        &stats->tx_retry_errors, "Retry Errors");
    if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
        NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
            &stats->tx_deferals, "Deferrals");
        NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
            &stats->tx_frames, "Frames");
        NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
            &stats->tx_pause, "Pause Frames");
    if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
        NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
            &stats->tx_unicast, "Unicast Frames");
        NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
            &stats->tx_multicast, "Multicast Frames");
        NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
            &stats->tx_broadcast, "Broadcast Frames");

#undef NFE_SYSCTL_STAT_ADD32
#undef NFE_SYSCTL_STAT_ADD64
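/*
 * The hardware MIB counters appear to be clear-on-read: simply reading
 * each register in the counter block (plus the three extra V3 Tx
 * counters) is enough to reset it, so the values read here are dropped.
 */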
nfe_stats_clear(struct nfe_softc *sc)

    if ((sc->nfe_flags & NFE_MIB_V1) != 0)
        mib_cnt = NFE_NUM_MIB_STATV1;
    else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
        mib_cnt = NFE_NUM_MIB_STATV2;

    for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
        NFE_READ(sc, NFE_TX_OCTET + i);

    if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
        NFE_READ(sc, NFE_TX_UNICAST);
        NFE_READ(sc, NFE_TX_MULTICAST);
        NFE_READ(sc, NFE_TX_BROADCAST);
nfe_stats_update(struct nfe_softc *sc)
    struct nfe_hw_stats *stats;

    NFE_LOCK_ASSERT(sc);

    if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)

    stats = &sc->nfe_stats;
    stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
    stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
    stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
    stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
    stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
    stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
    stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
    stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
    stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
    stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
    stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
    stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
    stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
    stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
    stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
    stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
    stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
    stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
    stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
    stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
    stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);

    if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
        stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
        stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
        stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
        stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
        stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
        stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);

    if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
        stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
        stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
        stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
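/*
 * Force a 10/100 link before suspend so Wake On LAN keeps working,
 * presumably because a gigabit link draws more power than the standby
 * rail provides.  Restart autonegotiation advertising 10/100 only, poll
 * until a link comes up, and as a last resort force the MAC to
 * 100baseTX full-duplex.
 */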
nfe_set_linkspeed(struct nfe_softc *sc)
    struct mii_softc *miisc;
    struct mii_data *mii;

    NFE_LOCK_ASSERT(sc);

    mii = device_get_softc(sc->nfe_miibus);

    if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
        (IFM_ACTIVE | IFM_AVALID)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {

    miisc = LIST_FIRST(&mii->mii_phys);
    phyno = miisc->mii_phy;
    LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
    nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
    nfe_miibus_writereg(sc->nfe_dev, phyno,
        MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
    nfe_miibus_writereg(sc->nfe_dev, phyno,
        MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);

    /*
     * Poll link state until nfe(4) gets a 10/100Mbps link.
     */
    for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
            == (IFM_ACTIVE | IFM_AVALID)) {
            switch (IFM_SUBTYPE(mii->mii_media_active)) {
                nfe_mac_config(sc, mii);
        pause("nfelnk", hz);

    if (i == MII_ANEGTICKS_GIGE)
        device_printf(sc->nfe_dev,
            "establishing a link failed, WOL may not work!\n");

    /*
     * No link; force the MAC to a 100Mbps, full-duplex link.
     * This is the last resort and may or may not work.
     */
    mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
    mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
    nfe_mac_config(sc, mii);
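/*
 * Arm Wake On LAN.  This needs the PCI power-management capability:
 * program the magic-packet filter, downshift the link via
 * nfe_set_linkspeed(), keep the receiver running, and finally request
 * PME# through the PCI power status register.
 */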
nfe_set_wol(struct nfe_softc *sc)

    NFE_LOCK_ASSERT(sc);

    if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)

    if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
        wolctl = NFE_WOL_MAGIC;

    NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
    if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
        nfe_set_linkspeed(sc);
        if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
            NFE_WRITE(sc, NFE_PWR2_CTL,
                NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);

        NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
        NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
        NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |

    /* Request PME if WOL is requested. */
    pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
    pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
    if ((ifp->if_capenable & IFCAP_WOL) != 0)
        pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
    pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);