/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>
MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int  nfe_shutdown(device_t);
static int  nfe_can_use_msix(struct nfe_softc *);
static int  nfe_detect_msik9(struct nfe_softc *);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(if_t, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int  nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int  nfe_newbuf(struct nfe_softc *, int);
static int  nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int, int *);
static int  nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_start(if_t);
static void nfe_start_locked(if_t);
static void nfe_watchdog(if_t);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(if_t);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(if_t);
static void nfe_ifmedia_sts(if_t, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
static void nfe_set_linkspeed(struct nfe_softc *);
static void nfe_set_wol(struct nfe_softc *);
#ifdef NFE_DEBUG
static int nfedebug = 0;
#define	DPRINTF(sc, ...)	do {				\
	if (nfedebug)						\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#define	DPRINTFN(sc, n, ...)	do {				\
	if (nfedebug >= (n))					\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#else
#define	DPRINTF(sc, ...)
#define	DPRINTFN(sc, n, ...)
#endif
#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
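
/*
 * These are boot-time tunables, e.g. in /boot/loader.conf:
 *   hw.nfe.msi_disable=1	# do not use MSI interrupts
 *   hw.nfe.jumbo_disable=1	# never allocate jumbo frame Rx resources
 */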
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	DEVMETHOD_END
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
static struct nfe_type nfe_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	    "NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	    "NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	    "NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	    "NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN,
	    "NVIDIA nForce MCP89 Networking Adapter"},
	{0, 0, NULL}
};
/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
	struct nfe_type *t = nfe_devs;

	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}
	return (ENXIO);
}
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
	int rid;

	rid = PCIR_BAR(2);
	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->nfe_msix_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX table resource\n");
		return;
	}
	rid = PCIR_BAR(3);
	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->nfe_msix_pba_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX PBA resource\n");
		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
		return;
	}

	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
		if (count == NFE_MSI_MESSAGES) {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "Using %d MSIX messages\n", count);
			sc->nfe_msix = 1;
		} else {
			device_printf(sc->nfe_dev,
			    "couldn't allocate MSIX\n");
			pci_release_msi(sc->nfe_dev);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(3), sc->nfe_msix_pba_res);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(2), sc->nfe_msix_res);
			sc->nfe_msix_pba_res = NULL;
			sc->nfe_msix_res = NULL;
		}
	}
}
static int
nfe_detect_msik9(struct nfe_softc *sc)
{
	static const char *maker = "MSI";
	static const char *product = "K9N6PGM2-V2 (MS-7309)";
	char *m, *p;
	int found;

	found = 0;
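	/*
	 * This board needs the PHY probed at a fixed location; see the
	 * MCP61 handling in nfe_attach().
	 */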
	m = kern_getenv("smbios.planar.maker");
	p = kern_getenv("smbios.planar.product");
	if (m != NULL && p != NULL) {
		if (strcmp(m, maker) == 0 && strcmp(p, product) == 0)
			found = 1;
	}
	if (m != NULL)
		freeenv(m);
	if (p != NULL)
		freeenv(p);

	return (found);
}
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	if_t ifp;
	bus_addr_t dma_addr_max;
	int error = 0, i, msic, phyloc, reg, rid;

	sc = device_get_softc(dev);
	sc->nfe_dev = dev;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->nfe_res[0] == NULL) {
		device_printf(dev, "couldn't map memory resources\n");
		mtx_destroy(&sc->nfe_mtx);
		return (ENXIO);
	}

	if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t v, width;

		v = pci_read_config(dev, reg + 0x08, 2);
		/* Change max. read request size to 4096. */
		v &= ~(7 << 12);
		v |= (5 << 12);
		pci_write_config(dev, reg + 0x08, v, 2);

		v = pci_read_config(dev, reg + 0x0c, 2);
		/* link capability */
		v = (v >> 4) & 0x3f;
		width = pci_read_config(dev, reg + 0x12, 2);
		/* negotiated link width */
		width = (width >> 4) & 0x3f;
		if (v != width)
			device_printf(sc->nfe_dev,
			    "warning, negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}

	if (nfe_can_use_msix(sc) == 0) {
		device_printf(sc->nfe_dev,
		    "MSI/MSI-X capability black-listed, will use INTx\n");
		msix_disable = 1;
		msi_disable = 1;
	}

	/* Allocate interrupt */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 &&
		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
			nfe_alloc_msix(sc, msic);
		if (msi_disable == 0 && sc->nfe_msix == 0 &&
		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == NFE_MSI_MESSAGES) {
				if (bootverbose)
					device_printf(dev,
					    "Using %d MSI messages\n", msic);
				sc->nfe_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}

	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
		rid = 0;
		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->nfe_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->nfe_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
		/* Map interrupts to vector 0. */
		if (sc->nfe_msix != 0) {
			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
		} else if (sc->nfe_msi != 0) {
			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
		}
	}

	/* Set IRQ status/mask register. */
	sc->nfe_irq_status = NFE_IRQ_STATUS;
	sc->nfe_irq_mask = NFE_IRQ_MASK;
	sc->nfe_intrs = NFE_IRQ_WANTED;
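	/* The MSI-X/MSI cases below override these INTx defaults. */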
	if (sc->nfe_msix != 0) {
		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
		sc->nfe_nointrs = NFE_IRQ_WANTED;
	} else if (sc->nfe_msi != 0) {
		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
	}
	sc->nfe_devid = pci_get_device(dev);
	sc->nfe_revid = pci_get_revid(dev);

	switch (sc->nfe_devid) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP89_LAN:
		/* XXX flow control */
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
		    NFE_MIB_V2;
		break;
	default:
		break;
	}

	/* Check for reversed Ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->nfe_flags |= NFE_CORRECT_MACADDR;
	nfe_get_macaddr(sc, sc->eaddr);

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
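	/* Chips with NFE_40BIT_ADDR can DMA to addresses above 4GB. */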
	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
		dma_addr_max = NFE_DMA_MAXADDR;
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    dma_addr_max,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error != 0)
		goto fail;

	ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_gethandle()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Allocate Tx and Rx rings.
	 */
	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
		goto fail;

	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
		goto fail;

	nfe_alloc_jrx_ring(sc, &sc->jrxq);
	/* Create sysctl node. */
	nfe_sysctl_node(sc);

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, nfe_ioctl);
	if_setstartfn(ifp, nfe_start);
	if_sethwassist(ifp, 0);
	if_setcapabilities(ifp, 0);
	if_setinitfn(ifp, nfe_init);
	if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1);
	if_setsendqready(ifp);

	if (sc->nfe_flags & NFE_HW_CSUM) {
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
		if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0);
	}
	if_setcapenable(ifp, if_getcapabilities(ifp));

	sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
		if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0)
			if_setcapabilitiesbit(ifp,
			    (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0);
	}

	if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	phyloc = MII_PHY_ANY;
	if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
		if (nfe_detect_msik9(sc) != 0)
			phyloc = 0;
	}
	error = mii_attach(dev, &sc->nfe_miibus, ifp,
	    (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts,
	    BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}
	ether_ifattach(ifp, sc->eaddr);

	NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->nfe_tq);
	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->nfe_dev));

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		error = bus_setup_intr(dev, sc->nfe_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
		    &sc->nfe_intrhand[0]);
	} else {
		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->nfe_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
			    &sc->nfe_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);

fail:
	nfe_detach(dev);

	return (error);
}
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	if_t ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int i, rid;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		NFE_LOCK(sc);
		nfe_stop(ifp);
		if_setflagbits(ifp, 0, IFF_UP);
		NFE_UNLOCK(sc);
		callout_drain(&sc->nfe_stat_ch);
		ether_ifdetach(ifp);
	}

	if (ifp != NULL) {
		/* restore ethernet address */
		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				eaddr[i] = sc->eaddr[5 - i];
			}
		} else
			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
		nfe_set_macaddr(sc, eaddr);
	}
	if (sc->nfe_miibus)
		device_delete_child(dev, sc->nfe_miibus);
	bus_generic_detach(dev);
	if (sc->nfe_tq != NULL) {
		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
	}

	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
		if (sc->nfe_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->nfe_irq[i],
			    sc->nfe_intrhand[i]);
			sc->nfe_intrhand[i] = NULL;
		}
	}

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->nfe_msix_pba_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
		    sc->nfe_msix_pba_res);
		sc->nfe_msix_pba_res = NULL;
	}
	if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
	}
	if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	nfe_free_jrx_ring(sc, &sc->jrxq);

	if (sc->nfe_parent_tag) {
		bus_dma_tag_destroy(sc->nfe_parent_tag);
		sc->nfe_parent_tag = NULL;
	}

	mtx_destroy(&sc->nfe_mtx);

	return (0);
}
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);
	NFE_LOCK(sc);
	nfe_stop(sc->nfe_ifp);
	sc->nfe_suspended = 1;
	NFE_UNLOCK(sc);
	return (0);
}

static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);
	NFE_LOCK(sc);
	nfe_power(sc);
	ifp = sc->nfe_ifp;
	if (if_getflags(ifp) & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
	NFE_UNLOCK(sc);
	return (0);
}
static int
nfe_can_use_msix(struct nfe_softc *sc)
{
	static struct msix_blacklist {
		char *maker;
		char *product;
	} msix_blacklists[] = {
		{ "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
	};

	struct msix_blacklist *mblp;
	char *maker, *product;
	int count, n, use_msix;

	/*
	 * Search base board manufacturer and product name table
	 * to see whether this system has a known MSI/MSI-X issue.
	 */
	maker = kern_getenv("smbios.planar.maker");
	product = kern_getenv("smbios.planar.product");
	use_msix = 1;
	if (maker != NULL && product != NULL) {
		count = nitems(msix_blacklists);
		mblp = msix_blacklists;
		for (n = 0; n < count; n++) {
			if (strcmp(maker, mblp->maker) == 0 &&
			    strcmp(product, mblp->product) == 0) {
				use_msix = 0;
				break;
			}
			mblp++;
		}
	}

	return (use_msix);
}
/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
	uint32_t pwr;

	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
		return;
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
	DELAY(100);
	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
	pwr = NFE_READ(sc, NFE_PWR2_CTL);
	pwr &= ~NFE_PWR2_WAKEUP_MASK;
	if (sc->nfe_revid >= 0xa3 &&
	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
		pwr |= NFE_PWR2_REVA3;
	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint32_t rxctl, txctl;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->nfe_miibus);
	ifp = sc->nfe_ifp;

	sc->nfe_link = 0;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->nfe_link = 1;
			break;
		default:
			break;
		}
	}

	nfe_mac_config(sc, mii);
	txctl = NFE_READ(sc, NFE_TX_CTL);
	rxctl = NFE_READ(sc, NFE_RX_CTL);
	if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		txctl |= NFE_TX_START;
		rxctl |= NFE_RX_START;
	} else {
		txctl &= ~NFE_TX_START;
		rxctl &= ~NFE_RX_START;
	}
	NFE_WRITE(sc, NFE_TX_CTL, txctl);
	NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}
static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
	uint32_t link, misc, phy, seed;
	uint32_t val;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	misc = NFE_MISC1_MAGIC;
	link = NFE_MEDIA_SET;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}
	if ((phy & 0x10000000) != 0) {
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			val = NFE_R1_MAGIC_1000;
		else
			val = NFE_R1_MAGIC_10_100;
	} else
		val = NFE_R1_MAGIC_DEFAULT;
	NFE_WRITE(sc, NFE_SETUP_R1, val);

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* It seems all hardware supports Rx pause frames. */
		val = NFE_READ(sc, NFE_RXFILTER);
		if ((IFM_OPTIONS(mii->mii_media_active) &
		    IFM_ETH_RXPAUSE) != 0)
			val |= NFE_PFF_RX_PAUSE;
		else
			val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			val = NFE_READ(sc, NFE_MISC1);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_ENABLE);
				val |= NFE_MISC1_TX_PAUSE;
			} else {
				val &= ~NFE_MISC1_TX_PAUSE;
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_DISABLE);
			}
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	} else {
		/* disable rx/tx pause frames */
		val = NFE_READ(sc, NFE_RXFILTER);
		val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
			    NFE_TX_PAUSE_FRAME_DISABLE);
			val = NFE_READ(sc, NFE_MISC1);
			val &= ~NFE_MISC1_TX_PAUSE;
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	}
}
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == NFE_TIMEOUT) {
		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
		return (0);
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY\n");
		return (0);
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
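	/* A readback of all ones or zero means no PHY answered here. */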
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return (val);
}
static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
		device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
	return (0);
}
struct nfe_dmamap_arg {
	bus_addr_t nfe_busaddr;
};
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	/* allocate memory for descriptors */
	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	/* map desc to device visible address space */
	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
		goto fail;
	}

	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create Rx DMA spare map\n");
		goto fail;
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->rx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Rx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;
	if (jumbo_disable != 0) {
		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
		sc->nfe_jumbo_disable = 1;
		return;
	}

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->jcur = ring->jnext = 0;

	/* Create DMA tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
	    1,					/* nsegments */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo ring DMA tag\n");
		goto fail;
	}

	/* Create DMA tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MJUM9BYTES,				/* maxsize */
	    1,					/* nsegments */
	    MJUM9BYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx buffer DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not allocate DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->jdesc64 = desc;
	else
		ring->jdesc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not load DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	ring->jphysaddr = ctx.nfe_busaddr;

	/* Create DMA maps for jumbo Rx buffers. */
	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx DMA spare map\n");
		goto fail;
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &sc->jrxq.jdata[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->jrx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create jumbo Rx DMA map\n");
			goto fail;
		}
	}

	return;

fail:
	/*
	 * Running without jumbo frame support is OK for most cases,
	 * so do not fail when creating the DMA tag/map for jumbo frames.
	 */
	nfe_free_jrx_ring(sc, ring);
	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
	    "resource shortage\n");
	sc->nfe_jumbo_disable = 1;
}
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	void *desc;
	int i, descsize;

	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_RX_RING_COUNT);
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (nfe_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	void *desc;
	int i, descsize;

	ring->jcur = ring->jnext = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		if (nfe_jnewbuf(sc, i) != 0)
			return (ENOBUFS);
	}
	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i;

	if (sc->nfe_flags & NFE_40BIT_ADDR)
		desc = ring->desc64;
	else
		desc = ring->desc32;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->rx_data_tag != NULL) {
		if (ring->rx_spare_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    ring->rx_spare_map);
			ring->rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->rx_data_tag);
		ring->rx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
	}
	if (ring->rx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->rx_desc_tag);
		ring->rx_desc_tag = NULL;
	}
}
static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &ring->jdata[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->jrx_data_tag != NULL) {
		if (ring->jrx_spare_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    ring->jrx_spare_map);
			ring->jrx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->jrx_data_tag);
		ring->jrx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
		ring->jdesc64 = NULL;
		ring->jdesc32 = NULL;
	}

	if (ring->jrx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->jrx_desc_tag);
		ring->jrx_desc_tag = NULL;
	}
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    /* (alignment/size parameters elided in this excerpt) */
	    &ring->tx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
		goto fail;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->tx_data_tag, 0,
		    &ring->data[i].tx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Tx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	int descsize;

	sc->nfe_force_tx = 0;
	ring->queued = 0;
	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_TX_RING_COUNT);

	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->tx_data_map != NULL) {
			bus_dmamap_destroy(ring->tx_data_tag,
			    data->tx_data_map);
			data->tx_data_map = NULL;
		}
	}

	if (ring->tx_data_tag != NULL) {
		bus_dma_tag_destroy(ring->tx_data_tag);
		ring->tx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		bus_dma_tag_destroy(ring->tx_desc_tag);
		ring->tx_desc_tag = NULL;
	}
}
#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;

static int
nfe_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = if_getsoftc(ifp);
	uint32_t r;
	int rx_npkts = 0;

	NFE_LOCK(sc);
	if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
		NFE_UNLOCK(sc);
		return (rx_npkts);
	}

	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
	else
		rx_npkts = nfe_rxeof(sc, count, &rx_npkts);

	if (!if_sendq_empty(ifp))
		nfe_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
			NFE_UNLOCK(sc);
			return (rx_npkts);
		}
		NFE_WRITE(sc, sc->nfe_irq_status, r);

		if (r & NFE_IRQ_LINK) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(sc, "link state changed\n");
		}
	}
	NFE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
static void
nfe_set_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msi != 0)
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}
/* In MSI-X mode, a write to a mask register behaves as XOR. */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to enable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}

static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to disable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}
static int
nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, init, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *) data;
	error = 0;
	init = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
			    (sc->nfe_jumbo_disable != 0)) &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				NFE_LOCK(sc);
				if_setmtu(ifp, ifr->ifr_mtu);
				if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
					if_setdrvflagbits(ifp, 0,
					    IFF_DRV_RUNNING);
					nfe_init_locked(sc);
				}
				NFE_UNLOCK(sc);
			}
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
			    ((if_getflags(ifp) ^ sc->nfe_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				nfe_stop(ifp);
		}
		sc->nfe_if_flags = if_getflags(ifp);
		NFE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			NFE_LOCK(sc);
			nfe_setmulti(sc);
			NFE_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->nfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(nfe_poll, ifp);
				if (error != 0)
					break;
				NFE_LOCK(sc);
				nfe_disable_intr(sc);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				NFE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				NFE_LOCK(sc);
				nfe_enable_intr(sc);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				NFE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
				if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_RXCSUM);
			init++;
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
			if_togglecapenable(ifp, IFCAP_TSO4);
			if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
				if_sethwassistbits(ifp, CSUM_TSO, 0);
			else
				if_sethwassistbits(ifp, 0, CSUM_TSO);
		}
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
			if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
				if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
			init++;
		}
		/*
		 * It seems that VLAN stripping requires Rx checksum offload.
		 * Unfortunately FreeBSD has no way to disable only Rx side
		 * VLAN stripping. So when we know Rx checksum offload is
		 * disabled, turn the entire hardware VLAN assist off.
		 */
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) {
			if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
				init++;
			if_setcapenablebit(ifp, 0,
			    (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO));
		}
		if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
			if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
			nfe_init(sc);
		}
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static int
nfe_intr(void *arg)
{
	struct nfe_softc *sc;
	uint32_t status;

	sc = (struct nfe_softc *)arg;

	status = NFE_READ(sc, sc->nfe_irq_status);
	if (status == 0 || status == 0xffffffff)
		return (FILTER_STRAY);
	nfe_disable_intr(sc);
	taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);

	return (FILTER_HANDLED);
}
static void
nfe_int_task(void *arg, int pending)
{
	struct nfe_softc *sc = arg;
	if_t ifp = sc->nfe_ifp;
	uint32_t r;
	int domore;

	NFE_LOCK(sc);

	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
		nfe_enable_intr(sc);
		NFE_UNLOCK(sc);
		return;	/* not for us */
	}
	NFE_WRITE(sc, sc->nfe_irq_status, r);

	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);

#ifdef DEVICE_POLLING
	if (if_getcapenable(ifp) & IFCAP_POLLING) {
		NFE_UNLOCK(sc);
		return;
	}
#endif

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed\n");
	}

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		NFE_UNLOCK(sc);
		nfe_disable_intr(sc);
		return;
	}

	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
	else
		domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
	nfe_txeof(sc);

	if (!if_sendq_empty(ifp))
		nfe_start_locked(ifp);

	NFE_UNLOCK(sc);

	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
		taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task);
		return;
	}

	/* Reenable interrupts. */
	nfe_enable_intr(sc);
}
static __inline void
nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->rxq.data[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->rxq.desc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
static __inline void
nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->jrxq.jdata[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
static int
nfe_newbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
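	/*
	 * ETHER_ALIGN keeps the IP header 32-bit aligned behind the
	 * 14-byte Ethernet header.
	 */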
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	data = &sc->rxq.data[idx];
	if (data->m != NULL) {
		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
	}
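	/*
	 * Swap in the just-loaded spare map; the old map becomes the
	 * new spare.
	 */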
	map = data->rx_data_map;
	data->rx_data_map = sc->rxq.rx_spare_map;
	sc->rxq.rx_spare_map = map;
	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->rxq.desc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}
static int
nfe_jnewbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	data = &sc->jrxq.jdata[idx];
	if (data->m != NULL) {
		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
	}
	map = data->rx_data_map;
	data->rx_data_map = sc->jrxq.jrx_spare_map;
	sc->jrxq.jrx_spare_map = map;
	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}
static int
nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	if_t ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
	    BUS_DMASYNC_POSTREAD);
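	/*
	 * Walk the ring until we hit a descriptor the chip still owns
	 * or the budget runs out.
	 */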
	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		m = data->m;
		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		NFE_UNLOCK(sc);
		if_input(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
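	/* Zero means the ring was drained; EAGAIN, the budget ran out. */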
	return (count > 0 ? 0 : EAGAIN);
}
static int
nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	if_t ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
	    vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->jrxq.jdata[sc->jrxq.jcur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		m = data->m;
		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		NFE_UNLOCK(sc);
		if_input(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}
2342 nfe_txeof(struct nfe_softc *sc)
2344 if_t ifp = sc->nfe_ifp;
2345 struct nfe_desc32 *desc32;
2346 struct nfe_desc64 *desc64;
2347 struct nfe_tx_data *data = NULL;
2351 NFE_LOCK_ASSERT(sc);
2353 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2354 BUS_DMASYNC_POSTREAD);
2357 for (cons = sc->txq.next; cons != sc->txq.cur;
2358 NFE_INC(cons, NFE_TX_RING_COUNT)) {
2359 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2360 desc64 = &sc->txq.desc64[cons];
2361 flags = le16toh(desc64->flags);
2363 desc32 = &sc->txq.desc32[cons];
2364 flags = le16toh(desc32->flags);
2367 if (flags & NFE_TX_VALID)
2372 data = &sc->txq.data[cons];
2374 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2375 if ((flags & NFE_TX_LASTFRAG_V1) == 0)
2377 if ((flags & NFE_TX_ERROR_V1) != 0) {
2378 device_printf(sc->nfe_dev,
2379 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2381 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2383 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2385 if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2387 if ((flags & NFE_TX_ERROR_V2) != 0) {
2388 device_printf(sc->nfe_dev,
2389 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2390 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2392 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2395 /* last fragment of the mbuf chain transmitted */
2396 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2397 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2398 BUS_DMASYNC_POSTWRITE);
2399 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2405 sc->nfe_force_tx = 0;
2406 sc->txq.next = cons;
2407 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2408 if (sc->txq.queued == 0)
2409 sc->nfe_watchdog_timer = 0;
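/*
 * Enqueue one mbuf chain on the Tx ring: DMA map the chain (calling
 * m_collapse() and retrying when it has too many fragments), fill one
 * descriptor per segment, and only then hand ownership to the chip by
 * setting NFE_TX_VALID in the first descriptor last.
 */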
2414 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2416 struct nfe_desc32 *desc32 = NULL;
2417 struct nfe_desc64 *desc64 = NULL;
2419 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2420 int error, i, nsegs, prod, si;
2422 uint16_t cflags, flags;
2425 prod = si = sc->txq.cur;
2426 map = sc->txq.data[prod].tx_data_map;
2428 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2429 &nsegs, BUS_DMA_NOWAIT);
2430 if (error == EFBIG) {
2431 m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
2438 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2439 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2445 } else if (error != 0)
2453 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2454 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2461 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2462 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2464 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2465 cflags |= NFE_TX_TSO;
2466 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2467 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2468 cflags |= NFE_TX_IP_CSUM;
2469 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2470 cflags |= NFE_TX_TCP_UDP_CSUM;
2471 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2472 cflags |= NFE_TX_TCP_UDP_CSUM;
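/* the length field of each descriptor is programmed as length - 1 */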
2475 for (i = 0; i < nsegs; i++) {
2476 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2477 desc64 = &sc->txq.desc64[prod];
2478 desc64->physaddr[0] =
2479 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2480 desc64->physaddr[1] =
2481 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2483 desc64->length = htole16(segs[i].ds_len - 1);
2484 desc64->flags = htole16(flags);
2486 desc32 = &sc->txq.desc32[prod];
2488 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2489 desc32->length = htole16(segs[i].ds_len - 1);
2490 desc32->flags = htole16(flags);
2494 * Setting of the valid bit in the first descriptor is
2495 * deferred until the whole chain is fully set up.
2497 flags |= NFE_TX_VALID;
2500 NFE_INC(prod, NFE_TX_RING_COUNT);
2504 * The whole mbuf chain has been DMA mapped; fix the last/first descriptors.
2505 * Csum flags, vtag and TSO belong to the first fragment only.
2507 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2508 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2509 desc64 = &sc->txq.desc64[si];
2510 if ((m->m_flags & M_VLANTAG) != 0)
2511 desc64->vtag = htole32(NFE_TX_VTAG |
2512 m->m_pkthdr.ether_vtag);
2513 if (tsosegsz != 0) {
2516 * The TSO segment size is split across the 16-bit length and flags
2517 * fields, which the hardware reads back as a single 32-bit quantity.
2519 desc64->length |= htole16((uint16_t)tsosegsz);
2520 desc64->flags |= htole16(tsosegsz >> 16);
2523 * finally, set the valid/checksum/TSO bits in the first descriptor.
2526 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2528 if (sc->nfe_flags & NFE_JUMBO_SUP)
2529 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2531 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2532 desc32 = &sc->txq.desc32[si];
2533 if (tsosegsz != 0) {
2536 * The TSO segment size is split across the 16-bit length and flags
2537 * fields, which the hardware reads back as a single 32-bit quantity.
2539 desc32->length |= htole16((uint16_t)tsosegsz);
2540 desc32->flags |= htole16(tsosegsz >> 16);
2543 * finally, set the valid/checksum/TSO bits in the first descriptor.
2546 desc32->flags |= htole16(NFE_TX_VALID | cflags);
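/*
 * The DMA map was loaded through the slot at si, but the mbuf is
 * stored at the last descriptor so it is freed only after the final
 * fragment completes; swap the two maps so each slot keeps the map
 * that actually covers its mbuf.
 */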
2550 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2551 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2552 sc->txq.data[prod].tx_data_map = map;
2553 sc->txq.data[prod].m = m;
2555 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
2560 struct nfe_hash_maddr_ctx {
2561 uint8_t addr[ETHER_ADDR_LEN];
2562 uint8_t mask[ETHER_ADDR_LEN];
2566 nfe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
2568 struct nfe_hash_maddr_ctx *ctx = arg;
2569 uint8_t *addrp, mcaddr;
2572 addrp = LLADDR(sdl);
2573 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2575 ctx->addr[j] &= mcaddr;
2576 ctx->mask[j] &= ~mcaddr;
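/*
 * Once every address has been folded in, ctx->addr holds the bits set
 * in all multicast addresses and ctx->mask the bits clear in all of
 * them; nfe_setmulti() combines the two into the address/mask pair
 * programmed into the hardware filter.
 */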
2583 nfe_setmulti(struct nfe_softc *sc)
2585 if_t ifp = sc->nfe_ifp;
2586 struct nfe_hash_maddr_ctx ctx;
2588 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2589 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2593 NFE_LOCK_ASSERT(sc);
2595 if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2596 bzero(ctx.addr, ETHER_ADDR_LEN);
2597 bzero(ctx.mask, ETHER_ADDR_LEN);
2601 bcopy(etherbroadcastaddr, ctx.addr, ETHER_ADDR_LEN);
2602 bcopy(etherbroadcastaddr, ctx.mask, ETHER_ADDR_LEN);
2604 if_foreach_llmaddr(ifp, nfe_hash_maddr, &ctx);
2606 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2607 ctx.mask[i] |= ctx.addr[i];
2611 ctx.addr[0] |= 0x01; /* make sure multicast bit is set */
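/*
 * ctx.mask now has a 1 wherever the bit is identical across all
 * multicast addresses and ctx.addr carries the shared value of those
 * bits, so the chip can match any enabled address in the positions
 * mask selects.  For example, 01:00:5e:00:00:01 and 01:00:5e:00:00:03
 * differ only in bit 1 of the last octet, giving addr
 * 01:00:5e:00:00:01 and mask ff:ff:ff:ff:ff:fd.
 */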
2613 NFE_WRITE(sc, NFE_MULTIADDR_HI, ctx.addr[3] << 24 | ctx.addr[2] << 16 |
2614 ctx.addr[1] << 8 | ctx.addr[0]);
2615 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2616 ctx.addr[5] << 8 | ctx.addr[4]);
2617 NFE_WRITE(sc, NFE_MULTIMASK_HI, ctx.mask[3] << 24 | ctx.mask[2] << 16 |
2618 ctx.mask[1] << 8 | ctx.mask[0]);
2619 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2620 ctx.mask[5] << 8 | ctx.mask[4]);
2622 filter = NFE_READ(sc, NFE_RXFILTER);
2623 filter &= NFE_PFF_RX_PAUSE;
2624 filter |= NFE_RXFILTER_MAGIC;
2625 filter |= (if_getflags(ifp) & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2626 NFE_WRITE(sc, NFE_RXFILTER, filter);
2633 struct nfe_softc *sc = if_getsoftc(ifp);
2636 nfe_start_locked(ifp);
2641 nfe_start_locked(if_t ifp)
2643 struct nfe_softc *sc = if_getsoftc(ifp);
2647 NFE_LOCK_ASSERT(sc);
2649 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2650 IFF_DRV_RUNNING || sc->nfe_link == 0)
2653 while (!if_sendq_empty(ifp)) {
2654 m0 = if_dequeue(ifp);
2659 if (nfe_encap(sc, &m0) != 0) {
2662 if_sendq_prepend(ifp, m0);
2663 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
2667 if_etherbpfmtap(ifp, m0);
2671 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2672 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2675 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2678 * Set a timeout in case the chip goes out to lunch.
2680 sc->nfe_watchdog_timer = 5;
2686 nfe_watchdog(if_t ifp)
2688 struct nfe_softc *sc = if_getsoftc(ifp);
2690 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2693 /* Check if we've lost Tx completion interrupt. */
2695 if (sc->txq.queued == 0) {
2696 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2698 if (!if_sendq_empty(ifp))
2699 nfe_start_locked(ifp);
2702 /* Check if we've lost start Tx command. */
2704 if (sc->nfe_force_tx <= 3) {
2706 * If lost start Tx commands turn out to be the common cause of these
2707 * watchdog timeouts, this kick should move into nfe_txeof().
2709 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2712 sc->nfe_force_tx = 0;
2714 if_printf(ifp, "watchdog timeout\n");
2716 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2717 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2718 nfe_init_locked(sc);
2725 struct nfe_softc *sc = xsc;
2728 nfe_init_locked(sc);
2734 nfe_init_locked(void *xsc)
2736 struct nfe_softc *sc = xsc;
2737 if_t ifp = sc->nfe_ifp;
2738 struct mii_data *mii;
2742 NFE_LOCK_ASSERT(sc);
2744 mii = device_get_softc(sc->nfe_miibus);
2746 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2751 sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
2753 nfe_init_tx_ring(sc, &sc->txq);
2754 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2755 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2757 error = nfe_init_rx_ring(sc, &sc->rxq);
2759 device_printf(sc->nfe_dev,
2760 "initialization failed: no memory for rx buffers\n");
2766 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2767 val |= NFE_MAC_ADDR_INORDER;
2768 NFE_WRITE(sc, NFE_TX_UNK, val);
2769 NFE_WRITE(sc, NFE_STATUS, 0);
2771 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2772 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2774 sc->rxtxctl = NFE_RXTX_BIT2;
2775 if (sc->nfe_flags & NFE_40BIT_ADDR)
2776 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2777 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2778 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2780 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2781 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2782 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2783 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2785 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2787 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2789 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2790 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2792 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2794 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2796 /* set MAC address */
2797 nfe_set_macaddr(sc, if_getlladdr(ifp));
2799 /* tell MAC where rings are in memory */
2800 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2801 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2802 NFE_ADDR_HI(sc->jrxq.jphysaddr));
2803 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2804 NFE_ADDR_LO(sc->jrxq.jphysaddr));
2806 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2807 NFE_ADDR_HI(sc->rxq.physaddr));
2808 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2809 NFE_ADDR_LO(sc->rxq.physaddr));
2811 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2812 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
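/* ring sizes are programmed as count - 1, Rx count in the high 16 bits */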
2814 NFE_WRITE(sc, NFE_RING_SIZE,
2815 (NFE_RX_RING_COUNT - 1) << 16 |
2816 (NFE_TX_RING_COUNT - 1));
2818 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2820 /* force MAC to wakeup */
2821 val = NFE_READ(sc, NFE_PWR_STATE);
2822 if ((val & NFE_PWR_WAKEUP) == 0)
2823 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2825 val = NFE_READ(sc, NFE_PWR_STATE);
2826 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2828 #ifdef notyet
2829 /* configure interrupt coalescing/mitigation */
2830 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2831 #else
2832 /* no interrupt mitigation: one interrupt per packet */
2833 NFE_WRITE(sc, NFE_IMTIMER, 970);
2834 #endif
2836 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2837 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2838 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2840 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2841 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2843 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2845 NFE_WRITE(sc, NFE_WOL_CTL, 0);
2847 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2848 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2850 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2856 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2859 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2861 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2863 /* Clear hardware stats. */
2864 nfe_stats_clear(sc);
2866 #ifdef DEVICE_POLLING
2867 if (if_getcapenable(ifp) & IFCAP_POLLING)
2868 nfe_disable_intr(sc);
2872 nfe_enable_intr(sc); /* enable interrupts */
2874 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2875 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2880 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2887 struct nfe_softc *sc = if_getsoftc(ifp);
2888 struct nfe_rx_ring *rx_ring;
2889 struct nfe_jrx_ring *jrx_ring;
2890 struct nfe_tx_ring *tx_ring;
2891 struct nfe_rx_data *rdata;
2892 struct nfe_tx_data *tdata;
2895 NFE_LOCK_ASSERT(sc);
2897 sc->nfe_watchdog_timer = 0;
2898 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2900 callout_stop(&sc->nfe_stat_ch);
2903 NFE_WRITE(sc, NFE_TX_CTL, 0);
2906 NFE_WRITE(sc, NFE_RX_CTL, 0);
2908 /* disable interrupts */
2909 nfe_disable_intr(sc);
2913 /* free Rx and Tx mbufs still in the queues. */
2915 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2916 rdata = &rx_ring->data[i];
2917 if (rdata->m != NULL) {
2918 bus_dmamap_sync(rx_ring->rx_data_tag,
2919 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2920 bus_dmamap_unload(rx_ring->rx_data_tag,
2921 rdata->rx_data_map);
2927 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2928 jrx_ring = &sc->jrxq;
2929 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2930 rdata = &jrx_ring->jdata[i];
2931 if (rdata->m != NULL) {
2932 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2933 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2934 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2935 rdata->rx_data_map);
2943 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2944 tdata = &tx_ring->data[i];
2945 if (tdata->m != NULL) {
2946 bus_dmamap_sync(tx_ring->tx_data_tag,
2947 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2948 bus_dmamap_unload(tx_ring->tx_data_tag,
2949 tdata->tx_data_map);
2954 /* Update hardware stats. */
2955 nfe_stats_update(sc);
2960 nfe_ifmedia_upd(if_t ifp)
2962 struct nfe_softc *sc = if_getsoftc(ifp);
2963 struct mii_data *mii;
2966 mii = device_get_softc(sc->nfe_miibus);
2975 nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
2977 struct nfe_softc *sc;
2978 struct mii_data *mii;
2980 sc = if_getsoftc(ifp);
2983 mii = device_get_softc(sc->nfe_miibus);
2986 ifmr->ifm_active = mii->mii_media_active;
2987 ifmr->ifm_status = mii->mii_media_status;
2995 struct nfe_softc *sc;
2996 struct mii_data *mii;
2999 sc = (struct nfe_softc *)xsc;
3001 NFE_LOCK_ASSERT(sc);
3005 mii = device_get_softc(sc->nfe_miibus);
3007 nfe_stats_update(sc);
3009 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
3014 nfe_shutdown(device_t dev)
3017 return (nfe_suspend(dev));
3022 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
3026 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
3027 val = NFE_READ(sc, NFE_MACADDR_LO);
3028 addr[0] = (val >> 8) & 0xff;
3029 addr[1] = (val & 0xff);
3031 val = NFE_READ(sc, NFE_MACADDR_HI);
3032 addr[2] = (val >> 24) & 0xff;
3033 addr[3] = (val >> 16) & 0xff;
3034 addr[4] = (val >> 8) & 0xff;
3035 addr[5] = (val & 0xff);
3037 val = NFE_READ(sc, NFE_MACADDR_LO);
3038 addr[5] = (val >> 8) & 0xff;
3039 addr[4] = (val & 0xff);
3041 val = NFE_READ(sc, NFE_MACADDR_HI);
3042 addr[3] = (val >> 24) & 0xff;
3043 addr[2] = (val >> 16) & 0xff;
3044 addr[1] = (val >> 8) & 0xff;
3045 addr[0] = (val & 0xff);
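/*
 * Program the station address.  The byte order below mirrors the
 * NFE_CORRECT_MACADDR branch of nfe_get_macaddr() above: addr[5] and
 * addr[4] go into the low register, addr[3]..addr[0] into the high one.
 */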
3051 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
3054 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
3055 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3056 addr[1] << 8 | addr[0]);
3061 * Map a single buffer address.
3065 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3067 struct nfe_dmamap_arg *ctx;
3072 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3074 ctx = (struct nfe_dmamap_arg *)arg;
3075 ctx->nfe_busaddr = segs[0].ds_addr;
3080 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3086 value = *(int *)arg1;
3087 error = sysctl_handle_int(oidp, &value, 0, req);
3088 if (error || !req->newptr)
3090 if (value < low || value > high)
3092 *(int *)arg1 = value;
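/*
 * sysctl_hw_nfe_proc_limit() below binds this range check to
 * [NFE_PROC_MIN, NFE_PROC_MAX] for the Rx processing budget, e.g.:
 *
 *	# sysctl dev.nfe.0.process_limit=64
 */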
3099 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3102 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3107 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3108 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3109 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3110 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3113 nfe_sysctl_node(struct nfe_softc *sc)
3115 struct sysctl_ctx_list *ctx;
3116 struct sysctl_oid_list *child, *parent;
3117 struct sysctl_oid *tree;
3118 struct nfe_hw_stats *stats;
3121 stats = &sc->nfe_stats;
3122 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3123 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3124 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
3125 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
3126 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3127 "max number of Rx events to process");
3129 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3130 error = resource_int_value(device_get_name(sc->nfe_dev),
3131 device_get_unit(sc->nfe_dev), "process_limit",
3132 &sc->nfe_process_limit);
3134 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3135 sc->nfe_process_limit > NFE_PROC_MAX) {
3136 device_printf(sc->nfe_dev,
3137 "process_limit value out of range; "
3138 "using default: %d\n", NFE_PROC_DEFAULT);
3139 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3143 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3146 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
3147 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NFE statistics");
3148 parent = SYSCTL_CHILDREN(tree);
3150 /* Rx statistics. */
3151 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
3152 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
3153 child = SYSCTL_CHILDREN(tree);
3155 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3156 &stats->rx_frame_errors, "Framing Errors");
3157 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3158 &stats->rx_extra_bytes, "Extra Bytes");
3159 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3160 &stats->rx_late_cols, "Late Collisions");
3161 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3162 &stats->rx_runts, "Runts");
3163 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3164 &stats->rx_jumbos, "Jumbos");
3165 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3166 &stats->rx_fifo_overuns, "FIFO Overruns");
3167 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3168 &stats->rx_crc_errors, "CRC Errors");
3169 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3170 &stats->rx_fae, "Frame Alignment Errors");
3171 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3172 &stats->rx_len_errors, "Length Errors");
3173 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3174 &stats->rx_unicast, "Unicast Frames");
3175 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3176 &stats->rx_multicast, "Multicast Frames");
3177 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3178 &stats->rx_broadcast, "Broadcast Frames");
3179 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3180 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3181 &stats->rx_octets, "Octets");
3182 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3183 &stats->rx_pause, "Pause Frames");
3184 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3185 &stats->rx_drops, "Dropped Frames");
3188 /* Tx statistics. */
3189 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
3190 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
3191 child = SYSCTL_CHILDREN(tree);
3192 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3193 &stats->tx_octets, "Octets");
3194 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3195 &stats->tx_zero_rexmits, "Zero Retransmits");
3196 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3197 &stats->tx_one_rexmits, "One Retransmits");
3198 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3199 &stats->tx_multi_rexmits, "Multiple Retransmits");
3200 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3201 &stats->tx_late_cols, "Late Collisions");
3202 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3203 &stats->tx_fifo_underuns, "FIFO Underruns");
3204 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3205 &stats->tx_carrier_losts, "Carrier Lost Errors");
3206 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3207 &stats->tx_excess_deferals, "Excess Deferrals");
3208 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3209 &stats->tx_retry_errors, "Retry Errors");
3210 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3211 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3212 &stats->tx_deferals, "Deferrals");
3213 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3214 &stats->tx_frames, "Frames");
3215 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3216 &stats->tx_pause, "Pause Frames");
3218 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3219 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3220 &stats->tx_unicast, "Unicast Frames");
3221 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3222 &stats->tx_multicast, "Multicast Frames");
3223 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3224 &stats->tx_broadcast, "Broadcast Frames");
3228 #undef NFE_SYSCTL_STAT_ADD32
3229 #undef NFE_SYSCTL_STAT_ADD64
3232 nfe_stats_clear(struct nfe_softc *sc)
3236 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3237 mib_cnt = NFE_NUM_MIB_STATV1;
3238 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3239 mib_cnt = NFE_NUM_MIB_STATV2;
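/* the MIB counters appear to be clear-on-read: one read resets each */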
3243 for (i = 0; i < mib_cnt; i++)
3244 NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));
3246 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3247 NFE_READ(sc, NFE_TX_UNICAST);
3248 NFE_READ(sc, NFE_TX_MULTICAST);
3249 NFE_READ(sc, NFE_TX_BROADCAST);
3254 nfe_stats_update(struct nfe_softc *sc)
3256 struct nfe_hw_stats *stats;
3258 NFE_LOCK_ASSERT(sc);
3260 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3263 stats = &sc->nfe_stats;
3264 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3265 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3266 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3267 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3268 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3269 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3270 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3271 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3272 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3273 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3274 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3275 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3276 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3277 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3278 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3279 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3280 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3281 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3282 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3283 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3284 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3286 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3287 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3288 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3289 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3290 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3291 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3292 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3295 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3296 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3297 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3298 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
3304 nfe_set_linkspeed(struct nfe_softc *sc)
3306 struct mii_softc *miisc;
3307 struct mii_data *mii;
3310 NFE_LOCK_ASSERT(sc);
3312 mii = device_get_softc(sc->nfe_miibus);
3315 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3316 (IFM_ACTIVE | IFM_AVALID)) {
3317 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3328 miisc = LIST_FIRST(&mii->mii_phys);
3329 phyno = miisc->mii_phy;
3330 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3332 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3333 nfe_miibus_writereg(sc->nfe_dev, phyno,
3334 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3335 nfe_miibus_writereg(sc->nfe_dev, phyno,
3336 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3340 * Poll link state until nfe(4) gets a 10/100Mbps link.
3342 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3344 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3345 == (IFM_ACTIVE | IFM_AVALID)) {
3346 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3349 nfe_mac_config(sc, mii);
3356 pause("nfelnk", hz);
3359 if (i == MII_ANEGTICKS_GIGE)
3360 device_printf(sc->nfe_dev,
3361 "establishing a link failed, WOL may not work!");
3364 * No link; force the MAC to a 100Mbps, full-duplex link.
3365 * This is the last resort and may or may not work.
3367 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3368 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3369 nfe_mac_config(sc, mii);
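/*
 * Note that nfe_set_linkspeed() renegotiates with 1000BASE-T
 * advertisement turned off (MII_100T2CR cleared), presumably because
 * the PHY must hold a 10/100 link within the low-power budget for
 * magic packets to be received while suspended.
 */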
3374 nfe_set_wol(struct nfe_softc *sc)
3381 NFE_LOCK_ASSERT(sc);
3383 if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3386 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
3387 wolctl = NFE_WOL_MAGIC;
3390 NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3391 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
3392 nfe_set_linkspeed(sc);
3393 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3394 NFE_WRITE(sc, NFE_PWR2_CTL,
3395 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3397 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3398 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3399 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3402 /* Request PME if WOL is requested. */
3403 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3404 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3405 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
3406 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3407 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);