/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>
MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
static int nfe_probe(device_t);
static int nfe_attach(device_t);
static int nfe_detach(device_t);
static int nfe_suspend(device_t);
static int nfe_resume(device_t);
static int nfe_shutdown(device_t);
static int nfe_can_use_msix(struct nfe_softc *);
static int nfe_detect_msik9(struct nfe_softc *);
static void nfe_power(struct nfe_softc *);
static int nfe_miibus_readreg(device_t, int, int);
static int nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_mac_config(struct nfe_softc *, struct mii_data *);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int nfe_ioctl(if_t, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int nfe_newbuf(struct nfe_softc *, int);
static int nfe_jnewbuf(struct nfe_softc *, int);
static int nfe_rxeof(struct nfe_softc *, int, int *);
static int nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_start(if_t);
static void nfe_start_locked(if_t);
static void nfe_watchdog(if_t);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(if_t);
static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int nfe_ifmedia_upd(if_t);
static void nfe_ifmedia_sts(if_t, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
static void nfe_set_linkspeed(struct nfe_softc *);
static void nfe_set_wol(struct nfe_softc *);
#ifdef NFE_DEBUG
static int nfedebug = 0;
#define DPRINTF(sc, ...) do {                                \
    if (nfedebug)                                            \
        device_printf((sc)->nfe_dev, __VA_ARGS__);           \
} while (0)
#define DPRINTFN(sc, n, ...) do {                            \
    if (nfedebug >= (n))                                     \
        device_printf((sc)->nfe_dev, __VA_ARGS__);           \
} while (0)
#else
#define DPRINTF(sc, ...)
#define DPRINTFN(sc, n, ...)
#endif

#define NFE_LOCK(_sc)           mtx_lock(&(_sc)->nfe_mtx)
#define NFE_UNLOCK(_sc)         mtx_unlock(&(_sc)->nfe_mtx)
#define NFE_LOCK_ASSERT(_sc)    mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
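/*
 * All three knobs are boot-time loader tunables; a sketch of how they
 * might be set from /boot/loader.conf (the values shown are examples):
 *
 *	hw.nfe.msi_disable="1"		# fall back from MSI to INTx
 *	hw.nfe.msix_disable="1"		# fall back from MSI-X to MSI/INTx
 *	hw.nfe.jumbo_disable="1"	# skip jumbo frame ring allocation
 */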
static device_method_t nfe_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, nfe_probe),
    DEVMETHOD(device_attach, nfe_attach),
    DEVMETHOD(device_detach, nfe_detach),
    DEVMETHOD(device_suspend, nfe_suspend),
    DEVMETHOD(device_resume, nfe_resume),
    DEVMETHOD(device_shutdown, nfe_shutdown),

    /* MII interface */
    DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
    DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
    DEVMETHOD(miibus_statchg, nfe_miibus_statchg),

    DEVMETHOD_END
};

static driver_t nfe_driver = {
    "nfe",
    nfe_methods,
    sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
static struct nfe_type nfe_devs[] = {
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
    "NVIDIA nForce MCP Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
    "NVIDIA nForce2 MCP2 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
    "NVIDIA nForce3 MCP3 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
    "NVIDIA nForce3 MCP7 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
    "NVIDIA nForce 430 MCP12 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
    "NVIDIA nForce 430 MCP13 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
    "NVIDIA nForce MCP55 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
    "NVIDIA nForce MCP55 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
    "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
    "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
    "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
    "NVIDIA nForce MCP61 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
    "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
    "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
    "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
    "NVIDIA nForce MCP65 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
    "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
    "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
    "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
    "NVIDIA nForce MCP67 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
    "NVIDIA nForce MCP73 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
    "NVIDIA nForce MCP73 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
    "NVIDIA nForce MCP73 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
    "NVIDIA nForce MCP73 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
    "NVIDIA nForce MCP77 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
    "NVIDIA nForce MCP77 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
    "NVIDIA nForce MCP77 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
    "NVIDIA nForce MCP77 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
    "NVIDIA nForce MCP79 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
    "NVIDIA nForce MCP79 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
    "NVIDIA nForce MCP79 Networking Adapter"},
    {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
    "NVIDIA nForce MCP79 Networking Adapter"},
    {0, 0, NULL}
};
/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
    struct nfe_type *t;

    t = nfe_devs;
    /* Check for matching PCI device IDs */
    while (t->name != NULL) {
        if ((pci_get_vendor(dev) == t->vid_id) &&
            (pci_get_device(dev) == t->dev_id)) {
            device_set_desc(dev, t->name);
            return (BUS_PROBE_DEFAULT);
        }
        t++;
    }

    return (ENXIO);
}
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
    int rid;

    rid = PCIR_BAR(2);
    sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
        &rid, RF_ACTIVE);
    if (sc->nfe_msix_res == NULL) {
        device_printf(sc->nfe_dev,
            "couldn't allocate MSIX table resource\n");
        return;
    }
    rid = PCIR_BAR(3);
    sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
        SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (sc->nfe_msix_pba_res == NULL) {
        device_printf(sc->nfe_dev,
            "couldn't allocate MSIX PBA resource\n");
        bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
            sc->nfe_msix_res);
        sc->nfe_msix_res = NULL;
        return;
    }

    if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
        if (count == NFE_MSI_MESSAGES) {
            if (bootverbose)
                device_printf(sc->nfe_dev,
                    "Using %d MSIX messages\n", count);
            sc->nfe_msix = 1;
        } else {
            device_printf(sc->nfe_dev,
                "couldn't allocate MSIX\n");
            pci_release_msi(sc->nfe_dev);
            bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                PCIR_BAR(3), sc->nfe_msix_pba_res);
            bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
                PCIR_BAR(2), sc->nfe_msix_res);
            sc->nfe_msix_pba_res = NULL;
            sc->nfe_msix_res = NULL;
        }
    }
}
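/*
 * Identify the MSI K9N6PGM2-V2 (MS-7309) board from its SMBIOS planar
 * strings.  On this board the PHY apparently does not answer at the
 * address miibus would otherwise discover, so attach pins the PHY
 * location when this function reports a match.
 */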
static int
nfe_detect_msik9(struct nfe_softc *sc)
{
    static const char *maker = "MSI";
    static const char *product = "K9N6PGM2-V2 (MS-7309)";
    char *m, *p;

    m = getenv("smbios.planar.maker");
    p = getenv("smbios.planar.product");
    if (m != NULL && p != NULL) {
        if (strcmp(m, maker) == 0 && strcmp(p, product) == 0)
static int
nfe_attach(device_t dev)
{
    struct nfe_softc *sc;
    if_t ifp;
    bus_addr_t dma_addr_max;
    int error = 0, i, msic, phyloc, reg, rid;

    sc = device_get_softc(dev);
    sc->nfe_dev = dev;

    mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);
    callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);

    pci_enable_busmaster(dev);

    rid = PCIR_BAR(0);
    sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
        RF_ACTIVE);
    if (sc->nfe_res[0] == NULL) {
        device_printf(dev, "couldn't map memory resources\n");
        mtx_destroy(&sc->nfe_mtx);
        return (ENXIO);
    }

    if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) {
        uint16_t v, width;

        v = pci_read_config(dev, reg + 0x08, 2);
        /* Change max. read request size to 4096. */
        v &= ~(7 << 12);
        v |= (5 << 12);
        pci_write_config(dev, reg + 0x08, v, 2);

        v = pci_read_config(dev, reg + 0x0c, 2);
        /* link capability */
        v = (v >> 4) & 0x3f;
        width = pci_read_config(dev, reg + 0x12, 2);
        /* negotiated link width */
        width = (width >> 4) & 0x3f;
        if (v != width)
            device_printf(sc->nfe_dev,
                "warning, negotiated width of link(x%d) != "
                "max. width of link(x%d)\n", width, v);
    }
    if (nfe_can_use_msix(sc) == 0) {
        device_printf(sc->nfe_dev,
            "MSI/MSI-X capability black-listed, will use INTx\n");
        msix_disable = 1;
        msi_disable = 1;
    }

    /* Allocate interrupt */
    if (msix_disable == 0 || msi_disable == 0) {
        if (msix_disable == 0 &&
            (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
            nfe_alloc_msix(sc, msic);
        if (msi_disable == 0 && sc->nfe_msix == 0 &&
            (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
            pci_alloc_msi(dev, &msic) == 0) {
            if (msic == NFE_MSI_MESSAGES) {
                if (bootverbose)
                    device_printf(dev,
                        "Using %d MSI messages\n", msic);
                sc->nfe_msi = 1;
            } else
                pci_release_msi(dev);
        }
    }

    if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
        rid = 0;
        sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->nfe_irq[0] == NULL) {
            device_printf(dev, "couldn't allocate IRQ resources\n");
            error = ENXIO;
            goto fail;
        }
    } else {
        for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
            sc->nfe_irq[i] = bus_alloc_resource_any(dev,
                SYS_RES_IRQ, &rid, RF_ACTIVE);
            if (sc->nfe_irq[i] == NULL) {
                device_printf(dev,
                    "couldn't allocate IRQ resources for "
                    "message %d\n", rid);
                error = ENXIO;
                goto fail;
            }
        }
        /* Map interrupts to vector 0. */
        if (sc->nfe_msix != 0) {
            NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
            NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
        } else if (sc->nfe_msi != 0) {
            NFE_WRITE(sc, NFE_MSI_MAP0, 0);
            NFE_WRITE(sc, NFE_MSI_MAP1, 0);
        }
    }

    /* Set IRQ status/mask register. */
    sc->nfe_irq_status = NFE_IRQ_STATUS;
    sc->nfe_irq_mask = NFE_IRQ_MASK;
    sc->nfe_intrs = NFE_IRQ_WANTED;
    sc->nfe_nointrs = 0;
    if (sc->nfe_msix != 0) {
        sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
        sc->nfe_nointrs = NFE_IRQ_WANTED;
    } else if (sc->nfe_msi != 0) {
        sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
        sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
    }
    sc->nfe_devid = pci_get_device(dev);
    sc->nfe_revid = pci_get_revid(dev);
    sc->nfe_flags = 0;

    switch (sc->nfe_devid) {
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
    case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
        break;
    case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
        sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
        break;
    case PCI_PRODUCT_NVIDIA_CK804_LAN1:
    case PCI_PRODUCT_NVIDIA_CK804_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
            NFE_PWR_MGMT | NFE_MIB_V1;
        break;
    case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
            NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
        break;
    case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
    case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
        sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
            NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
        break;
    case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
        /* XXX flow control */
        sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
            NFE_CORRECT_MACADDR | NFE_MIB_V3;
        break;
    case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
        /* XXX flow control */
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
            NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
        break;
    case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
    case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
        sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
            NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
            NFE_MIB_V2;
        break;
    }

    nfe_power(sc);
    /* Check for reversed Ethernet address */
    if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
        sc->nfe_flags |= NFE_CORRECT_MACADDR;
    nfe_get_macaddr(sc, sc->eaddr);
    /*
     * Allocate the parent bus DMA tag appropriate for PCI.
     */
    dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
    if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
        dma_addr_max = NFE_DMA_MAXADDR;
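    /*
     * NFE_DMA_MAXADDR presumably reflects the 40-bit DMA reach of the
     * NFE_40BIT_ADDR MACs; everything else is limited to the low 4GB,
     * hence the BUS_SPACE_MAXADDR_32BIT default above.
     */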
    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->nfe_dev),       /* parent */
        1, 0,                               /* alignment, boundary */
        dma_addr_max,                       /* lowaddr */
        BUS_SPACE_MAXADDR,                  /* highaddr */
        NULL, NULL,                         /* filter, filterarg */
        BUS_SPACE_MAXSIZE_32BIT, 0,         /* maxsize, nsegments */
        BUS_SPACE_MAXSIZE_32BIT,            /* maxsegsize */
        0,                                  /* flags */
        NULL, NULL,                         /* lockfunc, lockarg */
        &sc->nfe_parent_tag);
    if (error)
        goto fail;
    ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "can not if_gethandle()\n");
        error = ENOSPC;
        goto fail;
    }

    /*
     * Allocate Tx and Rx rings.
     */
    if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
        goto fail;
    if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
        goto fail;
    nfe_alloc_jrx_ring(sc, &sc->jrxq);
    /* Create sysctl node. */
    nfe_sysctl_node(sc);

    if_setsoftc(ifp, sc);
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
    if_setioctlfn(ifp, nfe_ioctl);
    if_setstartfn(ifp, nfe_start);
    if_sethwassist(ifp, 0);
    if_setcapabilities(ifp, 0);
    if_setinitfn(ifp, nfe_init);
    if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1);
    if_setsendqready(ifp);

    if (sc->nfe_flags & NFE_HW_CSUM) {
        if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0);
        if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0);
    }
    if_setcapenable(ifp, if_getcapabilities(ifp));

    sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
    /* VLAN capability setup. */
    if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
    if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
        if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
        if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0)
            if_setcapabilitiesbit(ifp,
                (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0);
    }

    if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
        if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
    if_setcapenable(ifp, if_getcapabilities(ifp));

    /*
     * Tell the upper layer(s) we support long frames.
     * Must appear after the call to ether_ifattach() because
     * ether_ifattach() sets ifi_hdrlen to the default value.
     */
    if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

#ifdef DEVICE_POLLING
    if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
    /* Do MII setup */
    phyloc = MII_PHY_ANY;
    if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 ||
        sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 ||
        sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 ||
        sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) {
        if (nfe_detect_msik9(sc) != 0)
            phyloc = 0;
    }
    error = mii_attach(dev, &sc->nfe_miibus, ifp,
        (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts,
        BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE);
    if (error != 0) {
        device_printf(dev, "attaching PHYs failed\n");
        goto fail;
    }
    ether_ifattach(ifp, sc->eaddr);

    TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
    sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
        taskqueue_thread_enqueue, &sc->nfe_tq);
    taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
        device_get_nameunit(sc->nfe_dev));

    if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
        error = bus_setup_intr(dev, sc->nfe_irq[0],
            INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
            &sc->nfe_intrhand[0]);
    } else {
        for (i = 0; i < NFE_MSI_MESSAGES; i++) {
            error = bus_setup_intr(dev, sc->nfe_irq[i],
                INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
                &sc->nfe_intrhand[i]);
            if (error != 0)
                break;
        }
    }
    if (error) {
        device_printf(dev, "couldn't set up irq\n");
        taskqueue_free(sc->nfe_tq);
static int
nfe_detach(device_t dev)
{
    struct nfe_softc *sc;
    if_t ifp;
    uint8_t eaddr[ETHER_ADDR_LEN];
    int i, rid;

    sc = device_get_softc(dev);
    KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
    ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
    if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
        ether_poll_deregister(ifp);
#endif
    if (device_is_attached(dev)) {
        NFE_LOCK(sc);
        nfe_stop(ifp);
        if_setflagbits(ifp, 0, IFF_UP);
        NFE_UNLOCK(sc);
        callout_drain(&sc->nfe_stat_ch);
        ether_ifdetach(ifp);
    }

    /* Restore Ethernet address */
    if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
            eaddr[i] = sc->eaddr[5 - i];
        }
    } else
        bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
    nfe_set_macaddr(sc, eaddr);

    if (sc->nfe_miibus != NULL)
        device_delete_child(dev, sc->nfe_miibus);
    bus_generic_detach(dev);
    if (sc->nfe_tq != NULL) {
        taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
        taskqueue_free(sc->nfe_tq);
        sc->nfe_tq = NULL;
    }

    for (i = 0; i < NFE_MSI_MESSAGES; i++) {
        if (sc->nfe_intrhand[i] != NULL) {
            bus_teardown_intr(dev, sc->nfe_irq[i],
                sc->nfe_intrhand[i]);
            sc->nfe_intrhand[i] = NULL;
        }
    }

    if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
        if (sc->nfe_irq[0] != NULL)
            bus_release_resource(dev, SYS_RES_IRQ, 0,
                sc->nfe_irq[0]);
    } else {
        for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
            if (sc->nfe_irq[i] != NULL) {
                bus_release_resource(dev, SYS_RES_IRQ, rid,
                    sc->nfe_irq[i]);
                sc->nfe_irq[i] = NULL;
            }
        }
        pci_release_msi(dev);
    }
    if (sc->nfe_msix_pba_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
            sc->nfe_msix_pba_res);
        sc->nfe_msix_pba_res = NULL;
    }
    if (sc->nfe_msix_res != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
            sc->nfe_msix_res);
        sc->nfe_msix_res = NULL;
    }
    if (sc->nfe_res[0] != NULL) {
        bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
            sc->nfe_res[0]);
        sc->nfe_res[0] = NULL;
    }

    nfe_free_tx_ring(sc, &sc->txq);
    nfe_free_rx_ring(sc, &sc->rxq);
    nfe_free_jrx_ring(sc, &sc->jrxq);

    if (sc->nfe_parent_tag) {
        bus_dma_tag_destroy(sc->nfe_parent_tag);
        sc->nfe_parent_tag = NULL;
    }

    mtx_destroy(&sc->nfe_mtx);

    return (0);
}
static int
nfe_suspend(device_t dev)
{
    struct nfe_softc *sc;

    sc = device_get_softc(dev);
    NFE_LOCK(sc);
    nfe_stop(sc->nfe_ifp);
    nfe_set_wol(sc);
    sc->nfe_suspended = 1;
    NFE_UNLOCK(sc);
    return (0);
}

static int
nfe_resume(device_t dev)
{
    struct nfe_softc *sc;
    if_t ifp;

    sc = device_get_softc(dev);
    NFE_LOCK(sc);
    nfe_power(sc);
    ifp = sc->nfe_ifp;
    if (if_getflags(ifp) & IFF_UP)
        nfe_init_locked(sc);
    sc->nfe_suspended = 0;
    NFE_UNLOCK(sc);
    return (0);
}
static int
nfe_can_use_msix(struct nfe_softc *sc)
{
    static struct msix_blacklist {
        char *maker;
        char *product;
    } msix_blacklists[] = {
        { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" }
    };

    struct msix_blacklist *mblp;
    char *maker, *product;
    int count, n, use_msix;

    /*
     * Search base board manufacturer and product name table
     * to see whether this system has a known MSI/MSI-X issue.
     */
    maker = getenv("smbios.planar.maker");
    product = getenv("smbios.planar.product");
    use_msix = 1;
    if (maker != NULL && product != NULL) {
        count = sizeof(msix_blacklists) / sizeof(msix_blacklists[0]);
        mblp = msix_blacklists;
        for (n = 0; n < count; n++) {
            if (strcmp(maker, mblp->maker) == 0 &&
                strcmp(product, mblp->product) == 0) {
                use_msix = 0;
                break;
            }
            mblp++;
        }
    }
/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
    uint32_t pwr;

    if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
        return;
    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
    NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
    DELAY(100);
    NFE_WRITE(sc, NFE_MAC_RESET, 0);
    DELAY(100);
    NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
    pwr = NFE_READ(sc, NFE_PWR2_CTL);
    pwr &= ~NFE_PWR2_WAKEUP_MASK;
    if (sc->nfe_revid >= 0xa3 &&
        (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
        sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
        pwr |= NFE_PWR2_REVA3;
    NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}
static void
nfe_miibus_statchg(device_t dev)
{
    struct nfe_softc *sc;
    struct mii_data *mii;
    if_t ifp;
    uint32_t rxctl, txctl;

    sc = device_get_softc(dev);

    mii = device_get_softc(sc->nfe_miibus);
    ifp = sc->nfe_ifp;

    sc->nfe_link = 0;
    if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
        (IFM_ACTIVE | IFM_AVALID)) {
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
        case IFM_10_T:
        case IFM_100_TX:
        case IFM_1000_T:
            sc->nfe_link = 1;
            break;
        default:
            break;
        }
    }

    nfe_mac_config(sc, mii);
    txctl = NFE_READ(sc, NFE_TX_CTL);
    rxctl = NFE_READ(sc, NFE_RX_CTL);
    if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
        txctl |= NFE_TX_START;
        rxctl |= NFE_RX_START;
    } else {
        txctl &= ~NFE_TX_START;
        rxctl &= ~NFE_RX_START;
    }
    NFE_WRITE(sc, NFE_TX_CTL, txctl);
    NFE_WRITE(sc, NFE_RX_CTL, rxctl);
}
static void
nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii)
{
    uint32_t link, misc, phy, seed;
    uint32_t val;

    NFE_LOCK_ASSERT(sc);

    phy = NFE_READ(sc, NFE_PHY_IFACE);
    phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

    seed = NFE_READ(sc, NFE_RNDSEED);
    seed &= ~NFE_SEED_MASK;

    misc = NFE_MISC1_MAGIC;
    link = NFE_MEDIA_SET;

    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) {
        phy |= NFE_PHY_HDX;     /* half-duplex */
        misc |= NFE_MISC1_HDX;
    }

    switch (IFM_SUBTYPE(mii->mii_media_active)) {
    case IFM_1000_T:    /* full-duplex only */
        link |= NFE_MEDIA_1000T;
        seed |= NFE_SEED_1000T;
        phy |= NFE_PHY_1000T;
        break;
    case IFM_100_TX:
        link |= NFE_MEDIA_100TX;
        seed |= NFE_SEED_100TX;
        phy |= NFE_PHY_100TX;
        break;
    case IFM_10_T:
        link |= NFE_MEDIA_10T;
        seed |= NFE_SEED_10T;
        break;
    }

    if ((phy & 0x10000000) != 0) {
        if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
            val = NFE_R1_MAGIC_1000;
        else
            val = NFE_R1_MAGIC_10_100;
    } else
        val = NFE_R1_MAGIC_DEFAULT;
    NFE_WRITE(sc, NFE_SETUP_R1, val);

    NFE_WRITE(sc, NFE_RNDSEED, seed);   /* XXX: gigabit NICs only? */

    NFE_WRITE(sc, NFE_PHY_IFACE, phy);
    NFE_WRITE(sc, NFE_MISC1, misc);
    NFE_WRITE(sc, NFE_LINKSPEED, link);

    if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
        /* It seems all hardware supports Rx pause frames. */
        val = NFE_READ(sc, NFE_RXFILTER);
        if ((IFM_OPTIONS(mii->mii_media_active) &
            IFM_ETH_RXPAUSE) != 0)
            val |= NFE_PFF_RX_PAUSE;
        else
            val &= ~NFE_PFF_RX_PAUSE;
        NFE_WRITE(sc, NFE_RXFILTER, val);
        if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
            val = NFE_READ(sc, NFE_MISC1);
            if ((IFM_OPTIONS(mii->mii_media_active) &
                IFM_ETH_TXPAUSE) != 0) {
                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                    NFE_TX_PAUSE_FRAME_ENABLE);
                val |= NFE_MISC1_TX_PAUSE;
            } else {
                val &= ~NFE_MISC1_TX_PAUSE;
                NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                    NFE_TX_PAUSE_FRAME_DISABLE);
            }
            NFE_WRITE(sc, NFE_MISC1, val);
        }
    } else {
        /* disable rx/tx pause frames */
        val = NFE_READ(sc, NFE_RXFILTER);
        val &= ~NFE_PFF_RX_PAUSE;
        NFE_WRITE(sc, NFE_RXFILTER, val);
        if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
            NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
                NFE_TX_PAUSE_FRAME_DISABLE);
            val = NFE_READ(sc, NFE_MISC1);
            val &= ~NFE_MISC1_TX_PAUSE;
            NFE_WRITE(sc, NFE_MISC1, val);
        }
    }
}
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
    struct nfe_softc *sc = device_get_softc(dev);
    uint32_t val;
    int ntries;

    NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

    if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
        NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
        DELAY(100);
    }

    NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

    for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
        DELAY(100);
        if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
            break;
    }
    if (ntries == NFE_TIMEOUT) {
        DPRINTFN(sc, 2, "timeout waiting for PHY\n");
        return (0);
    }

    if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
        DPRINTFN(sc, 2, "could not read PHY\n");
        return (0);
    }

    val = NFE_READ(sc, NFE_PHY_DATA);
    if (val != 0xffffffff && val != 0)
        sc->mii_phyaddr = phy;

    DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

    return (val);
}
static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
    struct nfe_softc *sc = device_get_softc(dev);
    uint32_t ctl;
    int ntries;

    NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

    if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
        NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
        DELAY(100);
    }

    NFE_WRITE(sc, NFE_PHY_DATA, val);
    ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
    NFE_WRITE(sc, NFE_PHY_CTL, ctl);

    for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
        DELAY(100);
        if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
            break;
    }
#ifdef NFE_DEBUG
    if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
        device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
    return (0);
}
struct nfe_dmamap_arg {
    bus_addr_t nfe_busaddr;
};
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
    struct nfe_dmamap_arg ctx;
    struct nfe_rx_data *data;
    void *desc;
    int i, error, descsize;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }

    ring->cur = ring->next = 0;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        NFE_RING_ALIGN, 0,                  /* alignment, boundary */
        BUS_SPACE_MAXADDR,                  /* lowaddr */
        BUS_SPACE_MAXADDR,                  /* highaddr */
        NULL, NULL,                         /* filter, filterarg */
        NFE_RX_RING_COUNT * descsize, 1,    /* maxsize, nsegments */
        NFE_RX_RING_COUNT * descsize,       /* maxsegsize */
        0,                                  /* flags */
        NULL, NULL,                         /* lockfunc, lockarg */
        &ring->rx_desc_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
        goto fail;
    }

    /* allocate memory for descriptors */
    error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA map\n");
        goto fail;
    }
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        ring->desc64 = desc;
    else
        ring->desc32 = desc;

    /* map desc to device visible address space */
    ctx.nfe_busaddr = 0;
    error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
        NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not load desc DMA map\n");
        goto fail;
    }
    ring->physaddr = ctx.nfe_busaddr;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        1, 0,                       /* alignment, boundary */
        BUS_SPACE_MAXADDR,          /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        MCLBYTES, 1,                /* maxsize, nsegments */
        MCLBYTES,                   /* maxsegsize */
        0,                          /* flags */
        NULL, NULL,                 /* lockfunc, lockarg */
        &ring->rx_data_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
        goto fail;
    }

    error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create Rx DMA spare map\n");
        goto fail;
    }

    /*
     * Pre-allocate Rx buffers and populate Rx ring.
     */
    for (i = 0; i < NFE_RX_RING_COUNT; i++) {
        data = &sc->rxq.data[i];
        data->rx_data_map = NULL;
        data->m = NULL;
        error = bus_dmamap_create(ring->rx_data_tag, 0,
            &data->rx_data_map);
        if (error != 0) {
            device_printf(sc->nfe_dev,
                "could not create Rx DMA map\n");
            goto fail;
        }
    }

fail:
    return (error);
}
static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
    struct nfe_dmamap_arg ctx;
    struct nfe_rx_data *data;
    void *desc;
    int i, error, descsize;

    if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
        return;
    if (jumbo_disable != 0) {
        device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
        sc->nfe_jumbo_disable = 1;
        return;
    }

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->jdesc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->jdesc32;
        descsize = sizeof (struct nfe_desc32);
    }

    ring->jcur = ring->jnext = 0;

    /* Create DMA tag for jumbo Rx ring. */
    error = bus_dma_tag_create(sc->nfe_parent_tag,
        NFE_RING_ALIGN, 0,                      /* alignment, boundary */
        BUS_SPACE_MAXADDR,                      /* lowaddr */
        BUS_SPACE_MAXADDR,                      /* highaddr */
        NULL, NULL,                             /* filter, filterarg */
        NFE_JUMBO_RX_RING_COUNT * descsize,     /* maxsize */
        1,                                      /* nsegments */
        NFE_JUMBO_RX_RING_COUNT * descsize,     /* maxsegsize */
        0,                                      /* flags */
        NULL, NULL,                             /* lockfunc, lockarg */
        &ring->jrx_desc_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create jumbo ring DMA tag\n");
        goto fail;
    }

    /* Create DMA tag for jumbo Rx buffers. */
    error = bus_dma_tag_create(sc->nfe_parent_tag,
        1, 0,                                   /* alignment, boundary */
        BUS_SPACE_MAXADDR,                      /* lowaddr */
        BUS_SPACE_MAXADDR,                      /* highaddr */
        NULL, NULL,                             /* filter, filterarg */
        MJUM9BYTES,                             /* maxsize */
        1,                                      /* nsegments */
        MJUM9BYTES,                             /* maxsegsize */
        0,                                      /* flags */
        NULL, NULL,                             /* lockfunc, lockarg */
        &ring->jrx_data_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create jumbo Rx buffer DMA tag\n");
        goto fail;
    }

    /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
    error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not allocate DMA'able memory for jumbo Rx ring\n");
        goto fail;
    }
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        ring->jdesc64 = desc;
    else
        ring->jdesc32 = desc;

    ctx.nfe_busaddr = 0;
    error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
        NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not load DMA'able memory for jumbo Rx ring\n");
        goto fail;
    }
    ring->jphysaddr = ctx.nfe_busaddr;

    /* Create DMA maps for jumbo Rx buffers. */
    error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
    if (error != 0) {
        device_printf(sc->nfe_dev,
            "could not create jumbo Rx DMA spare map\n");
        goto fail;
    }

    for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
        data = &sc->jrxq.jdata[i];
        data->rx_data_map = NULL;
        data->m = NULL;
        error = bus_dmamap_create(ring->jrx_data_tag, 0,
            &data->rx_data_map);
        if (error != 0) {
            device_printf(sc->nfe_dev,
                "could not create jumbo Rx DMA map\n");
            goto fail;
        }
    }

    return;

fail:
    /*
     * Running without jumbo frame support is fine for most cases,
     * so don't fail if the DMA tags/maps for jumbo frames cannot be
     * created.
     */
    nfe_free_jrx_ring(sc, ring);
    device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
        "resource shortage\n");
    sc->nfe_jumbo_disable = 1;
}
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
    void *desc;
    int i, descsize;

    ring->cur = ring->next = 0;
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }
    bzero(desc, descsize * NFE_RX_RING_COUNT);
    for (i = 0; i < NFE_RX_RING_COUNT; i++) {
        if (nfe_newbuf(sc, i) != 0)
            return (ENOBUFS);
    }

    bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);
}

static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
    void *desc;
    int i, descsize;

    ring->jcur = ring->jnext = 0;
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->jdesc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->jdesc32;
        descsize = sizeof (struct nfe_desc32);
    }
    bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
    for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
        if (nfe_jnewbuf(sc, i) != 0)
            return (ENOBUFS);
    }

    bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
    struct nfe_rx_data *data;
    void *desc;
    int i;

    if (sc->nfe_flags & NFE_40BIT_ADDR)
        desc = ring->desc64;
    else
        desc = ring->desc32;

    for (i = 0; i < NFE_RX_RING_COUNT; i++) {
        data = &ring->data[i];
        if (data->rx_data_map != NULL) {
            bus_dmamap_destroy(ring->rx_data_tag,
                data->rx_data_map);
            data->rx_data_map = NULL;
        }
        if (data->m != NULL) {
            m_freem(data->m);
            data->m = NULL;
        }
    }
    if (ring->rx_data_tag != NULL) {
        if (ring->rx_spare_map != NULL) {
            bus_dmamap_destroy(ring->rx_data_tag,
                ring->rx_spare_map);
            ring->rx_spare_map = NULL;
        }
        bus_dma_tag_destroy(ring->rx_data_tag);
        ring->rx_data_tag = NULL;
    }

    if (desc != NULL) {
        bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
        bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
        ring->desc64 = NULL;
        ring->desc32 = NULL;
    }
    if (ring->rx_desc_tag != NULL) {
        bus_dma_tag_destroy(ring->rx_desc_tag);
        ring->rx_desc_tag = NULL;
    }
}

static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
    struct nfe_rx_data *data;
    void *desc;
    int i, descsize;

    if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
        return;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->jdesc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->jdesc32;
        descsize = sizeof (struct nfe_desc32);
    }

    for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
        data = &ring->jdata[i];
        if (data->rx_data_map != NULL) {
            bus_dmamap_destroy(ring->jrx_data_tag,
                data->rx_data_map);
            data->rx_data_map = NULL;
        }
        if (data->m != NULL) {
            m_freem(data->m);
            data->m = NULL;
        }
    }
    if (ring->jrx_data_tag != NULL) {
        if (ring->jrx_spare_map != NULL) {
            bus_dmamap_destroy(ring->jrx_data_tag,
                ring->jrx_spare_map);
            ring->jrx_spare_map = NULL;
        }
        bus_dma_tag_destroy(ring->jrx_data_tag);
        ring->jrx_data_tag = NULL;
    }

    if (desc != NULL) {
        bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
        bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
        ring->jdesc64 = NULL;
        ring->jdesc32 = NULL;
    }

    if (ring->jrx_desc_tag != NULL) {
        bus_dma_tag_destroy(ring->jrx_desc_tag);
        ring->jrx_desc_tag = NULL;
    }
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
    struct nfe_dmamap_arg ctx;
    void *desc;
    int i, error, descsize;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }

    ring->queued = 0;
    ring->cur = ring->next = 0;

    error = bus_dma_tag_create(sc->nfe_parent_tag,
        NFE_RING_ALIGN, 0,                  /* alignment, boundary */
        BUS_SPACE_MAXADDR,                  /* lowaddr */
        BUS_SPACE_MAXADDR,                  /* highaddr */
        NULL, NULL,                         /* filter, filterarg */
        NFE_TX_RING_COUNT * descsize, 1,    /* maxsize, nsegments */
        NFE_TX_RING_COUNT * descsize,       /* maxsegsize */
        0,                                  /* flags */
        NULL, NULL,                         /* lockfunc, lockarg */
        &ring->tx_desc_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
        goto fail;
    }

    error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create desc DMA map\n");
        goto fail;
    }
    if (sc->nfe_flags & NFE_40BIT_ADDR)
        ring->desc64 = desc;
    else
        ring->desc32 = desc;

    ctx.nfe_busaddr = 0;
    error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
        NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not load desc DMA map\n");
        goto fail;
    }
    ring->physaddr = ctx.nfe_busaddr;
    error = bus_dma_tag_create(sc->nfe_parent_tag,
        1, 0,                       /* alignment, boundary */
        BUS_SPACE_MAXADDR,          /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        NFE_TSO_MAXSIZE,            /* maxsize */
        NFE_MAX_SCATTER,            /* nsegments */
        NFE_TSO_MAXSGSIZE,          /* maxsegsize */
        0,                          /* flags */
        NULL, NULL,                 /* lockfunc, lockarg */
        &ring->tx_data_tag);
    if (error != 0) {
        device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
        goto fail;
    }

    for (i = 0; i < NFE_TX_RING_COUNT; i++) {
        error = bus_dmamap_create(ring->tx_data_tag, 0,
            &ring->data[i].tx_data_map);
        if (error != 0) {
            device_printf(sc->nfe_dev,
                "could not create Tx DMA map\n");
            goto fail;
        }
    }

fail:
    return (error);
}
static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
    void *desc;
    int descsize;

    sc->nfe_force_tx = 0;
    ring->queued = 0;
    ring->cur = ring->next = 0;
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
        descsize = sizeof (struct nfe_desc64);
    } else {
        desc = ring->desc32;
        descsize = sizeof (struct nfe_desc32);
    }
    bzero(desc, descsize * NFE_TX_RING_COUNT);

    bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
    struct nfe_tx_data *data;
    void *desc;
    int i;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc = ring->desc64;
    } else {
        desc = ring->desc32;
    }

    for (i = 0; i < NFE_TX_RING_COUNT; i++) {
        data = &ring->data[i];

        if (data->m != NULL) {
            bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
            m_freem(data->m);
            data->m = NULL;
        }
        if (data->tx_data_map != NULL) {
            bus_dmamap_destroy(ring->tx_data_tag,
                data->tx_data_map);
            data->tx_data_map = NULL;
        }
    }

    if (ring->tx_data_tag != NULL) {
        bus_dma_tag_destroy(ring->tx_data_tag);
        ring->tx_data_tag = NULL;
    }

    if (desc != NULL) {
        bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
        bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
        ring->desc64 = NULL;
        ring->desc32 = NULL;
        bus_dma_tag_destroy(ring->tx_desc_tag);
        ring->tx_desc_tag = NULL;
    }
}
#ifdef DEVICE_POLLING
static poll_handler_drv_t nfe_poll;

static int
nfe_poll(if_t ifp, enum poll_cmd cmd, int count)
{
    struct nfe_softc *sc = if_getsoftc(ifp);
    uint32_t r;
    int rx_npkts = 0;

    NFE_LOCK(sc);

    if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
        NFE_UNLOCK(sc);
        return (rx_npkts);
    }

    if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
        rx_npkts = nfe_jrxeof(sc, count, &rx_npkts);
    else
        rx_npkts = nfe_rxeof(sc, count, &rx_npkts);
    nfe_txeof(sc);
    if (!if_sendq_empty(ifp))
        nfe_start_locked(ifp);

    if (cmd == POLL_AND_CHECK_STATUS) {
        if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
            NFE_UNLOCK(sc);
            return (rx_npkts);
        }
        NFE_WRITE(sc, sc->nfe_irq_status, r);

        if (r & NFE_IRQ_LINK) {
            NFE_READ(sc, NFE_PHY_STATUS);
            NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
            DPRINTF(sc, "link state changed\n");
        }
    }

    NFE_UNLOCK(sc);
    return (rx_npkts);
}
#endif /* DEVICE_POLLING */
static void
nfe_set_intr(struct nfe_softc *sc)
{

    if (sc->nfe_msi != 0)
        NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}
/* In MSI-X, a write to the mask registers behaves as XOR. */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

    if (sc->nfe_msix != 0) {
        /* XXX Should have a better way to enable interrupts! */
        if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
            NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
    } else
        NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}

static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

    if (sc->nfe_msix != 0) {
        /* XXX Should have a better way to disable interrupts! */
        if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
            NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
    } else
        NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}
static int
nfe_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
    struct nfe_softc *sc;
    struct ifreq *ifr;
    struct mii_data *mii;
    int error, init, mask;

    sc = if_getsoftc(ifp);
    ifr = (struct ifreq *) data;
    error = 0;
    init = 0;
    switch (cmd) {
    case SIOCSIFMTU:
        if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
            error = EINVAL;
        else if (if_getmtu(ifp) != ifr->ifr_mtu) {
            if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
                (sc->nfe_jumbo_disable != 0)) &&
                ifr->ifr_mtu > ETHERMTU)
                error = EINVAL;
            else {
                NFE_LOCK(sc);
                if_setmtu(ifp, ifr->ifr_mtu);
                if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
                    if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
                    nfe_init_locked(sc);
                }
                NFE_UNLOCK(sc);
            }
        }
        break;
    case SIOCSIFFLAGS:
        NFE_LOCK(sc);
        if (if_getflags(ifp) & IFF_UP) {
            /*
             * If only the PROMISC or ALLMULTI flag changes, then
             * don't do a full re-init of the chip, just update
             * the Rx filter.
             */
            if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) &&
                ((if_getflags(ifp) ^ sc->nfe_if_flags) &
                (IFF_ALLMULTI | IFF_PROMISC)) != 0)
                nfe_setmulti(sc);
            else
                nfe_init_locked(sc);
        } else {
            if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
                nfe_stop(ifp);
        }
        sc->nfe_if_flags = if_getflags(ifp);
        NFE_UNLOCK(sc);
        error = 0;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
            NFE_LOCK(sc);
            nfe_setmulti(sc);
            NFE_UNLOCK(sc);
            error = 0;
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        mii = device_get_softc(sc->nfe_miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
        if ((mask & IFCAP_POLLING) != 0) {
            if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
                error = ether_poll_register_drv(nfe_poll, ifp);
                if (error)
                    break;
                NFE_LOCK(sc);
                nfe_disable_intr(sc);
                if_setcapenablebit(ifp, IFCAP_POLLING, 0);
                NFE_UNLOCK(sc);
            } else {
                error = ether_poll_deregister(ifp);
                /* Enable interrupt even in error case */
                NFE_LOCK(sc);
                nfe_enable_intr(sc);
                if_setcapenablebit(ifp, 0, IFCAP_POLLING);
                NFE_UNLOCK(sc);
            }
        }
#endif /* DEVICE_POLLING */
        if ((mask & IFCAP_WOL_MAGIC) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
            if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
        if ((mask & IFCAP_TXCSUM) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) {
            if_togglecapenable(ifp, IFCAP_TXCSUM);
            if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
                if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0);
            else
                if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES);
        }
        if ((mask & IFCAP_RXCSUM) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) {
            if_togglecapenable(ifp, IFCAP_RXCSUM);
            init++;
        }
        if ((mask & IFCAP_TSO4) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) {
            if_togglecapenable(ifp, IFCAP_TSO4);
            if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0)
                if_sethwassistbits(ifp, CSUM_TSO, 0);
            else
                if_sethwassistbits(ifp, 0, CSUM_TSO);
        }
        if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0)
            if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);
        if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
            (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
            if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
            if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0)
                if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO);
            init++;
        }
        /*
         * It seems that VLAN stripping requires Rx checksum offload.
         * Unfortunately FreeBSD has no way to disable only the Rx side
         * of VLAN stripping, so when we know Rx checksum offload is
         * disabled, turn the entire hardware VLAN assist off.
         */
        if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) {
            if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
                init++;
            if_setcapenablebit(ifp, 0,
                (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO));
        }
        if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
            if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
            nfe_init(sc);
        }
        if_vlancap(ifp);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}
static int
nfe_intr(void *arg)
{
    struct nfe_softc *sc;
    uint32_t status;

    sc = (struct nfe_softc *)arg;

    status = NFE_READ(sc, sc->nfe_irq_status);
    if (status == 0 || status == 0xffffffff)
        return (FILTER_STRAY);
    nfe_disable_intr(sc);
    taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);

    return (FILTER_HANDLED);
}
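/*
 * Deferred interrupt handler.  nfe_intr() runs as a filter that only
 * masks the chip and enqueues this task; the task then handles link,
 * Rx and Tx events under the softc lock and re-enables interrupts
 * once the rings are drained.
 */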
static void
nfe_int_task(void *arg, int pending)
{
    struct nfe_softc *sc = arg;
    if_t ifp = sc->nfe_ifp;
    uint32_t r;
    int domore;

    NFE_LOCK(sc);

    if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
        nfe_enable_intr(sc);
        NFE_UNLOCK(sc);
        return;     /* not for us */
    }
    NFE_WRITE(sc, sc->nfe_irq_status, r);

    DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);

#ifdef DEVICE_POLLING
    if (if_getcapenable(ifp) & IFCAP_POLLING) {
        NFE_UNLOCK(sc);
        return;
    }
#endif

    if (r & NFE_IRQ_LINK) {
        NFE_READ(sc, NFE_PHY_STATUS);
        NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
        DPRINTF(sc, "link state changed\n");
    }

    if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
        NFE_UNLOCK(sc);
        nfe_disable_intr(sc);
        return;
    }

    domore = 0;
    /* check Rx ring */
    if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
        domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
    else
        domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
    /* check Tx ring */
    nfe_txeof(sc);

    if (!if_sendq_empty(ifp))
        nfe_start_locked(ifp);

    NFE_UNLOCK(sc);

    if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
        taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
        return;
    }

    /* Reenable interrupts. */
    nfe_enable_intr(sc);
}
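/*
 * Requeue the existing mbuf for slot 'idx': reprogram the descriptor
 * from the saved bus address and mark it NFE_RX_READY again.  Used on
 * input errors and buffer-allocation failures, where the frame is
 * dropped but the ring slot must stay populated.
 */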
static __inline void
nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;

    data = &sc->rxq.data[idx];
    m = data->m;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->rxq.desc64[idx];
        /* VLAN packet may have overwritten it. */
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
        desc64->length = htole16(m->m_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->rxq.desc32[idx];
        desc32->length = htole16(m->m_len);
        desc32->flags = htole16(NFE_RX_READY);
    }
}
static __inline void
nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;

    data = &sc->jrxq.jdata[idx];
    m = data->m;

    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->jrxq.jdesc64[idx];
        /* VLAN packet may have overwritten it. */
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
        desc64->length = htole16(m->m_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->jrxq.jdesc32[idx];
        desc32->length = htole16(m->m_len);
        desc32->flags = htole16(NFE_RX_READY);
    }
}
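/*
 * Attach a fresh mbuf cluster to Rx slot 'idx'.  The buffer is loaded
 * into the spare DMA map first, then the slot's map and the spare are
 * swapped, so the old buffer can be passed up the stack without a copy
 * and the slot never ends up empty on failure.
 */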
static int
nfe_newbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_rx_data *data;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (ENOBUFS);

    m->m_len = m->m_pkthdr.len = MCLBYTES;
    m_adj(m, ETHER_ALIGN);
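    /*
     * The 2-byte m_adj() above implements ETHER_ALIGN: with the
     * 14-byte Ethernet header, it leaves the IP header 32-bit
     * aligned within the cluster.
     */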
    if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
        m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    data = &sc->rxq.data[idx];
    if (data->m != NULL) {
        bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
    }
    map = data->rx_data_map;
    data->rx_data_map = sc->rxq.rx_spare_map;
    sc->rxq.rx_spare_map = map;
    bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
        BUS_DMASYNC_PREREAD);
    data->paddr = segs[0].ds_addr;
    data->m = m;
    /* update mapping address in h/w descriptor */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->rxq.desc64[idx];
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc64->length = htole16(segs[0].ds_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->rxq.desc32[idx];
        desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc32->length = htole16(segs[0].ds_len);
        desc32->flags = htole16(NFE_RX_READY);
    }

    return (0);
}
static int
nfe_jnewbuf(struct nfe_softc *sc, int idx)
{
    struct nfe_rx_data *data;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct mbuf *m;
    bus_dma_segment_t segs[1];
    bus_dmamap_t map;
    int nsegs;

    m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
    if (m == NULL)
        return (ENOBUFS);
    if ((m->m_flags & M_EXT) == 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    m->m_pkthdr.len = m->m_len = MJUM9BYTES;
    m_adj(m, ETHER_ALIGN);

    if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
        sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    data = &sc->jrxq.jdata[idx];
    if (data->m != NULL) {
        bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
    }
    map = data->rx_data_map;
    data->rx_data_map = sc->jrxq.jrx_spare_map;
    sc->jrxq.jrx_spare_map = map;
    bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
        BUS_DMASYNC_PREREAD);
    data->paddr = segs[0].ds_addr;
    data->m = m;
    /* update mapping address in h/w descriptor */
    if (sc->nfe_flags & NFE_40BIT_ADDR) {
        desc64 = &sc->jrxq.jdesc64[idx];
        desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
        desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc64->length = htole16(segs[0].ds_len);
        desc64->flags = htole16(NFE_RX_READY);
    } else {
        desc32 = &sc->jrxq.jdesc32[idx];
        desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
        desc32->length = htole16(segs[0].ds_len);
        desc32->flags = htole16(NFE_RX_READY);
    }

    return (0);
}
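/*
 * Process up to 'count' received frames from the standard Rx ring.
 * Returns EAGAIN when the budget ran out with work possibly still
 * pending, so the caller (the interrupt task or the polling loop)
 * knows to come back; the per-call packet count is reported through
 * 'rx_npktsp'.
 */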
static int
nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
    if_t ifp = sc->nfe_ifp;
    struct nfe_desc32 *desc32;
    struct nfe_desc64 *desc64;
    struct nfe_rx_data *data;
    struct mbuf *m;
    uint16_t flags;
    int len, prog, rx_npkts;
    uint32_t vtag = 0;

    rx_npkts = 0;
    NFE_LOCK_ASSERT(sc);

    bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
        BUS_DMASYNC_POSTREAD);

    for (prog = 0; ; NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
        if (count <= 0)
            break;
        count--;

        data = &sc->rxq.data[sc->rxq.cur];

        if (sc->nfe_flags & NFE_40BIT_ADDR) {
            desc64 = &sc->rxq.desc64[sc->rxq.cur];
            vtag = le32toh(desc64->physaddr[1]);
            flags = le16toh(desc64->flags);
            len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
        } else {
            desc32 = &sc->rxq.desc32[sc->rxq.cur];
            flags = le16toh(desc32->flags);
            len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
        }

        if (flags & NFE_RX_READY)
            break;
        prog++;
        if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
            if (!(flags & NFE_RX_VALID_V1)) {
                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                nfe_discard_rxbuf(sc, sc->rxq.cur);
                continue;
            }
            if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
                flags &= ~NFE_RX_ERROR;
                len--;  /* fix buffer length */
            }
        } else {
            if (!(flags & NFE_RX_VALID_V2)) {
                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                nfe_discard_rxbuf(sc, sc->rxq.cur);
                continue;
            }

            if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
                flags &= ~NFE_RX_ERROR;
                len--;  /* fix buffer length */
            }
        }

        if (flags & NFE_RX_ERROR) {
            if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
            nfe_discard_rxbuf(sc, sc->rxq.cur);
            continue;
        }

        m = data->m;
        if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
            if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
            nfe_discard_rxbuf(sc, sc->rxq.cur);
            continue;
        }

        if ((vtag & NFE_RX_VTAG) != 0 &&
            (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
            m->m_pkthdr.ether_vtag = vtag & 0xffff;
            m->m_flags |= M_VLANTAG;
        }

        m->m_pkthdr.len = m->m_len = len;
        m->m_pkthdr.rcvif = ifp;

        if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
            if ((flags & NFE_RX_IP_CSUMOK) != 0) {
                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
                    (flags & NFE_RX_UDP_CSUMOK) != 0) {
                    m->m_pkthdr.csum_flags |=
                        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                    m->m_pkthdr.csum_data = 0xffff;
                }
            }
        }

        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

        NFE_UNLOCK(sc);
        if_input(ifp, m);
        NFE_LOCK(sc);
        rx_npkts++;
    }

    if (prog > 0)
        bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    if (rx_npktsp != NULL)
        *rx_npktsp = rx_npkts;
    return (count > 0 ? 0 : EAGAIN);
}
2227 nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
2229 if_t ifp = sc->nfe_ifp;
2230 struct nfe_desc32 *desc32;
2231 struct nfe_desc64 *desc64;
2232 struct nfe_rx_data *data;
2235 int len, prog, rx_npkts;
2239 NFE_LOCK_ASSERT(sc);
2241 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2242 BUS_DMASYNC_POSTREAD);
2244 for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
2250 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2252 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2253 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2254 vtag = le32toh(desc64->physaddr[1]);
2255 flags = le16toh(desc64->flags);
2256 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2258 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2259 flags = le16toh(desc32->flags);
2260 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2263 if (flags & NFE_RX_READY)
2266 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2267 if (!(flags & NFE_RX_VALID_V1)) {
2268 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2269 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2272 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2273 flags &= ~NFE_RX_ERROR;
2274 len--; /* fix buffer length */
2277 if (!(flags & NFE_RX_VALID_V2)) {
2278 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2279 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2283 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2284 flags &= ~NFE_RX_ERROR;
2285 len--; /* fix buffer length */
2289 if (flags & NFE_RX_ERROR) {
2290 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2291 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2296 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2297 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2298 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2302 if ((vtag & NFE_RX_VTAG) != 0 &&
2303 (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
2304 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2305 m->m_flags |= M_VLANTAG;
2308 m->m_pkthdr.len = m->m_len = len;
2309 m->m_pkthdr.rcvif = ifp;
2311 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
2312 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2313 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2314 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2315 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2316 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2317 m->m_pkthdr.csum_flags |=
2318 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2319 m->m_pkthdr.csum_data = 0xffff;
2324 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2333 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2334 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2336 if (rx_npktsp != NULL)
2337 *rx_npktsp = rx_npkts;
2338 return (count > 0 ? 0 : EAGAIN);
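/*
 * Reclaim transmitted descriptors: walk the Tx ring from txq.next up to
 * txq.cur, stop at the first descriptor the hardware still owns
 * (NFE_TX_VALID set), and free each mbuf once its last fragment has
 * completed.
 */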
2343 nfe_txeof(struct nfe_softc *sc)
2345 if_t ifp = sc->nfe_ifp;
2346 struct nfe_desc32 *desc32;
2347 struct nfe_desc64 *desc64;
2348 struct nfe_tx_data *data = NULL;
2352 NFE_LOCK_ASSERT(sc);
2354 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2355 BUS_DMASYNC_POSTREAD);
2358 for (cons = sc->txq.next; cons != sc->txq.cur;
2359 NFE_INC(cons, NFE_TX_RING_COUNT)) {
2360 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2361 desc64 = &sc->txq.desc64[cons];
2362 flags = le16toh(desc64->flags);
2364 desc32 = &sc->txq.desc32[cons];
2365 flags = le16toh(desc32->flags);
2368 if (flags & NFE_TX_VALID)
2373 data = &sc->txq.data[cons];
2375 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2376 if ((flags & NFE_TX_LASTFRAG_V1) == 0)	/* not the last fragment */
2378 if ((flags & NFE_TX_ERROR_V1) != 0) {
2379 device_printf(sc->nfe_dev,
2380 "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
2382 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2384 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2386 if ((flags & NFE_TX_LASTFRAG_V2) == 0)
2388 if ((flags & NFE_TX_ERROR_V2) != 0) {
2389 device_printf(sc->nfe_dev,
2390 "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
2391 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2393 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2396 /* last fragment of the mbuf chain transmitted */
2397 KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
2398 bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
2399 BUS_DMASYNC_POSTWRITE);
2400 bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
2406 sc->nfe_force_tx = 0;
2407 sc->txq.next = cons;
2408 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2409 if (sc->txq.queued == 0)
2410 sc->nfe_watchdog_timer = 0;
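/*
 * DMA-map an mbuf chain and build one Tx descriptor per segment.  The
 * valid bit of the first descriptor is written last so the hardware
 * never sees a partially built chain.
 */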
2415 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2417 struct nfe_desc32 *desc32 = NULL;
2418 struct nfe_desc64 *desc64 = NULL;
2420 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2421 int error, i, nsegs, prod, si;
2423 uint16_t cflags, flags;
2426 prod = si = sc->txq.cur;
2427 map = sc->txq.data[prod].tx_data_map;
2429 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2430 &nsegs, BUS_DMA_NOWAIT);
2431 if (error == EFBIG) {
2432 m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
2439 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2440 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2446 } else if (error != 0)
2454 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2455 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2462 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2463 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
2465 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2466 cflags |= NFE_TX_TSO;
2467 } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2468 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2469 cflags |= NFE_TX_IP_CSUM;
2470 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2471 cflags |= NFE_TX_TCP_UDP_CSUM;
2472 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2473 cflags |= NFE_TX_TCP_UDP_CSUM;
2476 for (i = 0; i < nsegs; i++) {
2477 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2478 desc64 = &sc->txq.desc64[prod];
2479 desc64->physaddr[0] =
2480 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2481 desc64->physaddr[1] =
2482 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2484 desc64->length = htole16(segs[i].ds_len - 1);
2485 desc64->flags = htole16(flags);
2487 desc32 = &sc->txq.desc32[prod];
2489 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2490 desc32->length = htole16(segs[i].ds_len - 1);
2491 desc32->flags = htole16(flags);
2495 * Setting the valid bit in the first descriptor is
2496 * deferred until the whole chain has been fully set up.
2498 flags |= NFE_TX_VALID;
2501 NFE_INC(prod, NFE_TX_RING_COUNT);
2505 * The whole mbuf chain has been DMA mapped; fix the last/first descriptors.
2506 * Checksum flags, vtag and TSO belong to the first fragment only.
2508 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2509 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2510 desc64 = &sc->txq.desc64[si];
2511 if ((m->m_flags & M_VLANTAG) != 0)
2512 desc64->vtag = htole32(NFE_TX_VTAG |
2513 m->m_pkthdr.ether_vtag);
2514 if (tsosegsz != 0) {
2517 * The following indicates the descriptor element
2518 * is a 32-bit quantity.
2520 desc64->length |= htole16((uint16_t)tsosegsz);
2521 desc64->flags |= htole16(tsosegsz >> 16);
2524 * finally, set the valid/checksum/TSO bit in the first
2527 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2529 if (sc->nfe_flags & NFE_JUMBO_SUP)
2530 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2532 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2533 desc32 = &sc->txq.desc32[si];
2534 if (tsosegsz != 0) {
2537 * The following indicates the descriptor element
2538 * is a 32-bit quantity.
2540 desc32->length |= htole16((uint16_t)tsosegsz);
2541 desc32->flags |= htole16(tsosegsz >> 16);
2544 * finally, set the valid/checksum/TSO bit in the first
2547 desc32->flags |= htole16(NFE_TX_VALID | cflags);
2551 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2552 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2553 sc->txq.data[prod].tx_data_map = map;
2554 sc->txq.data[prod].m = m;
2556 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
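/*
 * Program the hardware multicast filter.  The chip matches on a single
 * address/mask pair, so the code below ANDs all enabled multicast
 * addresses together and derives a mask of the bit positions on which
 * they all agree.
 */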
2563 nfe_setmulti(struct nfe_softc *sc)
2565 if_t ifp = sc->nfe_ifp;
2566 int i, mc_count, mcnt;
2568 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2569 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2570 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2574 NFE_LOCK_ASSERT(sc);
2576 if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2577 bzero(addr, ETHER_ADDR_LEN);
2578 bzero(mask, ETHER_ADDR_LEN);
2582 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2583 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2585 mc_count = if_multiaddr_count(ifp, -1);
2586 mta = malloc(sizeof(uint8_t) * ETHER_ADDR_LEN * mc_count, M_DEVBUF,
2589 /* Unable to get memory - process without filtering */
2591 device_printf(sc->nfe_dev, "nfe_setmulti: failed to allocate "
2592 "temp multicast buffer!\n");
2594 bzero(addr, ETHER_ADDR_LEN);
2595 bzero(mask, ETHER_ADDR_LEN);
2599 if_multiaddr_array(ifp, mta, &mcnt, mc_count);
2601 for (i = 0; i < mcnt; i++) {
2605 addrp = mta + (i * ETHER_ADDR_LEN);
2606 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2607 u_int8_t mcaddr = addrp[j];
2613 free(mta, M_DEVBUF);
2615 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2620 addr[0] |= 0x01; /* make sure multicast bit is set */
2622 NFE_WRITE(sc, NFE_MULTIADDR_HI,
2623 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2624 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2625 addr[5] << 8 | addr[4]);
2626 NFE_WRITE(sc, NFE_MULTIMASK_HI,
2627 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2628 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2629 mask[5] << 8 | mask[4]);
2631 filter = NFE_READ(sc, NFE_RXFILTER);
2632 filter &= NFE_PFF_RX_PAUSE;
2633 filter |= NFE_RXFILTER_MAGIC;
2634 filter |= (if_getflags(ifp) & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2635 NFE_WRITE(sc, NFE_RXFILTER, filter);
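/*
 * nfe_start() is the unlocked wrapper: it takes NFE_LOCK and hands off
 * to nfe_start_locked().
 */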
2642 struct nfe_softc *sc = if_getsoftc(ifp);
2645 nfe_start_locked(ifp);
2650 nfe_start_locked(if_t ifp)
2652 struct nfe_softc *sc = if_getsoftc(ifp);
2656 NFE_LOCK_ASSERT(sc);
2658 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2659 IFF_DRV_RUNNING || sc->nfe_link == 0)
2662 while (!if_sendq_empty(ifp)) {
2663 m0 = if_dequeue(ifp);
2668 if (nfe_encap(sc, &m0) != 0) {
2671 if_sendq_prepend(ifp, m0);
2672 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
2676 if_etherbpfmtap(ifp, m0);
2680 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2681 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2684 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2687 * Set a timeout in case the chip goes out to lunch.
2689 sc->nfe_watchdog_timer = 5;
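/*
 * Watchdog: nfe_watchdog_timer is armed in nfe_start_locked() and counted
 * down here; on expiry the cheap recoveries (a missed Tx completion
 * interrupt, a lost Tx kick) are tried before fully reinitializing.
 */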
2695 nfe_watchdog(if_t ifp)
2697 struct nfe_softc *sc = if_getsoftc(ifp);
2699 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2702 /* Check if we've lost a Tx completion interrupt. */
2704 if (sc->txq.queued == 0) {
2705 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2707 if (!if_sendq_empty(ifp))
2708 nfe_start_locked(ifp);
2711 /* Check if we've lost the start Tx command. */
2713 if (sc->nfe_force_tx <= 3) {
2715 * If this turns out to be the cause of watchdog timeouts, the
2716 * following code should be moved into nfe_txeof().
2718 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2721 sc->nfe_force_tx = 0;
2723 if_printf(ifp, "watchdog timeout\n");
2725 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
2726 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2727 nfe_init_locked(sc);
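/*
 * nfe_init() is the unlocked entry point; all of the work is done in
 * nfe_init_locked().
 */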
2734 struct nfe_softc *sc = xsc;
2737 nfe_init_locked(sc);
2743 nfe_init_locked(void *xsc)
2745 struct nfe_softc *sc = xsc;
2746 if_t ifp = sc->nfe_ifp;
2747 struct mii_data *mii;
2751 NFE_LOCK_ASSERT(sc);
2753 mii = device_get_softc(sc->nfe_miibus);
2755 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
2760 sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS;
2762 nfe_init_tx_ring(sc, &sc->txq);
2763 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2764 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2766 error = nfe_init_rx_ring(sc, &sc->rxq);
2768 device_printf(sc->nfe_dev,
2769 "initialization failed: no memory for rx buffers\n");
2775 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2776 val |= NFE_MAC_ADDR_INORDER;
2777 NFE_WRITE(sc, NFE_TX_UNK, val);
2778 NFE_WRITE(sc, NFE_STATUS, 0);
2780 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2781 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2783 sc->rxtxctl = NFE_RXTX_BIT2;
2784 if (sc->nfe_flags & NFE_40BIT_ADDR)
2785 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2786 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2787 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2789 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0)
2790 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2791 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2792 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2794 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2796 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2798 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
2799 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2801 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2803 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2805 /* set MAC address */
2806 nfe_set_macaddr(sc, if_getlladdr(ifp));
2808 /* tell MAC where rings are in memory */
2809 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2810 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2811 NFE_ADDR_HI(sc->jrxq.jphysaddr));
2812 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2813 NFE_ADDR_LO(sc->jrxq.jphysaddr));
2815 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2816 NFE_ADDR_HI(sc->rxq.physaddr));
2817 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2818 NFE_ADDR_LO(sc->rxq.physaddr));
2820 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2821 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
2823 NFE_WRITE(sc, NFE_RING_SIZE,
2824 (NFE_RX_RING_COUNT - 1) << 16 |
2825 (NFE_TX_RING_COUNT - 1));
2827 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2829 /* force the MAC to wake up */
2830 val = NFE_READ(sc, NFE_PWR_STATE);
2831 if ((val & NFE_PWR_WAKEUP) == 0)
2832 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2834 val = NFE_READ(sc, NFE_PWR_STATE);
2835 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2838 /* configure interrupt coalescing/mitigation */
2839 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2841 /* no interrupt mitigation: one interrupt per packet */
2842 NFE_WRITE(sc, NFE_IMTIMER, 970);
2845 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2846 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2847 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2849 /* update the MAC's knowledge of the PHY; generates an NFE_IRQ_LINK interrupt */
2850 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2852 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2854 NFE_WRITE(sc, NFE_WOL_CTL, 0);
2856 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2857 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2859 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2865 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2868 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2870 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2872 /* Clear hardware stats. */
2873 nfe_stats_clear(sc);
2875 #ifdef DEVICE_POLLING
2876 if (if_getcapenable(ifp) & IFCAP_POLLING)
2877 nfe_disable_intr(sc);
2881 nfe_enable_intr(sc); /* enable interrupts */
2883 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
2884 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
2889 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
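/*
 * Stop the chip and free any mbufs still sitting on the Rx, jumbo Rx
 * and Tx rings.  Called with the softc lock held.
 */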
2896 struct nfe_softc *sc = if_getsoftc(ifp);
2897 struct nfe_rx_ring *rx_ring;
2898 struct nfe_jrx_ring *jrx_ring;
2899 struct nfe_tx_ring *tx_ring;
2900 struct nfe_rx_data *rdata;
2901 struct nfe_tx_data *tdata;
2904 NFE_LOCK_ASSERT(sc);
2906 sc->nfe_watchdog_timer = 0;
2907 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
2909 callout_stop(&sc->nfe_stat_ch);
2912 NFE_WRITE(sc, NFE_TX_CTL, 0);
2915 NFE_WRITE(sc, NFE_RX_CTL, 0);
2917 /* disable interrupts */
2918 nfe_disable_intr(sc);
2922 /* free Rx and Tx mbufs still in the queues. */
2924 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2925 rdata = &rx_ring->data[i];
2926 if (rdata->m != NULL) {
2927 bus_dmamap_sync(rx_ring->rx_data_tag,
2928 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2929 bus_dmamap_unload(rx_ring->rx_data_tag,
2930 rdata->rx_data_map);
2936 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2937 jrx_ring = &sc->jrxq;
2938 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2939 rdata = &jrx_ring->jdata[i];
2940 if (rdata->m != NULL) {
2941 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2942 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2943 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2944 rdata->rx_data_map);
2952 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2953 tdata = &tx_ring->data[i];
2954 if (tdata->m != NULL) {
2955 bus_dmamap_sync(tx_ring->tx_data_tag,
2956 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2957 bus_dmamap_unload(tx_ring->tx_data_tag,
2958 tdata->tx_data_map);
2963 /* Update hardware stats. */
2964 nfe_stats_update(sc);
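/* The ifmedia callbacks simply proxy to the MII layer. */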
2969 nfe_ifmedia_upd(if_t ifp)
2971 struct nfe_softc *sc = if_getsoftc(ifp);
2972 struct mii_data *mii;
2975 mii = device_get_softc(sc->nfe_miibus);
2984 nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
2986 struct nfe_softc *sc;
2987 struct mii_data *mii;
2989 sc = if_getsoftc(ifp);
2992 mii = device_get_softc(sc->nfe_miibus);
2995 ifmr->ifm_active = mii->mii_media_active;
2996 ifmr->ifm_status = mii->mii_media_status;
3004 struct nfe_softc *sc;
3005 struct mii_data *mii;
3008 sc = (struct nfe_softc *)xsc;
3010 NFE_LOCK_ASSERT(sc);
3014 mii = device_get_softc(sc->nfe_miibus);
3016 nfe_stats_update(sc);
3018 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
3023 nfe_shutdown(device_t dev)
3026 return (nfe_suspend(dev));
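/*
 * Read the station address from the chip.  Parts flagged with
 * NFE_CORRECT_MACADDR store it in the opposite byte order from the
 * older ones, hence the two paths below.
 */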
3031 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
3035 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
3036 val = NFE_READ(sc, NFE_MACADDR_LO);
3037 addr[0] = (val >> 8) & 0xff;
3038 addr[1] = (val & 0xff);
3040 val = NFE_READ(sc, NFE_MACADDR_HI);
3041 addr[2] = (val >> 24) & 0xff;
3042 addr[3] = (val >> 16) & 0xff;
3043 addr[4] = (val >> 8) & 0xff;
3044 addr[5] = (val & 0xff);
3046 val = NFE_READ(sc, NFE_MACADDR_LO);
3047 addr[5] = (val >> 8) & 0xff;
3048 addr[4] = (val & 0xff);
3050 val = NFE_READ(sc, NFE_MACADDR_HI);
3051 addr[3] = (val >> 24) & 0xff;
3052 addr[2] = (val >> 16) & 0xff;
3053 addr[1] = (val >> 8) & 0xff;
3054 addr[0] = (val & 0xff);
3060 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
3063 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
3064 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
3065 addr[1] << 8 | addr[0]);
3070 * Map a single buffer address.
3074 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3076 struct nfe_dmamap_arg *ctx;
3081 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
3083 ctx = (struct nfe_dmamap_arg *)arg;
3084 ctx->nfe_busaddr = segs[0].ds_addr;
3089 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3095 value = *(int *)arg1;
3096 error = sysctl_handle_int(oidp, &value, 0, req);
3097 if (error || !req->newptr)
3099 if (value < low || value > high)
3101 *(int *)arg1 = value;
3108 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3111 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
3116 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3117 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3118 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3119 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3122 nfe_sysctl_node(struct nfe_softc *sc)
3124 struct sysctl_ctx_list *ctx;
3125 struct sysctl_oid_list *child, *parent;
3126 struct sysctl_oid *tree;
3127 struct nfe_hw_stats *stats;
3130 stats = &sc->nfe_stats;
3131 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3132 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3133 SYSCTL_ADD_PROC(ctx, child,
3134 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3135 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3136 "max number of Rx events to process");
3138 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3139 error = resource_int_value(device_get_name(sc->nfe_dev),
3140 device_get_unit(sc->nfe_dev), "process_limit",
3141 &sc->nfe_process_limit);
3143 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3144 sc->nfe_process_limit > NFE_PROC_MAX) {
3145 device_printf(sc->nfe_dev,
3146 "process_limit value out of range; "
3147 "using default: %d\n", NFE_PROC_DEFAULT);
3148 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3152 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3155 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3156 NULL, "NFE statistics");
3157 parent = SYSCTL_CHILDREN(tree);
3159 /* Rx statistics. */
3160 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3161 NULL, "Rx MAC statistics");
3162 child = SYSCTL_CHILDREN(tree);
3164 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3165 &stats->rx_frame_errors, "Framing Errors");
3166 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3167 &stats->rx_extra_bytes, "Extra Bytes");
3168 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3169 &stats->rx_late_cols, "Late Collisions");
3170 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3171 &stats->rx_runts, "Runts");
3172 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3173 &stats->rx_jumbos, "Jumbos");
3174 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3175 &stats->rx_fifo_overuns, "FIFO Overruns");
3176 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3177 &stats->rx_crc_errors, "CRC Errors");
3178 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3179 &stats->rx_fae, "Frame Alignment Errors");
3180 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3181 &stats->rx_len_errors, "Length Errors");
3182 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3183 &stats->rx_unicast, "Unicast Frames");
3184 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3185 &stats->rx_multicast, "Multicast Frames");
3186 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3187 &stats->rx_broadcast, "Broadcast Frames");
3188 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3189 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3190 &stats->rx_octets, "Octets");
3191 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3192 &stats->rx_pause, "Pause frames");
3193 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3194 &stats->rx_drops, "Drop frames");
3197 /* Tx statistics. */
3198 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3199 NULL, "Tx MAC statistics");
3200 child = SYSCTL_CHILDREN(tree);
3201 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3202 &stats->tx_octets, "Octets");
3203 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3204 &stats->tx_zero_rexmits, "Zero Retransmits");
3205 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3206 &stats->tx_one_rexmits, "One Retransmits");
3207 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3208 &stats->tx_multi_rexmits, "Multiple Retransmits");
3209 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3210 &stats->tx_late_cols, "Late Collisions");
3211 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3212 &stats->tx_fifo_underuns, "FIFO Underruns");
3213 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3214 &stats->tx_carrier_losts, "Carrier Losses");
3215 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3216 &stats->tx_excess_deferals, "Excess Deferrals");
3217 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3218 &stats->tx_retry_errors, "Retry Errors");
3219 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3220 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3221 &stats->tx_deferals, "Deferrals");
3222 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3223 &stats->tx_frames, "Frames");
3224 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3225 &stats->tx_pause, "Pause Frames");
3227 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3228 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3229 &stats->tx_deferals, "Unicast Frames");
3230 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3231 &stats->tx_frames, "Multicast Frames");
3232 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3233 &stats->tx_pause, "Broadcast Frames");
3237 #undef NFE_SYSCTL_STAT_ADD32
3238 #undef NFE_SYSCTL_STAT_ADD64
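/*
 * The MIB counters appear to clear on read, so draining them with dummy
 * NFE_READ()s is enough to zero the hardware statistics.
 */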
3241 nfe_stats_clear(struct nfe_softc *sc)
3245 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3246 mib_cnt = NFE_NUM_MIB_STATV1;
3247 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3248 mib_cnt = NFE_NUM_MIB_STATV2;
3252 for (i = 0; i < mib_cnt; i++)
3253 NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t));
3255 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3256 NFE_READ(sc, NFE_TX_UNICAST);
3257 NFE_READ(sc, NFE_TX_MULTICAST);
3258 NFE_READ(sc, NFE_TX_BROADCAST);
3263 nfe_stats_update(struct nfe_softc *sc)
3265 struct nfe_hw_stats *stats;
3267 NFE_LOCK_ASSERT(sc);
3269 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3272 stats = &sc->nfe_stats;
3273 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3274 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3275 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3276 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3277 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3278 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3279 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3280 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3281 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3282 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3283 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3284 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3285 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3286 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3287 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3288 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3289 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3290 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3291 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3292 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3293 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3295 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3296 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3297 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3298 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3299 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3300 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3301 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3304 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3305 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3306 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3307 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);
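/*
 * Force a 10/100Mbps link for Wake On LAN: the code below restarts
 * autonegotiation advertising 10/100 only (MII_100T2CR cleared),
 * presumably because a gigabit link cannot be sustained on standby
 * power.
 */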
3313 nfe_set_linkspeed(struct nfe_softc *sc)
3315 struct mii_softc *miisc;
3316 struct mii_data *mii;
3319 NFE_LOCK_ASSERT(sc);
3321 mii = device_get_softc(sc->nfe_miibus);
3324 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
3325 (IFM_ACTIVE | IFM_AVALID)) {
3326 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3337 miisc = LIST_FIRST(&mii->mii_phys);
3338 phyno = miisc->mii_phy;
3339 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3341 nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0);
3342 nfe_miibus_writereg(sc->nfe_dev, phyno,
3343 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
3344 nfe_miibus_writereg(sc->nfe_dev, phyno,
3345 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
3349 * Poll link state until nfe(4) gets a 10/100Mbps link.
3351 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
3353 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID))
3354 == (IFM_ACTIVE | IFM_AVALID)) {
3355 switch (IFM_SUBTYPE(mii->mii_media_active)) {
3358 nfe_mac_config(sc, mii);
3365 pause("nfelnk", hz);
3368 if (i == MII_ANEGTICKS_GIGE)
3369 device_printf(sc->nfe_dev,
3370 "establishing a link failed, WOL may not work!");
3373 * No link; force the MAC to a 100Mbps, full-duplex link.
3374 * This is a last resort and may or may not work.
3376 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
3377 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
3378 nfe_mac_config(sc, mii);
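/*
 * Arm (or disarm) Wake On LAN: program NFE_WOL_CTL, keep the receiver
 * running so magic packets can be detected, and request PME through the
 * PCI power-management registers.
 */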
3383 nfe_set_wol(struct nfe_softc *sc)
3390 NFE_LOCK_ASSERT(sc);
3392 if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
3395 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
3396 wolctl = NFE_WOL_MAGIC;
3399 NFE_WRITE(sc, NFE_WOL_CTL, wolctl);
3400 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
3401 nfe_set_linkspeed(sc);
3402 if ((sc->nfe_flags & NFE_PWR_MGMT) != 0)
3403 NFE_WRITE(sc, NFE_PWR2_CTL,
3404 NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS);
3406 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0);
3407 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0);
3408 NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) |
3411 /* Request PME if WOL is requested. */
3412 pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
3413 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
3414 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
3415 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3416 pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);