/*	$OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $	*/

/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/nfe/if_nfereg.h>
#include <dev/nfe/if_nfevar.h>

MODULE_DEPEND(nfe, pci, 1, 1, 1);
MODULE_DEPEND(nfe, ether, 1, 1, 1);
MODULE_DEPEND(nfe, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
static int  nfe_probe(device_t);
static int  nfe_attach(device_t);
static int  nfe_detach(device_t);
static int  nfe_suspend(device_t);
static int  nfe_resume(device_t);
static int  nfe_shutdown(device_t);
static void nfe_power(struct nfe_softc *);
static int  nfe_miibus_readreg(device_t, int, int);
static int  nfe_miibus_writereg(device_t, int, int, int);
static void nfe_miibus_statchg(device_t);
static void nfe_link_task(void *, int);
static void nfe_set_intr(struct nfe_softc *);
static __inline void nfe_enable_intr(struct nfe_softc *);
static __inline void nfe_disable_intr(struct nfe_softc *);
static int  nfe_ioctl(struct ifnet *, u_long, caddr_t);
static void nfe_alloc_msix(struct nfe_softc *, int);
static int nfe_intr(void *);
static void nfe_int_task(void *, int);
static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
static int nfe_newbuf(struct nfe_softc *, int);
static int nfe_jnewbuf(struct nfe_softc *, int);
static int  nfe_rxeof(struct nfe_softc *, int, int *);
static int  nfe_jrxeof(struct nfe_softc *, int, int *);
static void nfe_txeof(struct nfe_softc *);
static int  nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_tx_task(void *, int);
static void nfe_start(struct ifnet *);
static void nfe_watchdog(struct ifnet *);
static void nfe_init(void *);
static void nfe_init_locked(void *);
static void nfe_stop(struct ifnet *);
static int  nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static int  nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
static int  nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
static int  nfe_ifmedia_upd(struct ifnet *);
static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void nfe_tick(void *);
static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);

static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
static void nfe_sysctl_node(struct nfe_softc *);
static void nfe_stats_clear(struct nfe_softc *);
static void nfe_stats_update(struct nfe_softc *);
#ifdef NFE_DEBUG
static int nfedebug = 0;
#define	DPRINTF(sc, ...)	do {				\
	if (nfedebug)						\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#define	DPRINTFN(sc, n, ...)	do {				\
	if (nfedebug >= (n))					\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#else
#define	DPRINTF(sc, ...)
#define	DPRINTFN(sc, n, ...)
#endif
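
/*
 * The macros above compile to nothing unless the kernel config carries
 * "options NFE_DEBUG"; with it, DPRINTFN(sc, n, ...) prints only once the
 * nfedebug level has been raised to n or above.
 */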
#define	NFE_LOCK(_sc)		mtx_lock(&(_sc)->nfe_mtx)
#define	NFE_UNLOCK(_sc)		mtx_unlock(&(_sc)->nfe_mtx)
#define	NFE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
static int msi_disable = 0;
static int msix_disable = 0;
static int jumbo_disable = 0;
TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
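
/*
 * These knobs are boot-time loader tunables.  For example, a user chasing
 * an interrupt problem could force legacy INTx from /boot/loader.conf:
 *
 *	hw.nfe.msi_disable="1"
 *	hw.nfe.msix_disable="1"
 */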
static device_method_t nfe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nfe_probe),
	DEVMETHOD(device_attach,	nfe_attach),
	DEVMETHOD(device_detach,	nfe_detach),
	DEVMETHOD(device_suspend,	nfe_suspend),
	DEVMETHOD(device_resume,	nfe_resume),
	DEVMETHOD(device_shutdown,	nfe_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nfe_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nfe_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nfe_miibus_statchg),

	{ NULL, NULL }
};

static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};

static devclass_t nfe_devclass;

DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
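
/*
 * The second DRIVER_MODULE() declaration attaches the generic miibus
 * driver below nfe, so the PHY is enumerated and serviced by the shared
 * MII layer through the miibus_* methods registered above.
 */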
static struct nfe_type nfe_devs[] = {
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
	    "NVIDIA nForce MCP Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
	    "NVIDIA nForce2 MCP2 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
	    "NVIDIA nForce2 400 MCP4 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
	    "NVIDIA nForce2 400 MCP5 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
	    "NVIDIA nForce3 MCP3 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
	    "NVIDIA nForce3 250 MCP6 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
	    "NVIDIA nForce3 MCP7 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
	    "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
	    "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP10 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
	    "NVIDIA nForce MCP04 Networking Adapter"},		/* MCP11 */
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
	    "NVIDIA nForce 430 MCP12 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
	    "NVIDIA nForce 430 MCP13 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
	    "NVIDIA nForce MCP55 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
	    "NVIDIA nForce MCP61 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
	    "NVIDIA nForce MCP65 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
	    "NVIDIA nForce MCP67 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4,
	    "NVIDIA nForce MCP73 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4,
	    "NVIDIA nForce MCP77 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4,
	    "NVIDIA nForce MCP79 Networking Adapter"},
	{0, 0, NULL}
};
/* Probe for supported hardware IDs */
static int
nfe_probe(device_t dev)
{
	struct nfe_type *t;

	t = nfe_devs;
	/* Check for matching PCI device IDs */
	while (t->name != NULL) {
		if ((pci_get_vendor(dev) == t->vid_id) &&
		    (pci_get_device(dev) == t->dev_id)) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}
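
/*
 * MSI-X setup helper: on these MACs the message table sits behind BAR(2)
 * and the pending-bit array behind BAR(3), so both BARs must be mapped
 * before pci_alloc_msix() can program the vectors.
 */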
static void
nfe_alloc_msix(struct nfe_softc *sc, int count)
{
	int rid;

	rid = PCIR_BAR(2);
	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->nfe_msix_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX table resource\n");
		return;
	}
	rid = PCIR_BAR(3);
	sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->nfe_msix_pba_res == NULL) {
		device_printf(sc->nfe_dev,
		    "couldn't allocate MSIX PBA resource\n");
		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
		return;
	}

	if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
		if (count == NFE_MSI_MESSAGES) {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "Using %d MSIX messages\n", count);
			sc->nfe_msix = count;
		} else {
			if (bootverbose)
				device_printf(sc->nfe_dev,
				    "couldn't allocate MSIX\n");
			pci_release_msi(sc->nfe_dev);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(3), sc->nfe_msix_pba_res);
			bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
			    PCIR_BAR(2), sc->nfe_msix_res);
			sc->nfe_msix_pba_res = NULL;
			sc->nfe_msix_res = NULL;
		}
	}
}
static int
nfe_attach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	bus_addr_t dma_addr_max;
	int error = 0, i, msic, reg, rid;

	sc = device_get_softc(dev);
	sc->nfe_dev = dev;

	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
	TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(0);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->nfe_res[0] == NULL) {
		device_printf(dev, "couldn't map memory resources\n");
		mtx_destroy(&sc->nfe_mtx);
		return (ENXIO);
	}
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		uint16_t v, width;

		v = pci_read_config(dev, reg + 0x08, 2);
		/* Change max. read request size to 4096. */
		v &= ~(7 << 12);
		v |= (5 << 12);
		pci_write_config(dev, reg + 0x08, v, 2);

		v = pci_read_config(dev, reg + 0x0c, 2);
		/* link capability */
		v = (v >> 4) & 0x0f;
		width = pci_read_config(dev, reg + 0x12, 2);
		/* negotiated link width */
		width = (width >> 4) & 0x3f;
		if (v != width)
			device_printf(sc->nfe_dev,
			    "warning, negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
	}
	/* Allocate interrupt */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 &&
		    (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
			nfe_alloc_msix(sc, msic);
		if (msi_disable == 0 && sc->nfe_msix == 0 &&
		    (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == NFE_MSI_MESSAGES) {
				if (bootverbose)
					device_printf(dev,
					    "Using %d MSI messages\n", msic);
				sc->nfe_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}
	if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
		rid = 0;
		sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->nfe_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			sc->nfe_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->nfe_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
		/* Map interrupts to vector 0. */
		if (sc->nfe_msix != 0) {
			NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
			NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
		} else if (sc->nfe_msi != 0) {
			NFE_WRITE(sc, NFE_MSI_MAP0, 0);
			NFE_WRITE(sc, NFE_MSI_MAP1, 0);
		}
	}
	/* Set IRQ status/mask register. */
	sc->nfe_irq_status = NFE_IRQ_STATUS;
	sc->nfe_irq_mask = NFE_IRQ_MASK;
	sc->nfe_intrs = NFE_IRQ_WANTED;
	sc->nfe_nointrs = 0;
	if (sc->nfe_msix != 0) {
		sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
		sc->nfe_nointrs = NFE_IRQ_WANTED;
	} else if (sc->nfe_msi != 0) {
		sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
		sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
	}
	sc->nfe_devid = pci_get_device(dev);
	sc->nfe_revid = pci_get_revid(dev);

	switch (sc->nfe_devid) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_MIB_V1;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT |
		    NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		/* XXX flow control */
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL |
		    NFE_MIB_V2;
		break;
	}

	nfe_power(sc);
	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->nfe_flags |= NFE_CORRECT_MACADDR;
	nfe_get_macaddr(sc, sc->eaddr);
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
	if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
		dma_addr_max = NFE_DMA_MAXADDR;
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nfe_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    dma_addr_max,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,		/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nfe_parent_tag);
	if (error)
		goto fail;
	ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);

	/*
	 * Allocate Tx and Rx rings.
	 */
	if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
		goto fail;

	if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
		goto fail;

	nfe_alloc_jrx_ring(sc, &sc->jrxq);
	/* Create sysctl node. */
	nfe_sysctl_node(sc);

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_hwassist = 0;
	ifp->if_capabilities = 0;
	ifp->if_watchdog = NULL;
	ifp->if_init = nfe_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
	ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
	IFQ_SET_READY(&ifp->if_snd);
	if (sc->nfe_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
		ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
	}
	ifp->if_capenable = ifp->if_capabilities;

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
		if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
			ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	}
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
	    nfe_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}
	ether_ifattach(ifp, sc->eaddr);

	TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
	sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->nfe_tq);
	taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->nfe_dev));
	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		error = bus_setup_intr(dev, sc->nfe_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
		    &sc->nfe_intrhand[0]);
	} else {
		for (i = 0; i < NFE_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->nfe_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
			    &sc->nfe_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

	return (0);

fail:
	nfe_detach(dev);

	return (error);
}
static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int i, rid;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
	ifp = sc->nfe_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		NFE_LOCK(sc);
		nfe_stop(ifp);
		ifp->if_flags &= ~IFF_UP;
		NFE_UNLOCK(sc);
		callout_drain(&sc->nfe_stat_ch);
		taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
		taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
		ether_ifdetach(ifp);
	}

	if (ifp) {
		/* restore ethernet address */
		if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				eaddr[i] = sc->eaddr[5 - i];
			}
		} else
			bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
		nfe_set_macaddr(sc, eaddr);
		if_free(ifp);
	}
	if (sc->nfe_miibus)
		device_delete_child(dev, sc->nfe_miibus);
	bus_generic_detach(dev);
	if (sc->nfe_tq != NULL) {
		taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
		taskqueue_free(sc->nfe_tq);
		sc->nfe_tq = NULL;
	}

	for (i = 0; i < NFE_MSI_MESSAGES; i++) {
		if (sc->nfe_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->nfe_irq[i],
			    sc->nfe_intrhand[i]);
			sc->nfe_intrhand[i] = NULL;
		}
	}

	if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
		if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
	} else {
		for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
			if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->nfe_msix_pba_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
		    sc->nfe_msix_pba_res);
		sc->nfe_msix_pba_res = NULL;
	}
	if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
	}
	if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
	}

	nfe_free_tx_ring(sc, &sc->txq);
	nfe_free_rx_ring(sc, &sc->rxq);
	nfe_free_jrx_ring(sc, &sc->jrxq);

	if (sc->nfe_parent_tag) {
		bus_dma_tag_destroy(sc->nfe_parent_tag);
		sc->nfe_parent_tag = NULL;
	}

	mtx_destroy(&sc->nfe_mtx);

	return (0);
}
static int
nfe_suspend(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	nfe_stop(sc->nfe_ifp);
	sc->nfe_suspended = 1;
	NFE_UNLOCK(sc);

	return (0);
}
static int
nfe_resume(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	ifp = sc->nfe_ifp;
	if (ifp->if_flags & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
	NFE_UNLOCK(sc);

	return (0);
}
/* Take PHY/NIC out of powerdown, from Linux */
static void
nfe_power(struct nfe_softc *sc)
{
	uint32_t pwr;

	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
		return;
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
	NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
	DELAY(100);
	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
	pwr = NFE_READ(sc, NFE_PWR2_CTL);
	pwr &= ~NFE_PWR2_WAKEUP_MASK;
	if (sc->nfe_revid >= 0xa3 &&
	    (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
	    sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
		pwr |= NFE_PWR2_REVA3;
	NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
}
static void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
}
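
/*
 * nfe_miibus_statchg() only schedules nfe_link_task(); the actual MAC
 * reprogramming then runs from the softirq taskqueue, outside the miibus
 * call chain that delivered the status-change callback.
 */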
static void
nfe_link_task(void *arg, int pending)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
	uint32_t gmask, rxctl, txctl, val;

	sc = (struct nfe_softc *)arg;

	NFE_LOCK(sc);

	mii = device_get_softc(sc->nfe_miibus);
	ifp = sc->nfe_ifp;
	if (mii == NULL || ifp == NULL) {
		NFE_UNLOCK(sc);
		return;
	}

	sc->nfe_link = 0;
	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->nfe_link = 1;
	}

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
		phy  |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy  |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy  |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	if ((phy & 0x10000000) != 0) {
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
			val = NFE_R1_MAGIC_1000;
		else
			val = NFE_R1_MAGIC_10_100;
	} else
		val = NFE_R1_MAGIC_DEFAULT;
	NFE_WRITE(sc, NFE_SETUP_R1, val);

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);

	gmask = mii->mii_media_active & IFM_GMASK;
	if ((gmask & IFM_FDX) != 0) {
		/* It seems all hardware supports Rx pause frames. */
		val = NFE_READ(sc, NFE_RXFILTER);
		if ((gmask & IFM_FLAG0) != 0)
			val |= NFE_PFF_RX_PAUSE;
		else
			val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			val = NFE_READ(sc, NFE_MISC1);
			if ((gmask & IFM_FLAG1) != 0) {
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_ENABLE);
				val |= NFE_MISC1_TX_PAUSE;
			} else {
				val &= ~NFE_MISC1_TX_PAUSE;
				NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
				    NFE_TX_PAUSE_FRAME_DISABLE);
			}
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	} else {
		/* disable rx/tx pause frames */
		val = NFE_READ(sc, NFE_RXFILTER);
		val &= ~NFE_PFF_RX_PAUSE;
		NFE_WRITE(sc, NFE_RXFILTER, val);
		if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
			NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
			    NFE_TX_PAUSE_FRAME_DISABLE);
			val = NFE_READ(sc, NFE_MISC1);
			val &= ~NFE_MISC1_TX_PAUSE;
			NFE_WRITE(sc, NFE_MISC1, val);
		}
	}

	txctl = NFE_READ(sc, NFE_TX_CTL);
	rxctl = NFE_READ(sc, NFE_RX_CTL);
	if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		txctl |= NFE_TX_START;
		rxctl |= NFE_RX_START;
	} else {
		txctl &= ~NFE_TX_START;
		rxctl &= ~NFE_RX_START;
	}
	NFE_WRITE(sc, NFE_TX_CTL, txctl);
	NFE_WRITE(sc, NFE_RX_CTL, rxctl);

	NFE_UNLOCK(sc);
}
static int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == NFE_TIMEOUT) {
		DPRINTFN(sc, 2, "timeout waiting for PHY\n");
		return (0);
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(sc, 2, "could not read PHY\n");
		return (0);
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);

	return (val);
}
static int
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
		device_printf(sc->nfe_dev, "could not write to PHY\n");
#endif
	return (0);
}
struct nfe_dmamap_arg {
	bus_addr_t nfe_busaddr;
};
static int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_RX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	/* allocate memory to desc */
	error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	/* map desc to device visible address space */
	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
	    NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES, 1,			/* maxsize, nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->rx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
		goto fail;
	}

	error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create Rx DMA spare map\n");
		goto fail;
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->rx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Rx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	struct nfe_rx_data *data;
	void *desc;
	int i, error, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;
	if (jumbo_disable != 0) {
		device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
		sc->nfe_jumbo_disable = 1;
		return;
	}

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->jcur = ring->jnext = 0;

	/* Create DMA tag for jumbo Rx ring. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsize */
	    1,					/* nsegments */
	    NFE_JUMBO_RX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo ring DMA tag\n");
		goto fail;
	}

	/* Create DMA tag for jumbo Rx buffers. */
	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MJUM9BYTES,				/* maxsize */
	    1,					/* nsegments */
	    MJUM9BYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->jrx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx buffer DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
	error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not allocate DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->jdesc64 = desc;
	else
		ring->jdesc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
	    NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not load DMA'able memory for jumbo Rx ring\n");
		goto fail;
	}
	ring->jphysaddr = ctx.nfe_busaddr;

	/* Create DMA maps for jumbo Rx buffers. */
	error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "could not create jumbo Rx DMA spare map\n");
		goto fail;
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &sc->jrxq.jdata[i];
		data->rx_data_map = NULL;
		data->m = NULL;
		error = bus_dmamap_create(ring->jrx_data_tag, 0,
		    &data->rx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create jumbo Rx DMA map\n");
			goto fail;
		}
	}

	return;

fail:
	/*
	 * Running without jumbo frame support is ok for most cases
	 * so don't fail on creating dma tag/map for jumbo frame.
	 */
	nfe_free_jrx_ring(sc, ring);
	device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
	    "resource shortage\n");
	sc->nfe_jumbo_disable = 1;
}
static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	void *desc;
	int i, descsize;

	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_RX_RING_COUNT);
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (nfe_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
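
/*
 * The PREREAD|PREWRITE sync at the end of the ring-init routines hands
 * the freshly zeroed and refilled descriptors to the device before the
 * ring base address is programmed; the Rx completion paths perform the
 * matching POSTREAD sync before inspecting descriptor flags.
 */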
static int
nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	void *desc;
	int i, descsize;

	ring->jcur = ring->jnext = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		if (nfe_jnewbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->rx_data_tag != NULL) {
		if (ring->rx_spare_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    ring->rx_spare_map);
			ring->rx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->rx_data_tag);
		ring->rx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
		bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		ring->rx_desc_map = NULL;
	}
	if (ring->rx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->rx_desc_tag);
		ring->rx_desc_tag = NULL;
	}
}
static void
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->jdesc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->jdesc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
		data = &ring->jdata[i];
		if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
		}
		if (data->m != NULL) {
			m_freem(data->m);
			data->m = NULL;
		}
	}
	if (ring->jrx_data_tag != NULL) {
		if (ring->jrx_spare_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    ring->jrx_spare_map);
			ring->jrx_spare_map = NULL;
		}
		bus_dma_tag_destroy(ring->jrx_data_tag);
		ring->jrx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
		bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
		ring->jdesc64 = NULL;
		ring->jdesc32 = NULL;
		ring->jrx_desc_map = NULL;
	}

	if (ring->jrx_desc_tag != NULL) {
		bus_dma_tag_destroy(ring->jrx_desc_tag);
		ring->jrx_desc_tag = NULL;
	}
}
static int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_dmamap_arg ctx;
	void *desc;
	int i, error, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    NFE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TX_RING_COUNT * descsize, 1,	/* maxsize, nsegments */
	    NFE_TX_RING_COUNT * descsize,	/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_desc_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create desc DMA map\n");
		goto fail;
	}
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		ring->desc64 = desc;
	else
		ring->desc32 = desc;

	ctx.nfe_busaddr = 0;
	error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
	    NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not load desc DMA map\n");
		goto fail;
	}
	ring->physaddr = ctx.nfe_busaddr;

	error = bus_dma_tag_create(sc->nfe_parent_tag,
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NFE_TSO_MAXSIZE,			/* maxsize */
	    NFE_MAX_SCATTER,			/* nsegments */
	    NFE_TSO_MAXSGSIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &ring->tx_data_tag);
	if (error != 0) {
		device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
		goto fail;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->tx_data_tag, 0,
		    &ring->data[i].tx_data_map);
		if (error != 0) {
			device_printf(sc->nfe_dev,
			    "could not create Tx DMA map\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	void *desc;
	int descsize;

	sc->nfe_force_tx = 0;
	ring->queued = 0;
	ring->cur = ring->next = 0;
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}
	bzero(desc, descsize * NFE_TX_RING_COUNT);

	bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->tx_data_map != NULL) {
			bus_dmamap_destroy(ring->tx_data_tag,
			    data->tx_data_map);
			data->tx_data_map = NULL;
		}
	}

	if (ring->tx_data_tag != NULL) {
		bus_dma_tag_destroy(ring->tx_data_tag);
		ring->tx_data_tag = NULL;
	}

	if (desc != NULL) {
		bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
		bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
		ring->desc64 = NULL;
		ring->desc32 = NULL;
		ring->tx_desc_map = NULL;
		bus_dma_tag_destroy(ring->tx_desc_tag);
		ring->tx_desc_tag = NULL;
	}
}
#ifdef DEVICE_POLLING
static poll_handler_t nfe_poll;

static int
nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t r;
	int rx_npkts = 0;

	NFE_LOCK(sc);

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		NFE_UNLOCK(sc);
		return (rx_npkts);
	}

	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		nfe_jrxeof(sc, count, &rx_npkts);
	else
		nfe_rxeof(sc, count, &rx_npkts);
	nfe_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);

	if (cmd == POLL_AND_CHECK_STATUS) {
		if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
			NFE_UNLOCK(sc);
			return (rx_npkts);
		}
		NFE_WRITE(sc, sc->nfe_irq_status, r);

		if (r & NFE_IRQ_LINK) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(sc, "link state changed\n");
		}
	}
	NFE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
static void
nfe_set_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msi != 0)
		NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
}
/* In MSIX, a write to mask registers behaves as XOR. */
static __inline void
nfe_enable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to enable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
}
static __inline void
nfe_disable_intr(struct nfe_softc *sc)
{

	if (sc->nfe_msix != 0) {
		/* XXX Should have a better way to disable interrupts! */
		if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
			NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
	} else
		NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
}
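
/*
 * Example of the toggling semantics worked around above (inferred from
 * the XOR comment, not from vendor documentation): if the MSI-X mask
 * register already holds sc->nfe_intrs, writing sc->nfe_intrs a second
 * time would XOR those bits back to zero and disable the sources again.
 * The read-before-write in both helpers keeps the writes idempotent.
 */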
static int
nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nfe_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, init, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;
	init = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
			    (sc->nfe_jumbo_disable != 0)) &&
			    ifr->ifr_mtu > ETHERMTU)
				error = EINVAL;
			else {
				NFE_LOCK(sc);
				ifp->if_mtu = ifr->ifr_mtu;
				if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
					nfe_init_locked(sc);
				NFE_UNLOCK(sc);
			}
		}
		break;
	case SIOCSIFFLAGS:
		NFE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
			    ((ifp->if_flags ^ sc->nfe_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				nfe_setmulti(sc);
			else
				nfe_init_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				nfe_stop(ifp);
		}
		sc->nfe_if_flags = ifp->if_flags;
		NFE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			NFE_LOCK(sc);
			nfe_setmulti(sc);
			NFE_UNLOCK(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->nfe_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(nfe_poll, ifp);
				if (error)
					break;
				NFE_LOCK(sc);
				nfe_disable_intr(sc);
				ifp->if_capenable |= IFCAP_POLLING;
				NFE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				NFE_LOCK(sc);
				nfe_enable_intr(sc);
				ifp->if_capenable &= ~IFCAP_POLLING;
				NFE_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
		    (mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= NFE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
			init++;
		}
		if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
		    (mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			init++;
		}
		/*
		 * XXX
		 * It seems that VLAN stripping requires Rx checksum offload.
		 * Unfortunately FreeBSD has no way to disable only Rx side
		 * VLAN stripping. So when we know Rx checksum offload is
		 * disabled turn entire hardware VLAN assist off.
		 */
		if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
		    (NFE_HW_CSUM | NFE_HW_VLAN)) {
			if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
				ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
		}
		if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
		    (mask & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
			    (IFCAP_TSO4 & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nfe_init(sc);
		}
		if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
			VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static int
nfe_intr(void *arg)
{
	struct nfe_softc *sc;
	uint32_t status;

	sc = (struct nfe_softc *)arg;

	status = NFE_READ(sc, sc->nfe_irq_status);
	if (status == 0 || status == 0xffffffff)
		return (FILTER_STRAY);
	nfe_disable_intr(sc);
	taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);

	return (FILTER_HANDLED);
}
static void
nfe_int_task(void *arg, int pending)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = sc->nfe_ifp;
	uint32_t r;
	int domore;

	NFE_LOCK(sc);

	if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
		nfe_enable_intr(sc);
		NFE_UNLOCK(sc);
		return;	/* not for us */
	}
	NFE_WRITE(sc, sc->nfe_irq_status, r);

	DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		NFE_UNLOCK(sc);
		return;
	}
#endif

	if (r & NFE_IRQ_LINK) {
		NFE_READ(sc, NFE_PHY_STATUS);
		NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
		DPRINTF(sc, "link state changed\n");
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		NFE_UNLOCK(sc);
		nfe_enable_intr(sc);
		return;
	}

	domore = 0;
	/* check Rx ring */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
		domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL);
	else
		domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL);
	/* check Tx ring */
	nfe_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);

	NFE_UNLOCK(sc);

	if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
		return;
	}

	/* Reenable interrupts. */
	nfe_enable_intr(sc);
}
static __inline void
nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->rxq.data[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->rxq.desc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
static __inline void
nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->jrxq.jdata[idx];
	m = data->m;

	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		/* VLAN packet may have overwritten it. */
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
		desc64->length = htole16(m->m_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->length = htole16(m->m_len);
		desc32->flags = htole16(NFE_RX_READY);
	}
}
static int
nfe_newbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
	    m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	data = &sc->rxq.data[idx];
	if (data->m != NULL) {
		bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
	}
	map = data->rx_data_map;
	data->rx_data_map = sc->rxq.rx_spare_map;
	sc->rxq.rx_spare_map = map;
	bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->rxq.desc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->rxq.desc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}
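
/*
 * nfe_jnewbuf() below follows the same spare-map pattern as nfe_newbuf():
 * the replacement mbuf is loaded into the preallocated spare DMA map
 * first, so a failed load leaves the old mbuf and its mapping intact and
 * the frame can simply be dropped and the buffer recycled.
 */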
static int
nfe_jnewbuf(struct nfe_softc *sc, int idx)
{
	struct nfe_rx_data *data;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
	if (m == NULL)
		return (ENOBUFS);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MJUM9BYTES;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
	    sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	data = &sc->jrxq.jdata[idx];
	if (data->m != NULL) {
		bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
	}
	map = data->rx_data_map;
	data->rx_data_map = sc->jrxq.jrx_spare_map;
	sc->jrxq.jrx_spare_map = map;
	bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
	    BUS_DMASYNC_PREREAD);
	data->paddr = segs[0].ds_addr;
	data->m = m;
	/* update mapping address in h/w descriptor */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64 = &sc->jrxq.jdesc64[idx];
		desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc64->length = htole16(segs[0].ds_len);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		desc32 = &sc->jrxq.jdesc32[idx];
		desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
		desc32->length = htole16(segs[0].ds_len);
		desc32->flags = htole16(NFE_RX_READY);
	}

	return (0);
}
static int
nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->rxq.data[sc->rxq.cur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[sc->rxq.cur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->rxq.desc32[sc->rxq.cur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_rxbuf(sc, sc->rxq.cur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		m = data->m;
		if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_rxbuf(sc, sc->rxq.cur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}
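
/*
 * A zero return above means the ring drained before the budget ran out;
 * EAGAIN signals nfe_int_task() that more descriptors may still be
 * pending, so it re-queues itself for another pass instead of
 * re-enabling interrupts.
 */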
static int
nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;
	uint16_t flags;
	int len, prog, rx_npkts;
	uint32_t vtag = 0;

	rx_npkts = 0;
	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
	    vtag = 0) {
		if (count <= 0)
			break;
		count--;

		data = &sc->jrxq.jdata[sc->jrxq.jcur];

		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
			vtag = le32toh(desc64->physaddr[1]);
			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
		} else {
			desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
		}

		if (flags & NFE_RX_READY)
			break;
		prog++;
		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if (!(flags & NFE_RX_VALID_V1)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}
			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if (!(flags & NFE_RX_VALID_V2)) {
				ifp->if_ierrors++;
				nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
				continue;
			}

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		m = data->m;
		if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
			ifp->if_iqdrops++;
			nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
			continue;
		}

		if ((vtag & NFE_RX_VTAG) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = vtag & 0xffff;
			m->m_flags |= M_VLANTAG;
		}

		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			if ((flags & NFE_RX_IP_CSUMOK) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
				    (flags & NFE_RX_UDP_CSUMOK) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}

		NFE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NFE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0)
		bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;
	return (count > 0 ? 0 : EAGAIN);
}
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;
	int cons, prog;

	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->txq.next; cons != sc->txq.cur;
	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[cons];
			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[cons];
			flags = le16toh(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		prog++;
		sc->txq.queued--;
		data = &sc->txq.data[cons];

		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V1) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V2) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		/* last fragment of the mbuf chain transmitted */
		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
		m_freem(data->m);
		data->m = NULL;
	}

	if (prog > 0) {
		sc->nfe_force_tx = 0;
		sc->txq.next = cons;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->txq.queued == 0)
			sc->nfe_watchdog_timer = 0;
	}
}
2323 nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
2325 struct nfe_desc32 *desc32 = NULL;
2326 struct nfe_desc64 *desc64 = NULL;
2328 bus_dma_segment_t segs[NFE_MAX_SCATTER];
2329 int error, i, nsegs, prod, si;
2331 uint16_t cflags, flags;
2334 prod = si = sc->txq.cur;
2335 map = sc->txq.data[prod].tx_data_map;
2337 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
2338 &nsegs, BUS_DMA_NOWAIT);
2339 if (error == EFBIG) {
2340 m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
2347 error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
2348 *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
2354 } else if (error != 0)
2362 if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
2363 bus_dmamap_unload(sc->txq.tx_data_tag, map);
2370 if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
2371 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
2372 cflags |= NFE_TX_IP_CSUM;
2373 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
2374 cflags |= NFE_TX_TCP_UDP_CSUM;
2375 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2376 cflags |= NFE_TX_TCP_UDP_CSUM;
2378 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
2379 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz << NFE_TX_TSO_SHIFT;
2381 cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
2382 cflags |= NFE_TX_TSO;
2385 for (i = 0; i < nsegs; i++) {
2386 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2387 desc64 = &sc->txq.desc64[prod];
2388 desc64->physaddr[0] =
2389 htole32(NFE_ADDR_HI(segs[i].ds_addr));
2390 desc64->physaddr[1] =
2391 htole32(NFE_ADDR_LO(segs[i].ds_addr));
2393 desc64->length = htole16(segs[i].ds_len - 1);
2394 desc64->flags = htole16(flags);
2396 desc32 = &sc->txq.desc32[prod];
2398 desc32->physaddr = htole32(NFE_ADDR_LO(segs[i].ds_addr));
2399 desc32->length = htole16(segs[i].ds_len - 1);
2400 desc32->flags = htole16(flags);
2404 * Setting of the valid bit in the first descriptor is
2405 * deferred until the whole chain is fully set up.
2407 flags |= NFE_TX_VALID;
2410 NFE_INC(prod, NFE_TX_RING_COUNT);
2414 * The whole mbuf chain has been DMA mapped; fix the last/first descriptors.
2415 * Checksum flags, vtag and TSO belong to the first fragment only.
2417 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2418 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
2419 desc64 = &sc->txq.desc64[si];
2420 if ((m->m_flags & M_VLANTAG) != 0)
2421 desc64->vtag = htole32(NFE_TX_VTAG |
2422 m->m_pkthdr.ether_vtag);
2423 if (tso_segsz != 0) {
2426 * The 32-bit TSO segment size is split across the descriptor's
2427 * 16-bit length and flags fields.
2429 desc64->length |= htole16((uint16_t)tso_segsz);
2430 desc64->flags |= htole16(tso_segsz >> 16);
2433 * Finally, set the valid/checksum/TSO bits in the first descriptor.
2436 desc64->flags |= htole16(NFE_TX_VALID | cflags);
2438 if (sc->nfe_flags & NFE_JUMBO_SUP)
2439 desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
2441 desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
2442 desc32 = &sc->txq.desc32[si];
2443 if (tso_segsz != 0) {
2446 * The 32-bit TSO segment size is split across the descriptor's
2447 * 16-bit length and flags fields.
2449 desc32->length |= htole16((uint16_t)tso_segsz);
2450 desc32->flags |= htole16(tso_segsz >> 16);
2453 * Finally, set the valid/checksum/TSO bits in the first descriptor.
2456 desc32->flags |= htole16(NFE_TX_VALID | cflags);
2460 prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
2461 sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
2462 sc->txq.data[prod].tx_data_map = map;
2463 sc->txq.data[prod].m = m;
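/*
 * The DMA map loaded above came from the first slot (si), but the mbuf
 * is recorded in the last slot, where nfe_txeof() releases it; the swap
 * above keeps each slot's map paired with its mbuf.
 */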
2465 bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);
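/*
 * Program the hardware multicast filter.  Rather than a hash table, the
 * chip appears to use an (address, mask) pair: a frame is accepted when
 * its destination matches `addr' on every bit set in `mask'.  The code
 * below derives the pair by accumulating, across all subscribed
 * multicast addresses, the bits on which they all agree.
 */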
2472 nfe_setmulti(struct nfe_softc *sc)
2474 struct ifnet *ifp = sc->nfe_ifp;
2475 struct ifmultiaddr *ifma;
2478 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
2479 uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
2480 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2483 NFE_LOCK_ASSERT(sc);
2485 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
2486 bzero(addr, ETHER_ADDR_LEN);
2487 bzero(mask, ETHER_ADDR_LEN);
2491 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
2492 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);
2494 if_maddr_rlock(ifp);
2495 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2498 if (ifma->ifma_addr->sa_family != AF_LINK)
2501 addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2502 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2503 u_int8_t mcaddr = addrp[i];
2508 if_maddr_runlock(ifp);
2510 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2515 addr[0] |= 0x01; /* make sure multicast bit is set */
2517 NFE_WRITE(sc, NFE_MULTIADDR_HI,
2518 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
2519 NFE_WRITE(sc, NFE_MULTIADDR_LO,
2520 addr[5] << 8 | addr[4]);
2521 NFE_WRITE(sc, NFE_MULTIMASK_HI,
2522 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
2523 NFE_WRITE(sc, NFE_MULTIMASK_LO,
2524 mask[5] << 8 | mask[4]);
2526 filter = NFE_READ(sc, NFE_RXFILTER);
2527 filter &= NFE_PFF_RX_PAUSE;
2528 filter |= NFE_RXFILTER_MAGIC;
2529 filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
2530 NFE_WRITE(sc, NFE_RXFILTER, filter);
2535 nfe_tx_task(void *arg, int pending)
2539 ifp = (struct ifnet *)arg;
2545 nfe_start(struct ifnet *ifp)
2547 struct nfe_softc *sc = ifp->if_softc;
2553 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
2554 IFF_DRV_RUNNING || sc->nfe_link == 0) {
2559 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
2560 IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
2564 if (nfe_encap(sc, &m0) != 0) {
2567 IFQ_DRV_PREPEND(&ifp->if_snd, m0);
2568 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
2572 ETHER_BPF_MTAP(ifp, m0);
2576 bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
2577 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2580 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2583 * Set a timeout in case the chip goes out to lunch.
2585 sc->nfe_watchdog_timer = 5;
2593 nfe_watchdog(struct ifnet *ifp)
2595 struct nfe_softc *sc = ifp->if_softc;
2597 if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
2600 /* Check if we've lost Tx completion interrupt. */
2602 if (sc->txq.queued == 0) {
2603 if_printf(ifp, "watchdog timeout (missed Tx interrupts) -- recovering\n");
2605 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2606 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
2609 /* Check if we've lost start Tx command. */
2611 if (sc->nfe_force_tx <= 3) {
2613 * If lost kick commands turn out to be the cause of these watchdog
2614 * timeouts, this recovery code should move into nfe_txeof().
2616 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
2619 sc->nfe_force_tx = 0;
2621 if_printf(ifp, "watchdog timeout\n");
2623 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2625 nfe_init_locked(sc);
2632 struct nfe_softc *sc = xsc;
2635 nfe_init_locked(sc);
2641 nfe_init_locked(void *xsc)
2643 struct nfe_softc *sc = xsc;
2644 struct ifnet *ifp = sc->nfe_ifp;
2645 struct mii_data *mii;
2649 NFE_LOCK_ASSERT(sc);
2651 mii = device_get_softc(sc->nfe_miibus);
2653 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2658 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
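/*
 * nfe_framesize decides which receive ring is used below: frames that
 * no longer fit in a standard mbuf cluster (MCLBYTES) are steered to
 * the jumbo ring backed by larger buffers.
 */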
2660 nfe_init_tx_ring(sc, &sc->txq);
2661 if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
2662 error = nfe_init_jrx_ring(sc, &sc->jrxq);
2664 error = nfe_init_rx_ring(sc, &sc->rxq);
2666 device_printf(sc->nfe_dev,
2667 "initialization failed: no memory for rx buffers\n");
2673 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
2674 val |= NFE_MAC_ADDR_INORDER;
2675 NFE_WRITE(sc, NFE_TX_UNK, val);
2676 NFE_WRITE(sc, NFE_STATUS, 0);
2678 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
2679 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);
2681 sc->rxtxctl = NFE_RXTX_BIT2;
2682 if (sc->nfe_flags & NFE_40BIT_ADDR)
2683 sc->rxtxctl |= NFE_RXTX_V3MAGIC;
2684 else if (sc->nfe_flags & NFE_JUMBO_SUP)
2685 sc->rxtxctl |= NFE_RXTX_V2MAGIC;
2687 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2688 sc->rxtxctl |= NFE_RXTX_RXCSUM;
2689 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2690 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;
2692 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
2694 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2696 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2697 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
2699 NFE_WRITE(sc, NFE_VTAG_CTL, 0);
2701 NFE_WRITE(sc, NFE_SETUP_R6, 0);
2703 /* set MAC address */
2704 nfe_set_macaddr(sc, IF_LLADDR(ifp));
2706 /* tell MAC where rings are in memory */
2707 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
2708 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2709 NFE_ADDR_HI(sc->jrxq.jphysaddr));
2710 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2711 NFE_ADDR_LO(sc->jrxq.jphysaddr));
2713 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
2714 NFE_ADDR_HI(sc->rxq.physaddr));
2715 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
2716 NFE_ADDR_LO(sc->rxq.physaddr));
2718 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
2719 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));
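/*
 * Ring sizes are programmed as count minus one, Rx in the upper 16 bits
 * of the register and Tx in the lower 16 bits.
 */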
2721 NFE_WRITE(sc, NFE_RING_SIZE,
2722 (NFE_RX_RING_COUNT - 1) << 16 |
2723 (NFE_TX_RING_COUNT - 1));
2725 NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);
2727 /* force MAC to wakeup */
2728 val = NFE_READ(sc, NFE_PWR_STATE);
2729 if ((val & NFE_PWR_WAKEUP) == 0)
2730 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
2732 val = NFE_READ(sc, NFE_PWR_STATE);
2733 NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);
2736 /* configure interrupts coalescing/mitigation */
2737 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
2739 /* no interrupt mitigation: one interrupt per packet */
2740 NFE_WRITE(sc, NFE_IMTIMER, 970);
2743 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
2744 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
2745 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);
2747 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
2748 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);
2750 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
2751 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);
2753 sc->rxtxctl &= ~NFE_RXTX_BIT2;
2754 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
2756 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);
2762 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);
2765 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);
2767 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
2769 /* Clear hardware stats. */
2770 nfe_stats_clear(sc);
2772 #ifdef DEVICE_POLLING
2773 if (ifp->if_capenable & IFCAP_POLLING)
2774 nfe_disable_intr(sc);
2778 nfe_enable_intr(sc); /* enable interrupts */
2780 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2781 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2786 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2791 nfe_stop(struct ifnet *ifp)
2793 struct nfe_softc *sc = ifp->if_softc;
2794 struct nfe_rx_ring *rx_ring;
2795 struct nfe_jrx_ring *jrx_ring;
2796 struct nfe_tx_ring *tx_ring;
2797 struct nfe_rx_data *rdata;
2798 struct nfe_tx_data *tdata;
2801 NFE_LOCK_ASSERT(sc);
2803 sc->nfe_watchdog_timer = 0;
2804 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2806 callout_stop(&sc->nfe_stat_ch);
2809 NFE_WRITE(sc, NFE_TX_CTL, 0);
2812 NFE_WRITE(sc, NFE_RX_CTL, 0);
2814 /* disable interrupts */
2815 nfe_disable_intr(sc);
2819 /* free Rx and Tx mbufs still in the queues. */
2821 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
2822 rdata = &rx_ring->data[i];
2823 if (rdata->m != NULL) {
2824 bus_dmamap_sync(rx_ring->rx_data_tag,
2825 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2826 bus_dmamap_unload(rx_ring->rx_data_tag,
2827 rdata->rx_data_map);
2833 if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
2834 jrx_ring = &sc->jrxq;
2835 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
2836 rdata = &jrx_ring->jdata[i];
2837 if (rdata->m != NULL) {
2838 bus_dmamap_sync(jrx_ring->jrx_data_tag,
2839 rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
2840 bus_dmamap_unload(jrx_ring->jrx_data_tag,
2841 rdata->rx_data_map);
2849 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
2850 tdata = &tx_ring->data[i];
2851 if (tdata->m != NULL) {
2852 bus_dmamap_sync(tx_ring->tx_data_tag,
2853 tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
2854 bus_dmamap_unload(tx_ring->tx_data_tag,
2855 tdata->tx_data_map);
2860 /* Update hardware stats. */
2861 nfe_stats_update(sc);
2866 nfe_ifmedia_upd(struct ifnet *ifp)
2868 struct nfe_softc *sc = ifp->if_softc;
2869 struct mii_data *mii;
2872 mii = device_get_softc(sc->nfe_miibus);
2881 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2883 struct nfe_softc *sc;
2884 struct mii_data *mii;
2889 mii = device_get_softc(sc->nfe_miibus);
2893 ifmr->ifm_active = mii->mii_media_active;
2894 ifmr->ifm_status = mii->mii_media_status;
2901 struct nfe_softc *sc;
2902 struct mii_data *mii;
2905 sc = (struct nfe_softc *)xsc;
2907 NFE_LOCK_ASSERT(sc);
2911 mii = device_get_softc(sc->nfe_miibus);
2913 nfe_stats_update(sc);
2915 callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
2920 nfe_shutdown(device_t dev)
2922 struct nfe_softc *sc;
2925 sc = device_get_softc(dev);
2930 /* nfe_reset(sc); */
2938 nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
2942 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
2943 val = NFE_READ(sc, NFE_MACADDR_LO);
2944 addr[0] = (val >> 8) & 0xff;
2945 addr[1] = (val & 0xff);
2947 val = NFE_READ(sc, NFE_MACADDR_HI);
2948 addr[2] = (val >> 24) & 0xff;
2949 addr[3] = (val >> 16) & 0xff;
2950 addr[4] = (val >> 8) & 0xff;
2951 addr[5] = (val & 0xff);
2953 val = NFE_READ(sc, NFE_MACADDR_LO);
2954 addr[5] = (val >> 8) & 0xff;
2955 addr[4] = (val & 0xff);
2957 val = NFE_READ(sc, NFE_MACADDR_HI);
2958 addr[3] = (val >> 24) & 0xff;
2959 addr[2] = (val >> 16) & 0xff;
2960 addr[1] = (val >> 8) & 0xff;
2961 addr[0] = (val & 0xff);
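/*
 * On chips without NFE_CORRECT_MACADDR the factory address sits in the
 * registers in reversed byte order (handled above); the write side
 * below always uses the reversed layout, with the last two bytes in
 * MACADDR_LO and the first four in MACADDR_HI.
 */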
2967 nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
2970 NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
2971 NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
2972 addr[1] << 8 | addr[0]);
2977 * Map a single buffer address.
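 * This is the usual busdma load callback: bus_dmamap_load() hands the
 * resulting physical segment(s) to this function, which records the
 * single segment's bus address in the caller-supplied context.  The
 * KASSERT below enforces the one-segment assumption.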
2981 nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2983 struct nfe_dmamap_arg *ctx;
2988 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
2990 ctx = (struct nfe_dmamap_arg *)arg;
2991 ctx->nfe_busaddr = segs[0].ds_addr;
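/*
 * Generic clamped-integer sysctl handler: export the current value, let
 * sysctl_handle_int() apply any update from userland, and reject values
 * outside [low, high] before committing the result back through arg1.
 */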
2996 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3002 value = *(int *)arg1;
3003 error = sysctl_handle_int(oidp, &value, 0, req);
3004 if (error || !req->newptr)
3006 if (value < low || value > high)
3008 *(int *)arg1 = value;
3015 sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
3018 return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, NFE_PROC_MAX));
3023 #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \
3024 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
3025 #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \
3026 SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
3029 nfe_sysctl_node(struct nfe_softc *sc)
3031 struct sysctl_ctx_list *ctx;
3032 struct sysctl_oid_list *child, *parent;
3033 struct sysctl_oid *tree;
3034 struct nfe_hw_stats *stats;
3037 stats = &sc->nfe_stats;
3038 ctx = device_get_sysctl_ctx(sc->nfe_dev);
3039 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev));
3040 SYSCTL_ADD_PROC(ctx, child,
3041 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
3042 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
3043 "max number of Rx events to process");
3045 sc->nfe_process_limit = NFE_PROC_DEFAULT;
3046 error = resource_int_value(device_get_name(sc->nfe_dev),
3047 device_get_unit(sc->nfe_dev), "process_limit",
3048 &sc->nfe_process_limit);
3050 if (sc->nfe_process_limit < NFE_PROC_MIN ||
3051 sc->nfe_process_limit > NFE_PROC_MAX) {
3052 device_printf(sc->nfe_dev,
3053 "process_limit value out of range; "
3054 "using default: %d\n", NFE_PROC_DEFAULT);
3055 sc->nfe_process_limit = NFE_PROC_DEFAULT;
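/*
 * The limit can be preset from the loader via the device hint
 * hint.nfe.<unit>.process_limit (read by resource_int_value() above)
 * and tuned at runtime through the dev.nfe.<unit>.process_limit sysctl
 * created earlier.
 */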
3059 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3062 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
3063 NULL, "NFE statistics");
3064 parent = SYSCTL_CHILDREN(tree);
3066 /* Rx statistics. */
3067 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
3068 NULL, "Rx MAC statistics");
3069 child = SYSCTL_CHILDREN(tree);
3071 NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors",
3072 &stats->rx_frame_errors, "Framing Errors");
3073 NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes",
3074 &stats->rx_extra_bytes, "Extra Bytes");
3075 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3076 &stats->rx_late_cols, "Late Collisions");
3077 NFE_SYSCTL_STAT_ADD32(ctx, child, "runts",
3078 &stats->rx_runts, "Runts");
3079 NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos",
3080 &stats->rx_jumbos, "Jumbos");
3081 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns",
3082 &stats->rx_fifo_overuns, "FIFO Overruns");
3083 NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors",
3084 &stats->rx_crc_errors, "CRC Errors");
3085 NFE_SYSCTL_STAT_ADD32(ctx, child, "fae",
3086 &stats->rx_fae, "Frame Alignment Errors");
3087 NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors",
3088 &stats->rx_len_errors, "Length Errors");
3089 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3090 &stats->rx_unicast, "Unicast Frames");
3091 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3092 &stats->rx_multicast, "Multicast Frames");
3093 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3094 &stats->rx_broadcast, "Broadcast Frames");
3095 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3096 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3097 &stats->rx_octets, "Octets");
3098 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3099 &stats->rx_pause, "Pause frames");
3100 NFE_SYSCTL_STAT_ADD32(ctx, child, "drops",
3101 &stats->rx_drops, "Drop frames");
3104 /* Tx statistics. */
3105 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
3106 NULL, "Tx MAC statistics");
3107 child = SYSCTL_CHILDREN(tree);
3108 NFE_SYSCTL_STAT_ADD64(ctx, child, "octets",
3109 &stats->tx_octets, "Octets");
3110 NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits",
3111 &stats->tx_zero_rexmits, "Zero Retransmits");
3112 NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits",
3113 &stats->tx_one_rexmits, "One Retransmits");
3114 NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits",
3115 &stats->tx_multi_rexmits, "Multiple Retransmits");
3116 NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols",
3117 &stats->tx_late_cols, "Late Collisions");
3118 NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns",
3119 &stats->tx_fifo_underuns, "FIFO Underruns");
3120 NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts",
3121 &stats->tx_carrier_losts, "Carrier Lost Errors");
3122 NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals",
3123 &stats->tx_excess_deferals, "Excess Deferrals");
3124 NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors",
3125 &stats->tx_retry_errors, "Retry Errors");
3126 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3127 NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals",
3128 &stats->tx_deferals, "Deferrals");
3129 NFE_SYSCTL_STAT_ADD32(ctx, child, "frames",
3130 &stats->tx_frames, "Frames");
3131 NFE_SYSCTL_STAT_ADD32(ctx, child, "pause",
3132 &stats->tx_pause, "Pause Frames");
3134 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3135 NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast",
3136 &stats->tx_unicast, "Unicast Frames");
3137 NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast",
3138 &stats->tx_multicast, "Multicast Frames");
3139 NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast",
3140 &stats->tx_broadcast, "Broadcast Frames");
3144 #undef NFE_SYSCTL_STAT_ADD32
3145 #undef NFE_SYSCTL_STAT_ADD64
3148 nfe_stats_clear(struct nfe_softc *sc)
3152 if ((sc->nfe_flags & NFE_MIB_V1) != 0)
3153 mib_cnt = NFE_NUM_MIB_STATV1;
3154 else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0)
3155 mib_cnt = NFE_NUM_MIB_STATV2;
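/*
 * The MIB counters are evidently clear-on-read: walking the register
 * block and discarding the values resets the counts, so the next
 * nfe_stats_update() accumulates only fresh deltas.
 */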
3159 for (i = 0; i < mib_cnt; i += sizeof(uint32_t))
3160 NFE_READ(sc, NFE_TX_OCTET + i);
3162 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3163 NFE_READ(sc, NFE_TX_UNICAST);
3164 NFE_READ(sc, NFE_TX_MULTICAST);
3165 NFE_READ(sc, NFE_TX_BROADCAST);
3170 nfe_stats_update(struct nfe_softc *sc)
3172 struct nfe_hw_stats *stats;
3174 NFE_LOCK_ASSERT(sc);
3176 if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0)
3179 stats = &sc->nfe_stats;
3180 stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET);
3181 stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT);
3182 stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT);
3183 stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT);
3184 stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL);
3185 stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN);
3186 stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST);
3187 stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL);
3188 stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR);
3189 stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR);
3190 stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES);
3191 stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL);
3192 stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT);
3193 stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO);
3194 stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN);
3195 stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR);
3196 stats->rx_fae += NFE_READ(sc, NFE_RX_FAE);
3197 stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR);
3198 stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST);
3199 stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST);
3200 stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST);
3202 if ((sc->nfe_flags & NFE_MIB_V2) != 0) {
3203 stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL);
3204 stats->tx_frames += NFE_READ(sc, NFE_TX_FRAME);
3205 stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET);
3206 stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE);
3207 stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE);
3208 stats->rx_drops += NFE_READ(sc, NFE_RX_DROP);
3211 if ((sc->nfe_flags & NFE_MIB_V3) != 0) {
3212 stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST);
3213 stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST);
3214 stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST);