1 /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */
/*-
 * Copyright (c) 2006 Shigeaki Tagashira <shigeaki@se.hiroshima-u.ac.jp>
5 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD$");
26 #ifdef HAVE_KERNEL_OPTION_HEADERS
27 #include "opt_device_polling.h"
30 #include <sys/param.h>
31 #include <sys/endian.h>
32 #include <sys/systm.h>
33 #include <sys/sockio.h>
35 #include <sys/malloc.h>
36 #include <sys/module.h>
37 #include <sys/kernel.h>
38 #include <sys/queue.h>
39 #include <sys/socket.h>
40 #include <sys/sysctl.h>
41 #include <sys/taskqueue.h>
44 #include <net/if_arp.h>
45 #include <net/ethernet.h>
46 #include <net/if_dl.h>
47 #include <net/if_media.h>
48 #include <net/if_types.h>
49 #include <net/if_vlan_var.h>
53 #include <machine/bus.h>
54 #include <machine/resource.h>
58 #include <dev/mii/mii.h>
59 #include <dev/mii/miivar.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
64 #include <dev/nfe/if_nfereg.h>
65 #include <dev/nfe/if_nfevar.h>
67 MODULE_DEPEND(nfe, pci, 1, 1, 1);
68 MODULE_DEPEND(nfe, ether, 1, 1, 1);
69 MODULE_DEPEND(nfe, miibus, 1, 1, 1);
71 /* "device miibus" required. See GENERIC if you get errors here. */
72 #include "miibus_if.h"
74 static int nfe_probe(device_t);
75 static int nfe_attach(device_t);
76 static int nfe_detach(device_t);
77 static int nfe_suspend(device_t);
78 static int nfe_resume(device_t);
79 static int nfe_shutdown(device_t);
80 static void nfe_power(struct nfe_softc *);
81 static int nfe_miibus_readreg(device_t, int, int);
82 static int nfe_miibus_writereg(device_t, int, int, int);
83 static void nfe_miibus_statchg(device_t);
84 static void nfe_link_task(void *, int);
85 static void nfe_set_intr(struct nfe_softc *);
86 static __inline void nfe_enable_intr(struct nfe_softc *);
87 static __inline void nfe_disable_intr(struct nfe_softc *);
88 static int nfe_ioctl(struct ifnet *, u_long, caddr_t);
89 static void nfe_alloc_msix(struct nfe_softc *, int);
90 static int nfe_intr(void *);
91 static void nfe_int_task(void *, int);
92 static void *nfe_jalloc(struct nfe_softc *);
93 static void nfe_jfree(void *, void *);
94 static __inline void nfe_discard_rxbuf(struct nfe_softc *, int);
95 static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int);
96 static int nfe_newbuf(struct nfe_softc *, int);
97 static int nfe_jnewbuf(struct nfe_softc *, int);
98 static int nfe_rxeof(struct nfe_softc *, int);
99 static int nfe_jrxeof(struct nfe_softc *, int);
100 static void nfe_txeof(struct nfe_softc *);
101 static int nfe_encap(struct nfe_softc *, struct mbuf **);
102 static void nfe_setmulti(struct nfe_softc *);
103 static void nfe_tx_task(void *, int);
104 static void nfe_start(struct ifnet *);
105 static void nfe_watchdog(struct ifnet *);
106 static void nfe_init(void *);
107 static void nfe_init_locked(void *);
108 static void nfe_stop(struct ifnet *);
109 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
110 static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
111 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
112 static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
113 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
114 static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *);
115 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
116 static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
117 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
118 static int nfe_ifmedia_upd(struct ifnet *);
119 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
120 static void nfe_tick(void *);
121 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *);
122 static void nfe_set_macaddr(struct nfe_softc *, uint8_t *);
123 static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int);
125 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
126 static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS);
#ifdef NFE_DEBUG
static int nfedebug = 0;
#define	DPRINTF(sc, ...)	do {				\
	if (nfedebug)						\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#define	DPRINTFN(sc, n, ...)	do {				\
	if (nfedebug >= (n))					\
		device_printf((sc)->nfe_dev, __VA_ARGS__);	\
} while (0)
#else
#define	DPRINTF(sc, ...)
#define	DPRINTFN(sc, n, ...)
#endif
143 #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx)
144 #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx)
145 #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED)
147 #define NFE_JLIST_LOCK(_sc) mtx_lock(&(_sc)->nfe_jlist_mtx)
148 #define NFE_JLIST_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_jlist_mtx)
151 static int msi_disable = 0;
152 static int msix_disable = 0;
153 static int jumbo_disable = 0;
154 TUNABLE_INT("hw.nfe.msi_disable", &msi_disable);
155 TUNABLE_INT("hw.nfe.msix_disable", &msix_disable);
156 TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable);
158 static device_method_t nfe_methods[] = {
159 /* Device interface */
160 DEVMETHOD(device_probe, nfe_probe),
161 DEVMETHOD(device_attach, nfe_attach),
162 DEVMETHOD(device_detach, nfe_detach),
163 DEVMETHOD(device_suspend, nfe_suspend),
164 DEVMETHOD(device_resume, nfe_resume),
165 DEVMETHOD(device_shutdown, nfe_shutdown),
168 DEVMETHOD(bus_print_child, bus_generic_print_child),
169 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
172 DEVMETHOD(miibus_readreg, nfe_miibus_readreg),
173 DEVMETHOD(miibus_writereg, nfe_miibus_writereg),
174 DEVMETHOD(miibus_statchg, nfe_miibus_statchg),
static driver_t nfe_driver = {
	"nfe",
	nfe_methods,
	sizeof(struct nfe_softc)
};
185 static devclass_t nfe_devclass;
187 DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0);
188 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0);
190 static struct nfe_type nfe_devs[] = {
191 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN,
192 "NVIDIA nForce MCP Networking Adapter"},
193 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN,
194 "NVIDIA nForce2 MCP2 Networking Adapter"},
195 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1,
196 "NVIDIA nForce2 400 MCP4 Networking Adapter"},
197 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2,
198 "NVIDIA nForce2 400 MCP5 Networking Adapter"},
199 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1,
200 "NVIDIA nForce3 MCP3 Networking Adapter"},
201 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN,
202 "NVIDIA nForce3 250 MCP6 Networking Adapter"},
203 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4,
204 "NVIDIA nForce3 MCP7 Networking Adapter"},
205 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1,
206 "NVIDIA nForce4 CK804 MCP8 Networking Adapter"},
207 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2,
208 "NVIDIA nForce4 CK804 MCP9 Networking Adapter"},
209 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1,
210 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */
211 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2,
212 "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */
213 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1,
214 "NVIDIA nForce 430 MCP12 Networking Adapter"},
215 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2,
216 "NVIDIA nForce 430 MCP13 Networking Adapter"},
217 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1,
218 "NVIDIA nForce MCP55 Networking Adapter"},
219 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2,
220 "NVIDIA nForce MCP55 Networking Adapter"},
221 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1,
222 "NVIDIA nForce MCP61 Networking Adapter"},
223 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2,
224 "NVIDIA nForce MCP61 Networking Adapter"},
225 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3,
226 "NVIDIA nForce MCP61 Networking Adapter"},
227 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4,
228 "NVIDIA nForce MCP61 Networking Adapter"},
229 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1,
230 "NVIDIA nForce MCP65 Networking Adapter"},
231 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2,
232 "NVIDIA nForce MCP65 Networking Adapter"},
233 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3,
234 "NVIDIA nForce MCP65 Networking Adapter"},
235 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4,
236 "NVIDIA nForce MCP65 Networking Adapter"},
237 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1,
238 "NVIDIA nForce MCP67 Networking Adapter"},
239 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2,
240 "NVIDIA nForce MCP67 Networking Adapter"},
241 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3,
242 "NVIDIA nForce MCP67 Networking Adapter"},
243 {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4,
244 "NVIDIA nForce MCP67 Networking Adapter"},
/* Probe for supported hardware IDs. */
251 nfe_probe(device_t dev)
/* Check for matching PCI device IDs. */
257 while (t->name != NULL) {
258 if ((pci_get_vendor(dev) == t->vid_id) &&
259 (pci_get_device(dev) == t->dev_id)) {
260 device_set_desc(dev, t->name);
261 return (BUS_PROBE_DEFAULT);
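/*
 * nfe_alloc_msix() maps the two extra BARs used by MSI-X: the vector
 * table (BAR(2) here) and the pending-bit array (BAR(3)).  Both must be
 * active before pci_alloc_msix() can hand out the message vectors.
 */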
270 nfe_alloc_msix(struct nfe_softc *sc, int count)
	sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
277 if (sc->nfe_msix_res == NULL) {
278 device_printf(sc->nfe_dev,
279 "couldn't allocate MSIX table resource\n");
283 sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev,
284 SYS_RES_MEMORY, &rid, RF_ACTIVE);
285 if (sc->nfe_msix_pba_res == NULL) {
286 device_printf(sc->nfe_dev,
287 "couldn't allocate MSIX PBA resource\n");
		bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
294 if (pci_alloc_msix(sc->nfe_dev, &count) == 0) {
295 if (count == NFE_MSI_MESSAGES) {
297 device_printf(sc->nfe_dev,
298 "Using %d MSIX messages\n", count);
302 device_printf(sc->nfe_dev,
303 "couldn't allocate MSIX\n");
304 pci_release_msi(sc->nfe_dev);
305 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
306 PCIR_BAR(3), sc->nfe_msix_pba_res);
307 bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY,
308 PCIR_BAR(2), sc->nfe_msix_res);
309 sc->nfe_msix_pba_res = NULL;
310 sc->nfe_msix_res = NULL;
316 nfe_attach(device_t dev)
318 struct nfe_softc *sc;
320 bus_addr_t dma_addr_max;
321 int error = 0, i, msic, reg, rid;
323 sc = device_get_softc(dev);
	mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
328 mtx_init(&sc->nfe_jlist_mtx, "nfe_jlist_mtx", NULL, MTX_DEF);
329 callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0);
330 TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc);
331 SLIST_INIT(&sc->nfe_jfree_listhead);
332 SLIST_INIT(&sc->nfe_jinuse_listhead);
334 pci_enable_busmaster(dev);
	sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
339 if (sc->nfe_res[0] == NULL) {
340 device_printf(dev, "couldn't map memory resources\n");
341 mtx_destroy(&sc->nfe_mtx);
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
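		/*
		 * Offsets are relative to the PCI Express capability:
		 * 0x08 is Device Control (max read request size lives in
		 * bits 14:12), 0x0c is Link Capabilities and 0x12 is
		 * Link Status.
		 */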
348 v = pci_read_config(dev, reg + 0x08, 2);
349 /* Change max. read request size to 4096. */
352 pci_write_config(dev, reg + 0x08, v, 2);
354 v = pci_read_config(dev, reg + 0x0c, 2);
355 /* link capability */
357 width = pci_read_config(dev, reg + 0x12, 2);
358 /* negotiated link width */
359 width = (width >> 4) & 0x3f;
361 device_printf(sc->nfe_dev,
362 "warning, negotiated width of link(x%d) != "
363 "max. width of link(x%d)\n", width, v);
366 /* Allocate interrupt */
367 if (msix_disable == 0 || msi_disable == 0) {
368 if (msix_disable == 0 &&
369 (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES)
370 nfe_alloc_msix(sc, msic);
371 if (msi_disable == 0 && sc->nfe_msix == 0 &&
372 (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES &&
373 pci_alloc_msi(dev, &msic) == 0) {
374 if (msic == NFE_MSI_MESSAGES) {
377 "Using %d MSI messages\n", msic);
380 pci_release_msi(dev);
384 if (sc->nfe_msix == 0 && sc->nfe_msi == 0) {
386 sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
387 RF_SHAREABLE | RF_ACTIVE);
388 if (sc->nfe_irq[0] == NULL) {
389 device_printf(dev, "couldn't allocate IRQ resources\n");
394 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
395 sc->nfe_irq[i] = bus_alloc_resource_any(dev,
396 SYS_RES_IRQ, &rid, RF_ACTIVE);
397 if (sc->nfe_irq[i] == NULL) {
399 "couldn't allocate IRQ resources for "
400 "message %d\n", rid);
405 /* Map interrupts to vector 0. */
406 if (sc->nfe_msix != 0) {
407 NFE_WRITE(sc, NFE_MSIX_MAP0, 0);
408 NFE_WRITE(sc, NFE_MSIX_MAP1, 0);
409 } else if (sc->nfe_msi != 0) {
410 NFE_WRITE(sc, NFE_MSI_MAP0, 0);
411 NFE_WRITE(sc, NFE_MSI_MAP1, 0);
415 /* Set IRQ status/mask register. */
416 sc->nfe_irq_status = NFE_IRQ_STATUS;
417 sc->nfe_irq_mask = NFE_IRQ_MASK;
418 sc->nfe_intrs = NFE_IRQ_WANTED;
420 if (sc->nfe_msix != 0) {
421 sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS;
422 sc->nfe_nointrs = NFE_IRQ_WANTED;
423 } else if (sc->nfe_msi != 0) {
424 sc->nfe_irq_mask = NFE_MSI_IRQ_MASK;
425 sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED;
428 sc->nfe_devid = pci_get_device(dev);
429 sc->nfe_revid = pci_get_revid(dev);
432 switch (sc->nfe_devid) {
433 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
434 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
435 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
436 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
437 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
439 case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
440 case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
441 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
443 case PCI_PRODUCT_NVIDIA_CK804_LAN1:
444 case PCI_PRODUCT_NVIDIA_CK804_LAN2:
445 case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
446 case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
447 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
449 case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
450 case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
451 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
452 NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL;
455 case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
456 case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
457 case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
458 case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
459 case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
460 case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
461 case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
462 case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
463 sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT |
464 NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL;
466 case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
467 case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
468 case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
469 case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
470 sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
471 NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL;
476 /* Check for reversed ethernet address */
477 if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
478 sc->nfe_flags |= NFE_CORRECT_MACADDR;
479 nfe_get_macaddr(sc, sc->eaddr);
	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
483 dma_addr_max = BUS_SPACE_MAXADDR_32BIT;
484 if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0)
485 dma_addr_max = NFE_DMA_MAXADDR;
486 error = bus_dma_tag_create(
487 bus_get_dma_tag(sc->nfe_dev), /* parent */
488 1, 0, /* alignment, boundary */
489 dma_addr_max, /* lowaddr */
490 BUS_SPACE_MAXADDR, /* highaddr */
491 NULL, NULL, /* filter, filterarg */
492 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
493 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
495 NULL, NULL, /* lockfunc, lockarg */
496 &sc->nfe_parent_tag);
500 ifp = sc->nfe_ifp = if_alloc(IFT_ETHER);
502 device_printf(dev, "can not if_alloc()\n");
506 TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp);
	/*
	 * Allocate Tx and Rx rings.
	 */
511 if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0)
514 if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0)
517 nfe_alloc_jrx_ring(sc, &sc->jrxq);
519 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
520 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
521 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
522 &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
523 "max number of Rx events to process");
525 sc->nfe_process_limit = NFE_PROC_DEFAULT;
526 error = resource_int_value(device_get_name(dev), device_get_unit(dev),
527 "process_limit", &sc->nfe_process_limit);
529 if (sc->nfe_process_limit < NFE_PROC_MIN ||
530 sc->nfe_process_limit > NFE_PROC_MAX) {
531 device_printf(dev, "process_limit value out of range; "
532 "using default: %d\n", NFE_PROC_DEFAULT);
533 sc->nfe_process_limit = NFE_PROC_DEFAULT;
538 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
539 ifp->if_mtu = ETHERMTU;
540 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
541 ifp->if_ioctl = nfe_ioctl;
542 ifp->if_start = nfe_start;
543 ifp->if_hwassist = 0;
544 ifp->if_capabilities = 0;
545 ifp->if_watchdog = NULL;
546 ifp->if_init = nfe_init;
547 IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1);
548 ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1;
549 IFQ_SET_READY(&ifp->if_snd);
551 if (sc->nfe_flags & NFE_HW_CSUM) {
552 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
553 ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO;
555 ifp->if_capenable = ifp->if_capabilities;
557 sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;
558 /* VLAN capability setup. */
559 ifp->if_capabilities |= IFCAP_VLAN_MTU;
560 if ((sc->nfe_flags & NFE_HW_VLAN) != 0) {
561 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
562 if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0)
563 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
565 ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
572 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
574 #ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd,
	    nfe_ifmedia_sts)) {
581 device_printf(dev, "MII without any phy!\n");
585 ether_ifattach(ifp, sc->eaddr);
587 TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc);
588 sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK,
589 taskqueue_thread_enqueue, &sc->nfe_tq);
590 taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq",
591 device_get_nameunit(sc->nfe_dev));
593 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
594 error = bus_setup_intr(dev, sc->nfe_irq[0],
595 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
596 &sc->nfe_intrhand[0]);
598 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
599 error = bus_setup_intr(dev, sc->nfe_irq[i],
600 INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc,
601 &sc->nfe_intrhand[i]);
607 device_printf(dev, "couldn't set up irq\n");
608 taskqueue_free(sc->nfe_tq);
623 nfe_detach(device_t dev)
625 struct nfe_softc *sc;
627 uint8_t eaddr[ETHER_ADDR_LEN];
630 sc = device_get_softc(dev);
631 KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized"));
634 #ifdef DEVICE_POLLING
635 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
638 if (device_is_attached(dev)) {
641 ifp->if_flags &= ~IFF_UP;
643 callout_drain(&sc->nfe_stat_ch);
644 taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task);
645 taskqueue_drain(taskqueue_swi, &sc->nfe_link_task);
650 /* restore ethernet address */
651 if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
652 for (i = 0; i < ETHER_ADDR_LEN; i++) {
653 eaddr[i] = sc->eaddr[5 - i];
656 bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN);
657 nfe_set_macaddr(sc, eaddr);
661 device_delete_child(dev, sc->nfe_miibus);
662 bus_generic_detach(dev);
663 if (sc->nfe_tq != NULL) {
664 taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
665 taskqueue_free(sc->nfe_tq);
669 for (i = 0; i < NFE_MSI_MESSAGES; i++) {
670 if (sc->nfe_intrhand[i] != NULL) {
671 bus_teardown_intr(dev, sc->nfe_irq[i],
672 sc->nfe_intrhand[i]);
673 sc->nfe_intrhand[i] = NULL;
677 if (sc->nfe_msi == 0 && sc->nfe_msix == 0) {
678 if (sc->nfe_irq[0] != NULL)
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->nfe_irq[0]);
682 for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) {
683 if (sc->nfe_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->nfe_irq[i]);
				sc->nfe_irq[i] = NULL;
689 pci_release_msi(dev);
691 if (sc->nfe_msix_pba_res != NULL) {
692 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3),
693 sc->nfe_msix_pba_res);
694 sc->nfe_msix_pba_res = NULL;
696 if (sc->nfe_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2),
		    sc->nfe_msix_res);
		sc->nfe_msix_res = NULL;
701 if (sc->nfe_res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->nfe_res[0]);
		sc->nfe_res[0] = NULL;
707 nfe_free_tx_ring(sc, &sc->txq);
708 nfe_free_rx_ring(sc, &sc->rxq);
709 nfe_free_jrx_ring(sc, &sc->jrxq);
711 if (sc->nfe_parent_tag) {
712 bus_dma_tag_destroy(sc->nfe_parent_tag);
713 sc->nfe_parent_tag = NULL;
716 mtx_destroy(&sc->nfe_jlist_mtx);
717 mtx_destroy(&sc->nfe_mtx);
724 nfe_suspend(device_t dev)
726 struct nfe_softc *sc;
728 sc = device_get_softc(dev);
731 nfe_stop(sc->nfe_ifp);
732 sc->nfe_suspended = 1;
740 nfe_resume(device_t dev)
742 struct nfe_softc *sc;
745 sc = device_get_softc(dev);
	if (ifp->if_flags & IFF_UP)
		nfe_init_locked(sc);
	sc->nfe_suspended = 0;
758 /* Take PHY/NIC out of powerdown, from Linux */
760 nfe_power(struct nfe_softc *sc)
	if ((sc->nfe_flags & NFE_PWR_MGMT) == 0)
		return;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
767 NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
769 NFE_WRITE(sc, NFE_MAC_RESET, 0);
771 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
772 pwr = NFE_READ(sc, NFE_PWR2_CTL);
773 pwr &= ~NFE_PWR2_WAKEUP_MASK;
774 if (sc->nfe_revid >= 0xa3 &&
775 (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 ||
776 sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2))
777 pwr |= NFE_PWR2_REVA3;
778 NFE_WRITE(sc, NFE_PWR2_CTL, pwr);
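/*
 * The MII status-change callback below only schedules nfe_link_task();
 * the actual reprogramming of the PHY interface, speed and pause
 * registers is deferred to the task so it can run under the driver lock.
 */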
783 nfe_miibus_statchg(device_t dev)
785 struct nfe_softc *sc;
787 sc = device_get_softc(dev);
788 taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task);
793 nfe_link_task(void *arg, int pending)
795 struct nfe_softc *sc;
796 struct mii_data *mii;
798 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;
799 uint32_t gmask, rxctl, txctl, val;
801 sc = (struct nfe_softc *)arg;
805 mii = device_get_softc(sc->nfe_miibus);
807 if (mii == NULL || ifp == NULL) {
812 if (mii->mii_media_status & IFM_ACTIVE) {
813 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
818 phy = NFE_READ(sc, NFE_PHY_IFACE);
819 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);
821 seed = NFE_READ(sc, NFE_RNDSEED);
822 seed &= ~NFE_SEED_MASK;
824 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) {
825 phy |= NFE_PHY_HDX; /* half-duplex */
826 misc |= NFE_MISC1_HDX;
829 switch (IFM_SUBTYPE(mii->mii_media_active)) {
830 case IFM_1000_T: /* full-duplex only */
831 link |= NFE_MEDIA_1000T;
832 seed |= NFE_SEED_1000T;
833 phy |= NFE_PHY_1000T;
836 link |= NFE_MEDIA_100TX;
837 seed |= NFE_SEED_100TX;
838 phy |= NFE_PHY_100TX;
841 link |= NFE_MEDIA_10T;
842 seed |= NFE_SEED_10T;
846 if ((phy & 0x10000000) != 0) {
847 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
848 val = NFE_R1_MAGIC_1000;
850 val = NFE_R1_MAGIC_10_100;
852 val = NFE_R1_MAGIC_DEFAULT;
853 NFE_WRITE(sc, NFE_SETUP_R1, val);
855 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */
857 NFE_WRITE(sc, NFE_PHY_IFACE, phy);
858 NFE_WRITE(sc, NFE_MISC1, misc);
859 NFE_WRITE(sc, NFE_LINKSPEED, link);
861 gmask = mii->mii_media_active & IFM_GMASK;
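	/*
	 * On a full-duplex link, flow control may have been negotiated;
	 * PHY drivers of this vintage report the result through the
	 * IFM_FLAG0 (Rx pause) and IFM_FLAG1 (Tx pause) media flags.
	 */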
862 if ((gmask & IFM_FDX) != 0) {
		/* It seems all hardware supports Rx pause frames. */
864 val = NFE_READ(sc, NFE_RXFILTER);
865 if ((gmask & IFM_FLAG0) != 0)
866 val |= NFE_PFF_RX_PAUSE;
868 val &= ~NFE_PFF_RX_PAUSE;
869 NFE_WRITE(sc, NFE_RXFILTER, val);
870 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
871 val = NFE_READ(sc, NFE_MISC1);
872 if ((gmask & IFM_FLAG1) != 0) {
873 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
874 NFE_TX_PAUSE_FRAME_ENABLE);
875 val |= NFE_MISC1_TX_PAUSE;
877 val &= ~NFE_MISC1_TX_PAUSE;
878 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
879 NFE_TX_PAUSE_FRAME_DISABLE);
881 NFE_WRITE(sc, NFE_MISC1, val);
884 /* disable rx/tx pause frames */
885 val = NFE_READ(sc, NFE_RXFILTER);
886 val &= ~NFE_PFF_RX_PAUSE;
887 NFE_WRITE(sc, NFE_RXFILTER, val);
888 if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) {
889 NFE_WRITE(sc, NFE_TX_PAUSE_FRAME,
890 NFE_TX_PAUSE_FRAME_DISABLE);
891 val = NFE_READ(sc, NFE_MISC1);
892 val &= ~NFE_MISC1_TX_PAUSE;
893 NFE_WRITE(sc, NFE_MISC1, val);
897 txctl = NFE_READ(sc, NFE_TX_CTL);
898 rxctl = NFE_READ(sc, NFE_RX_CTL);
899 if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
900 txctl |= NFE_TX_START;
901 rxctl |= NFE_RX_START;
903 txctl &= ~NFE_TX_START;
904 rxctl &= ~NFE_RX_START;
906 NFE_WRITE(sc, NFE_TX_CTL, txctl);
907 NFE_WRITE(sc, NFE_RX_CTL, rxctl);
914 nfe_miibus_readreg(device_t dev, int phy, int reg)
916 struct nfe_softc *sc = device_get_softc(dev);
920 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
922 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
923 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
927 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);
929 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
931 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
934 if (ntries == NFE_TIMEOUT) {
935 DPRINTFN(sc, 2, "timeout waiting for PHY\n");
939 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
940 DPRINTFN(sc, 2, "could not read PHY\n");
944 val = NFE_READ(sc, NFE_PHY_DATA);
945 if (val != 0xffffffff && val != 0)
946 sc->mii_phyaddr = phy;
948 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val);
955 nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
957 struct nfe_softc *sc = device_get_softc(dev);
961 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
963 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
964 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
968 NFE_WRITE(sc, NFE_PHY_DATA, val);
969 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
970 NFE_WRITE(sc, NFE_PHY_CTL, ctl);
972 for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) {
974 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
978 if (nfedebug >= 2 && ntries == NFE_TIMEOUT)
979 device_printf(sc->nfe_dev, "could not write to PHY\n");
/*
 * Allocate a jumbo buffer.
 */
988 nfe_jalloc(struct nfe_softc *sc)
990 struct nfe_jpool_entry *entry;
	entry = SLIST_FIRST(&sc->nfe_jfree_listhead);
	if (entry == NULL) {
		NFE_JLIST_UNLOCK(sc);
		return (NULL);
	}
1001 SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
1002 SLIST_INSERT_HEAD(&sc->nfe_jinuse_listhead, entry, jpool_entries);
1004 NFE_JLIST_UNLOCK(sc);
1006 return (sc->jrxq.jslots[entry->slot]);
/*
 * Release a jumbo buffer.
 */
1013 nfe_jfree(void *buf, void *args)
1015 struct nfe_softc *sc;
1016 struct nfe_jpool_entry *entry;
1019 /* Extract the softc struct pointer. */
1020 sc = (struct nfe_softc *)args;
1021 KASSERT(sc != NULL, ("%s: can't find softc pointer!", __func__));
1024 /* Calculate the slot this buffer belongs to. */
1025 i = ((vm_offset_t)buf
1026 - (vm_offset_t)sc->jrxq.jpool) / NFE_JLEN;
1027 KASSERT(i >= 0 && i < NFE_JSLOTS,
1028 ("%s: asked to free buffer that we don't manage!", __func__));
1030 entry = SLIST_FIRST(&sc->nfe_jinuse_listhead);
1031 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
1033 SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
1034 SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry, jpool_entries);
1035 if (SLIST_EMPTY(&sc->nfe_jinuse_listhead))
1038 NFE_JLIST_UNLOCK(sc);
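/*
 * The jumbo pool is a single contiguous DMA allocation carved into
 * NFE_JLEN-sized slots; nfe_jalloc() and nfe_jfree() only move slot
 * entries between the free and in-use lists, so refilling the jumbo
 * Rx ring never has to go back to the VM system.
 */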
struct nfe_dmamap_arg {
	bus_addr_t	nfe_busaddr;
};
1046 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1048 struct nfe_dmamap_arg ctx;
1049 struct nfe_rx_data *data;
1051 int i, error, descsize;
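	/*
	 * Chips with NFE_40BIT_ADDR use the larger nfe_desc64 layout,
	 * which carries the buffer address as two 32-bit words; all
	 * others use nfe_desc32.  Ring sizing follows the active format.
	 */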
1053 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1054 desc = ring->desc64;
1055 descsize = sizeof (struct nfe_desc64);
1057 desc = ring->desc32;
1058 descsize = sizeof (struct nfe_desc32);
1061 ring->cur = ring->next = 0;
1063 error = bus_dma_tag_create(sc->nfe_parent_tag,
1064 NFE_RING_ALIGN, 0, /* alignment, boundary */
1065 BUS_SPACE_MAXADDR, /* lowaddr */
1066 BUS_SPACE_MAXADDR, /* highaddr */
1067 NULL, NULL, /* filter, filterarg */
1068 NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1069 NFE_RX_RING_COUNT * descsize, /* maxsegsize */
1071 NULL, NULL, /* lockfunc, lockarg */
1072 &ring->rx_desc_tag);
1074 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
	/* allocate memory for descriptors */
1079 error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK |
1080 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map);
1082 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1085 if (sc->nfe_flags & NFE_40BIT_ADDR)
1086 ring->desc64 = desc;
1088 ring->desc32 = desc;
1090 /* map desc to device visible address space */
1091 ctx.nfe_busaddr = 0;
1092 error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc,
1093 NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1095 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1098 ring->physaddr = ctx.nfe_busaddr;
1100 error = bus_dma_tag_create(sc->nfe_parent_tag,
1101 1, 0, /* alignment, boundary */
1102 BUS_SPACE_MAXADDR, /* lowaddr */
1103 BUS_SPACE_MAXADDR, /* highaddr */
1104 NULL, NULL, /* filter, filterarg */
1105 MCLBYTES, 1, /* maxsize, nsegments */
1106 MCLBYTES, /* maxsegsize */
1108 NULL, NULL, /* lockfunc, lockarg */
1109 &ring->rx_data_tag);
1111 device_printf(sc->nfe_dev, "could not create Rx DMA tag\n");
1115 error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map);
1117 device_printf(sc->nfe_dev,
1118 "could not create Rx DMA spare map\n");
	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
1125 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1126 data = &sc->rxq.data[i];
1127 data->rx_data_map = NULL;
1129 error = bus_dmamap_create(ring->rx_data_tag, 0,
1130 &data->rx_data_map);
1132 device_printf(sc->nfe_dev,
1133 "could not create Rx DMA map\n");
1144 nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1146 struct nfe_dmamap_arg ctx;
1147 struct nfe_rx_data *data;
1149 struct nfe_jpool_entry *entry;
1151 int i, error, descsize;
	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;
1155 if (jumbo_disable != 0) {
1156 device_printf(sc->nfe_dev, "disabling jumbo frame support\n");
1157 sc->nfe_jumbo_disable = 1;
1161 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1162 desc = ring->jdesc64;
1163 descsize = sizeof (struct nfe_desc64);
1165 desc = ring->jdesc32;
1166 descsize = sizeof (struct nfe_desc32);
1169 ring->jcur = ring->jnext = 0;
1171 /* Create DMA tag for jumbo Rx ring. */
1172 error = bus_dma_tag_create(sc->nfe_parent_tag,
1173 NFE_RING_ALIGN, 0, /* alignment, boundary */
1174 BUS_SPACE_MAXADDR, /* lowaddr */
1175 BUS_SPACE_MAXADDR, /* highaddr */
1176 NULL, NULL, /* filter, filterarg */
1177 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */
1179 NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */
1181 NULL, NULL, /* lockfunc, lockarg */
1182 &ring->jrx_desc_tag);
1184 device_printf(sc->nfe_dev,
1185 "could not create jumbo ring DMA tag\n");
1189 /* Create DMA tag for jumbo buffer blocks. */
1190 error = bus_dma_tag_create(sc->nfe_parent_tag,
1191 PAGE_SIZE, 0, /* alignment, boundary */
1192 BUS_SPACE_MAXADDR, /* lowaddr */
1193 BUS_SPACE_MAXADDR, /* highaddr */
1194 NULL, NULL, /* filter, filterarg */
1195 NFE_JMEM, /* maxsize */
1197 NFE_JMEM, /* maxsegsize */
1199 NULL, NULL, /* lockfunc, lockarg */
1200 &ring->jrx_jumbo_tag);
1202 device_printf(sc->nfe_dev,
1203 "could not create jumbo Rx buffer block DMA tag\n");
1207 /* Create DMA tag for jumbo Rx buffers. */
1208 error = bus_dma_tag_create(sc->nfe_parent_tag,
1209 PAGE_SIZE, 0, /* alignment, boundary */
1210 BUS_SPACE_MAXADDR, /* lowaddr */
1211 BUS_SPACE_MAXADDR, /* highaddr */
1212 NULL, NULL, /* filter, filterarg */
1213 NFE_JLEN, /* maxsize */
1215 NFE_JLEN, /* maxsegsize */
1217 NULL, NULL, /* lockfunc, lockarg */
1218 &ring->jrx_data_tag);
1220 device_printf(sc->nfe_dev,
1221 "could not create jumbo Rx buffer DMA tag\n");
1225 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1226 error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK |
1227 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map);
1229 device_printf(sc->nfe_dev,
1230 "could not allocate DMA'able memory for jumbo Rx ring\n");
1233 if (sc->nfe_flags & NFE_40BIT_ADDR)
1234 ring->jdesc64 = desc;
1236 ring->jdesc32 = desc;
1238 ctx.nfe_busaddr = 0;
1239 error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc,
1240 NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1242 device_printf(sc->nfe_dev,
1243 "could not load DMA'able memory for jumbo Rx ring\n");
1246 ring->jphysaddr = ctx.nfe_busaddr;
1248 /* Create DMA maps for jumbo Rx buffers. */
1249 error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map);
1251 device_printf(sc->nfe_dev,
1252 "could not create jumbo Rx DMA spare map\n");
1256 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1257 data = &sc->jrxq.jdata[i];
1258 data->rx_data_map = NULL;
1260 error = bus_dmamap_create(ring->jrx_data_tag, 0,
1261 &data->rx_data_map);
1263 device_printf(sc->nfe_dev,
1264 "could not create jumbo Rx DMA map\n");
1269 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
1270 error = bus_dmamem_alloc(ring->jrx_jumbo_tag, (void **)&ring->jpool,
1271 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1272 &ring->jrx_jumbo_map);
1274 device_printf(sc->nfe_dev,
1275 "could not allocate DMA'able memory for jumbo pool\n");
1279 ctx.nfe_busaddr = 0;
1280 error = bus_dmamap_load(ring->jrx_jumbo_tag, ring->jrx_jumbo_map,
1281 ring->jpool, NFE_JMEM, nfe_dma_map_segs, &ctx, 0);
1283 device_printf(sc->nfe_dev,
1284 "could not load DMA'able memory for jumbo pool\n");
	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = ring->jpool;
	for (i = 0; i < NFE_JSLOTS; i++) {
		ring->jslots[i] = ptr;
		ptr += NFE_JLEN;
		entry = malloc(sizeof(struct nfe_jpool_entry), M_DEVBUF,
		    M_NOWAIT);
		if (entry == NULL) {
			device_printf(sc->nfe_dev,
			    "no memory for jumbo buffers!\n");
			goto jumbo_fail;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry,
		    jpool_entries);
	}

	return;

jumbo_fail:
	/*
	 * Running without jumbo frame support is ok for most cases
	 * so don't fail on creating dma tag/map for jumbo frame.
	 */
	nfe_free_jrx_ring(sc, ring);
1317 device_printf(sc->nfe_dev, "disabling jumbo frame support due to "
1318 "resource shortage\n");
1319 sc->nfe_jumbo_disable = 1;
1324 nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1330 ring->cur = ring->next = 0;
1331 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1332 desc = ring->desc64;
1333 descsize = sizeof (struct nfe_desc64);
1335 desc = ring->desc32;
1336 descsize = sizeof (struct nfe_desc32);
1338 bzero(desc, descsize * NFE_RX_RING_COUNT);
1339 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1340 if (nfe_newbuf(sc, i) != 0)
1344 bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map,
1345 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1352 nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1358 ring->jcur = ring->jnext = 0;
1359 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1360 desc = ring->jdesc64;
1361 descsize = sizeof (struct nfe_desc64);
1363 desc = ring->jdesc32;
1364 descsize = sizeof (struct nfe_desc32);
	bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT);
1367 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1368 if (nfe_jnewbuf(sc, i) != 0)
1372 bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map,
1373 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1380 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
1382 struct nfe_rx_data *data;
1386 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1387 desc = ring->desc64;
1388 descsize = sizeof (struct nfe_desc64);
1390 desc = ring->desc32;
1391 descsize = sizeof (struct nfe_desc32);
1394 for (i = 0; i < NFE_RX_RING_COUNT; i++) {
1395 data = &ring->data[i];
1396 if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->rx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
1401 if (data->m != NULL) {
1406 if (ring->rx_data_tag != NULL) {
1407 if (ring->rx_spare_map != NULL) {
1408 bus_dmamap_destroy(ring->rx_data_tag,
1409 ring->rx_spare_map);
1410 ring->rx_spare_map = NULL;
1412 bus_dma_tag_destroy(ring->rx_data_tag);
1413 ring->rx_data_tag = NULL;
1417 bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map);
1418 bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map);
1419 ring->desc64 = NULL;
1420 ring->desc32 = NULL;
1421 ring->rx_desc_map = NULL;
1423 if (ring->rx_desc_tag != NULL) {
1424 bus_dma_tag_destroy(ring->rx_desc_tag);
1425 ring->rx_desc_tag = NULL;
1431 nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring)
1433 struct nfe_jpool_entry *entry;
1434 struct nfe_rx_data *data;
	if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0)
		return;

	NFE_JLIST_LOCK(sc);
1442 while ((entry = SLIST_FIRST(&sc->nfe_jinuse_listhead))) {
1443 device_printf(sc->nfe_dev,
1444 "asked to free buffer that is in use!\n");
1445 SLIST_REMOVE_HEAD(&sc->nfe_jinuse_listhead, jpool_entries);
		SLIST_INSERT_HEAD(&sc->nfe_jfree_listhead, entry,
		    jpool_entries);
1450 while (!SLIST_EMPTY(&sc->nfe_jfree_listhead)) {
1451 entry = SLIST_FIRST(&sc->nfe_jfree_listhead);
1452 SLIST_REMOVE_HEAD(&sc->nfe_jfree_listhead, jpool_entries);
1453 free(entry, M_DEVBUF);
1455 NFE_JLIST_UNLOCK(sc);
1457 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1458 desc = ring->jdesc64;
1459 descsize = sizeof (struct nfe_desc64);
1461 desc = ring->jdesc32;
1462 descsize = sizeof (struct nfe_desc32);
1465 for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
1466 data = &ring->jdata[i];
1467 if (data->rx_data_map != NULL) {
			bus_dmamap_destroy(ring->jrx_data_tag,
			    data->rx_data_map);
			data->rx_data_map = NULL;
1472 if (data->m != NULL) {
1477 if (ring->jrx_data_tag != NULL) {
1478 if (ring->jrx_spare_map != NULL) {
1479 bus_dmamap_destroy(ring->jrx_data_tag,
1480 ring->jrx_spare_map);
1481 ring->jrx_spare_map = NULL;
1483 bus_dma_tag_destroy(ring->jrx_data_tag);
1484 ring->jrx_data_tag = NULL;
1488 bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map);
1489 bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map);
1490 ring->jdesc64 = NULL;
1491 ring->jdesc32 = NULL;
1492 ring->jrx_desc_map = NULL;
1494 /* Destroy jumbo buffer block. */
1495 if (ring->jrx_jumbo_map != NULL)
1496 bus_dmamap_unload(ring->jrx_jumbo_tag, ring->jrx_jumbo_map);
1497 if (ring->jrx_jumbo_map != NULL) {
1498 bus_dmamem_free(ring->jrx_jumbo_tag, ring->jpool,
1499 ring->jrx_jumbo_map);
1501 ring->jrx_jumbo_map = NULL;
1503 if (ring->jrx_desc_tag != NULL) {
1504 bus_dma_tag_destroy(ring->jrx_desc_tag);
1505 ring->jrx_desc_tag = NULL;
1511 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1513 struct nfe_dmamap_arg ctx;
1518 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1519 desc = ring->desc64;
1520 descsize = sizeof (struct nfe_desc64);
1522 desc = ring->desc32;
1523 descsize = sizeof (struct nfe_desc32);
1527 ring->cur = ring->next = 0;
1529 error = bus_dma_tag_create(sc->nfe_parent_tag,
1530 NFE_RING_ALIGN, 0, /* alignment, boundary */
1531 BUS_SPACE_MAXADDR, /* lowaddr */
1532 BUS_SPACE_MAXADDR, /* highaddr */
1533 NULL, NULL, /* filter, filterarg */
1534 NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */
1535 NFE_TX_RING_COUNT * descsize, /* maxsegsize */
1537 NULL, NULL, /* lockfunc, lockarg */
1538 &ring->tx_desc_tag);
1540 device_printf(sc->nfe_dev, "could not create desc DMA tag\n");
1544 error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK |
1545 BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map);
1547 device_printf(sc->nfe_dev, "could not create desc DMA map\n");
1550 if (sc->nfe_flags & NFE_40BIT_ADDR)
1551 ring->desc64 = desc;
1553 ring->desc32 = desc;
1555 ctx.nfe_busaddr = 0;
1556 error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc,
1557 NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0);
1559 device_printf(sc->nfe_dev, "could not load desc DMA map\n");
1562 ring->physaddr = ctx.nfe_busaddr;
1564 error = bus_dma_tag_create(sc->nfe_parent_tag,
1574 &ring->tx_data_tag);
1576 device_printf(sc->nfe_dev, "could not create Tx DMA tag\n");
1580 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1581 error = bus_dmamap_create(ring->tx_data_tag, 0,
1582 &ring->data[i].tx_data_map);
1584 device_printf(sc->nfe_dev,
1585 "could not create Tx DMA map\n");
1596 nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1601 sc->nfe_force_tx = 0;
1603 ring->cur = ring->next = 0;
1604 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1605 desc = ring->desc64;
1606 descsize = sizeof (struct nfe_desc64);
1608 desc = ring->desc32;
1609 descsize = sizeof (struct nfe_desc32);
1611 bzero(desc, descsize * NFE_TX_RING_COUNT);
1613 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1614 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1619 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
1621 struct nfe_tx_data *data;
1625 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1626 desc = ring->desc64;
1627 descsize = sizeof (struct nfe_desc64);
1629 desc = ring->desc32;
1630 descsize = sizeof (struct nfe_desc32);
1633 for (i = 0; i < NFE_TX_RING_COUNT; i++) {
1634 data = &ring->data[i];
1636 if (data->m != NULL) {
1637 bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map,
1638 BUS_DMASYNC_POSTWRITE);
1639 bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map);
1643 if (data->tx_data_map != NULL) {
			bus_dmamap_destroy(ring->tx_data_tag,
			    data->tx_data_map);
			data->tx_data_map = NULL;
1650 if (ring->tx_data_tag != NULL) {
1651 bus_dma_tag_destroy(ring->tx_data_tag);
1652 ring->tx_data_tag = NULL;
1656 bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map,
1657 BUS_DMASYNC_POSTWRITE);
1658 bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map);
1659 bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map);
1660 ring->desc64 = NULL;
1661 ring->desc32 = NULL;
1662 ring->tx_desc_map = NULL;
1663 bus_dma_tag_destroy(ring->tx_desc_tag);
1664 ring->tx_desc_tag = NULL;
1668 #ifdef DEVICE_POLLING
1669 static poll_handler_t nfe_poll;
1673 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1675 struct nfe_softc *sc = ifp->if_softc;
1680 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1685 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1686 nfe_jrxeof(sc, count);
1688 nfe_rxeof(sc, count);
1690 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1691 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1693 if (cmd == POLL_AND_CHECK_STATUS) {
1694 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1698 NFE_WRITE(sc, sc->nfe_irq_status, r);
1700 if (r & NFE_IRQ_LINK) {
1701 NFE_READ(sc, NFE_PHY_STATUS);
1702 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1703 DPRINTF(sc, "link state changed\n");
1708 #endif /* DEVICE_POLLING */
1711 nfe_set_intr(struct nfe_softc *sc)
1714 if (sc->nfe_msi != 0)
1715 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
/* In MSI-X, a write to the mask registers behaves as XOR. */
1720 static __inline void
1721 nfe_enable_intr(struct nfe_softc *sc)
1724 if (sc->nfe_msix != 0) {
1725 /* XXX Should have a better way to enable interrupts! */
1726 if (NFE_READ(sc, sc->nfe_irq_mask) == 0)
1727 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1729 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs);
1733 static __inline void
1734 nfe_disable_intr(struct nfe_softc *sc)
1737 if (sc->nfe_msix != 0) {
1738 /* XXX Should have a better way to disable interrupts! */
1739 if (NFE_READ(sc, sc->nfe_irq_mask) != 0)
1740 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1742 NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs);
1747 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1749 struct nfe_softc *sc;
1751 struct mii_data *mii;
1752 int error, init, mask;
1755 ifr = (struct ifreq *) data;
1760 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU)
1762 else if (ifp->if_mtu != ifr->ifr_mtu) {
1763 if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) ||
1764 (sc->nfe_jumbo_disable != 0)) &&
1765 ifr->ifr_mtu > ETHERMTU)
1769 ifp->if_mtu = ifr->ifr_mtu;
1770 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1771 nfe_init_locked(sc);
1778 if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the multicast filter.
			 */
1784 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1785 ((ifp->if_flags ^ sc->nfe_if_flags) &
1786 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1789 nfe_init_locked(sc);
1791 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1794 sc->nfe_if_flags = ifp->if_flags;
1800 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1809 mii = device_get_softc(sc->nfe_miibus);
1810 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1813 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1814 #ifdef DEVICE_POLLING
1815 if ((mask & IFCAP_POLLING) != 0) {
1816 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
1817 error = ether_poll_register(nfe_poll, ifp);
1821 nfe_disable_intr(sc);
1822 ifp->if_capenable |= IFCAP_POLLING;
1825 error = ether_poll_deregister(ifp);
1826 /* Enable interrupt even in error case */
1828 nfe_enable_intr(sc);
1829 ifp->if_capenable &= ~IFCAP_POLLING;
1833 #endif /* DEVICE_POLLING */
1834 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1835 (mask & IFCAP_HWCSUM) != 0) {
1836 ifp->if_capenable ^= IFCAP_HWCSUM;
1837 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
1838 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
1839 ifp->if_hwassist |= NFE_CSUM_FEATURES;
1841 ifp->if_hwassist &= ~NFE_CSUM_FEATURES;
1844 if ((sc->nfe_flags & NFE_HW_VLAN) != 0 &&
1845 (mask & IFCAP_VLAN_HWTAGGING) != 0) {
1846 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		/*
		 * It seems that VLAN stripping requires Rx checksum offload.
		 * Unfortunately FreeBSD has no way to disable only the Rx
		 * side of VLAN stripping. So when we know Rx checksum offload
		 * is disabled, turn the entire hardware VLAN assist off.
		 */
1856 if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) ==
1857 (NFE_HW_CSUM | NFE_HW_VLAN)) {
1858 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
1859 ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING;
1862 if ((sc->nfe_flags & NFE_HW_CSUM) != 0 &&
1863 (mask & IFCAP_TSO4) != 0) {
1864 ifp->if_capenable ^= IFCAP_TSO4;
1865 if ((IFCAP_TSO4 & ifp->if_capenable) != 0 &&
1866 (IFCAP_TSO4 & ifp->if_capabilities) != 0)
1867 ifp->if_hwassist |= CSUM_TSO;
1869 ifp->if_hwassist &= ~CSUM_TSO;
1872 if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1873 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1876 if ((sc->nfe_flags & NFE_HW_VLAN) != 0)
1877 VLAN_CAPABILITIES(ifp);
1880 error = ether_ioctl(ifp, cmd, data);
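/*
 * The interrupt handler below is registered as a filter (note the NULL
 * ithread argument in bus_setup_intr()): it only verifies that the
 * interrupt is ours, masks the chip and defers all Rx/Tx processing to
 * nfe_int_task() on the driver's fast taskqueue.
 */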
1891 struct nfe_softc *sc;
1894 sc = (struct nfe_softc *)arg;
1896 status = NFE_READ(sc, sc->nfe_irq_status);
1897 if (status == 0 || status == 0xffffffff)
1898 return (FILTER_STRAY);
1899 nfe_disable_intr(sc);
1900 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1902 return (FILTER_HANDLED);
1907 nfe_int_task(void *arg, int pending)
1909 struct nfe_softc *sc = arg;
1910 struct ifnet *ifp = sc->nfe_ifp;
1916 if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) {
1917 nfe_enable_intr(sc);
1919 return; /* not for us */
1921 NFE_WRITE(sc, sc->nfe_irq_status, r);
1923 DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r);
1925 #ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		NFE_UNLOCK(sc);
		return;
	}
#endif
1932 if (r & NFE_IRQ_LINK) {
1933 NFE_READ(sc, NFE_PHY_STATUS);
1934 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
1935 DPRINTF(sc, "link state changed\n");
1938 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1940 nfe_enable_intr(sc);
1946 if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN)
1947 domore = nfe_jrxeof(sc, sc->nfe_process_limit);
1949 domore = nfe_rxeof(sc, sc->nfe_process_limit);
1953 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1954 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
1958 if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) {
1959 taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task);
1963 /* Reenable interrupts. */
1964 nfe_enable_intr(sc);
1968 static __inline void
1969 nfe_discard_rxbuf(struct nfe_softc *sc, int idx)
1971 struct nfe_desc32 *desc32;
1972 struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->rxq.data[idx];
	m = data->m;
1979 if (sc->nfe_flags & NFE_40BIT_ADDR) {
1980 desc64 = &sc->rxq.desc64[idx];
1981 /* VLAN packet may have overwritten it. */
1982 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
1983 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
1984 desc64->length = htole16(m->m_len);
1985 desc64->flags = htole16(NFE_RX_READY);
1987 desc32 = &sc->rxq.desc32[idx];
1988 desc32->length = htole16(m->m_len);
1989 desc32->flags = htole16(NFE_RX_READY);
1994 static __inline void
1995 nfe_discard_jrxbuf(struct nfe_softc *sc, int idx)
1997 struct nfe_desc32 *desc32;
1998 struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct mbuf *m;

	data = &sc->jrxq.jdata[idx];
	m = data->m;
2005 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2006 desc64 = &sc->jrxq.jdesc64[idx];
2007 /* VLAN packet may have overwritten it. */
2008 desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr));
2009 desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr));
2010 desc64->length = htole16(m->m_len);
2011 desc64->flags = htole16(NFE_RX_READY);
2013 desc32 = &sc->jrxq.jdesc32[idx];
2014 desc32->length = htole16(m->m_len);
2015 desc32->flags = htole16(NFE_RX_READY);
2021 nfe_newbuf(struct nfe_softc *sc, int idx)
2023 struct nfe_rx_data *data;
2024 struct nfe_desc32 *desc32;
2025 struct nfe_desc64 *desc64;
2027 bus_dma_segment_t segs[1];
2031 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2035 m->m_len = m->m_pkthdr.len = MCLBYTES;
2036 m_adj(m, ETHER_ALIGN);
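	/* ETHER_ALIGN (2 bytes) puts the IP header on a 32-bit boundary. */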
2038 if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map,
2039 m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2043 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
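	/*
	 * The replacement mbuf was loaded into the spare map above, so
	 * the currently mapped buffer is untouched if the load fails;
	 * on success the two maps are swapped and the old one becomes
	 * the new spare.
	 */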
2045 data = &sc->rxq.data[idx];
2046 if (data->m != NULL) {
2047 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2048 BUS_DMASYNC_POSTREAD);
2049 bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map);
2051 map = data->rx_data_map;
2052 data->rx_data_map = sc->rxq.rx_spare_map;
2053 sc->rxq.rx_spare_map = map;
2054 bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map,
2055 BUS_DMASYNC_PREREAD);
2056 data->paddr = segs[0].ds_addr;
2058 /* update mapping address in h/w descriptor */
2059 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2060 desc64 = &sc->rxq.desc64[idx];
2061 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2062 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2063 desc64->length = htole16(segs[0].ds_len);
2064 desc64->flags = htole16(NFE_RX_READY);
2066 desc32 = &sc->rxq.desc32[idx];
2067 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2068 desc32->length = htole16(segs[0].ds_len);
2069 desc32->flags = htole16(NFE_RX_READY);
2077 nfe_jnewbuf(struct nfe_softc *sc, int idx)
2079 struct nfe_rx_data *data;
2080 struct nfe_desc32 *desc32;
2081 struct nfe_desc64 *desc64;
2083 bus_dma_segment_t segs[1];
2088 MGETHDR(m, M_DONTWAIT, MT_DATA);
2091 buf = nfe_jalloc(sc);
2096 /* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, NFE_JLEN, nfe_jfree, buf, (struct nfe_softc *)sc, 0,
	    EXT_NET_DRV);
2099 if ((m->m_flags & M_EXT) == 0) {
2103 m->m_pkthdr.len = m->m_len = NFE_JLEN;
2104 m_adj(m, ETHER_ALIGN);
2106 if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag,
2107 sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) {
2111 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2113 data = &sc->jrxq.jdata[idx];
2114 if (data->m != NULL) {
2115 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2116 BUS_DMASYNC_POSTREAD);
2117 bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map);
2119 map = data->rx_data_map;
2120 data->rx_data_map = sc->jrxq.jrx_spare_map;
2121 sc->jrxq.jrx_spare_map = map;
2122 bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map,
2123 BUS_DMASYNC_PREREAD);
2124 data->paddr = segs[0].ds_addr;
2126 /* update mapping address in h/w descriptor */
2127 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2128 desc64 = &sc->jrxq.jdesc64[idx];
2129 desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr));
2130 desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2131 desc64->length = htole16(segs[0].ds_len);
2132 desc64->flags = htole16(NFE_RX_READY);
2134 desc32 = &sc->jrxq.jdesc32[idx];
2135 desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr));
2136 desc32->length = htole16(segs[0].ds_len);
2137 desc32->flags = htole16(NFE_RX_READY);
2145 nfe_rxeof(struct nfe_softc *sc, int count)
2147 struct ifnet *ifp = sc->nfe_ifp;
2148 struct nfe_desc32 *desc32;
2149 struct nfe_desc64 *desc64;
2150 struct nfe_rx_data *data;
2156 NFE_LOCK_ASSERT(sc);
2158 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2159 BUS_DMASYNC_POSTREAD);
2161 for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
2166 data = &sc->rxq.data[sc->rxq.cur];
2168 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2169 desc64 = &sc->rxq.desc64[sc->rxq.cur];
2170 vtag = le32toh(desc64->physaddr[1]);
2171 flags = le16toh(desc64->flags);
2172 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2174 desc32 = &sc->rxq.desc32[sc->rxq.cur];
2175 flags = le16toh(desc32->flags);
2176 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2179 if (flags & NFE_RX_READY)
2182 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2183 if (!(flags & NFE_RX_VALID_V1)) {
2185 nfe_discard_rxbuf(sc, sc->rxq.cur);
2188 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2189 flags &= ~NFE_RX_ERROR;
2190 len--; /* fix buffer length */
2193 if (!(flags & NFE_RX_VALID_V2)) {
2195 nfe_discard_rxbuf(sc, sc->rxq.cur);
2199 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2200 flags &= ~NFE_RX_ERROR;
2201 len--; /* fix buffer length */
2205 if (flags & NFE_RX_ERROR) {
2207 nfe_discard_rxbuf(sc, sc->rxq.cur);
2212 if (nfe_newbuf(sc, sc->rxq.cur) != 0) {
2214 nfe_discard_rxbuf(sc, sc->rxq.cur);
2218 if ((vtag & NFE_RX_VTAG) != 0 &&
2219 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2220 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2221 m->m_flags |= M_VLANTAG;
2224 m->m_pkthdr.len = m->m_len = len;
2225 m->m_pkthdr.rcvif = ifp;
2227 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2228 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2229 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2230 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2231 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2232 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2233 m->m_pkthdr.csum_flags |=
2234 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2235 m->m_pkthdr.csum_data = 0xffff;
2243 (*ifp->if_input)(ifp, m);
2248 bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
2249 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
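	/*
	 * A non-zero (EAGAIN) return tells the caller that the count
	 * budget was exhausted and the ring may still hold work, so the
	 * interrupt task re-queues itself.
	 */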
2251 return (count > 0 ? 0 : EAGAIN);
2256 nfe_jrxeof(struct nfe_softc *sc, int count)
2258 struct ifnet *ifp = sc->nfe_ifp;
2259 struct nfe_desc32 *desc32;
2260 struct nfe_desc64 *desc64;
2261 struct nfe_rx_data *data;
2267 NFE_LOCK_ASSERT(sc);
2269 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2270 BUS_DMASYNC_POSTREAD);
	for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
	    vtag = 0) {
2278 data = &sc->jrxq.jdata[sc->jrxq.jcur];
2280 if (sc->nfe_flags & NFE_40BIT_ADDR) {
2281 desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur];
2282 vtag = le32toh(desc64->physaddr[1]);
2283 flags = le16toh(desc64->flags);
2284 len = le16toh(desc64->length) & NFE_RX_LEN_MASK;
2286 desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur];
2287 flags = le16toh(desc32->flags);
2288 len = le16toh(desc32->length) & NFE_RX_LEN_MASK;
2291 if (flags & NFE_RX_READY)
2294 if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
2295 if (!(flags & NFE_RX_VALID_V1)) {
2297 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2300 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
2301 flags &= ~NFE_RX_ERROR;
2302 len--; /* fix buffer length */
2305 if (!(flags & NFE_RX_VALID_V2)) {
2307 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2311 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
2312 flags &= ~NFE_RX_ERROR;
2313 len--; /* fix buffer length */
2317 if (flags & NFE_RX_ERROR) {
2319 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2324 if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) {
2326 nfe_discard_jrxbuf(sc, sc->jrxq.jcur);
2330 if ((vtag & NFE_RX_VTAG) != 0 &&
2331 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2332 m->m_pkthdr.ether_vtag = vtag & 0xffff;
2333 m->m_flags |= M_VLANTAG;
2336 m->m_pkthdr.len = m->m_len = len;
2337 m->m_pkthdr.rcvif = ifp;
2339 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
2340 if ((flags & NFE_RX_IP_CSUMOK) != 0) {
2341 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2342 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2343 if ((flags & NFE_RX_TCP_CSUMOK) != 0 ||
2344 (flags & NFE_RX_UDP_CSUMOK) != 0) {
2345 m->m_pkthdr.csum_flags |=
2346 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2347 m->m_pkthdr.csum_data = 0xffff;
2355 (*ifp->if_input)(ifp, m);
2360 bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
2361 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2363 return (count > 0 ? 0 : EAGAIN);
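
/*
 * Transmit completion: walk the Tx ring from the last reclaimed slot
 * (txq.next) towards the producer index (txq.cur), stopping at the first
 * descriptor the chip still owns (NFE_TX_VALID set).  Mbufs are freed
 * only on the descriptor carrying the LASTFRAG bit, and the watchdog is
 * disarmed once the ring drains completely.
 */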
static void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	uint16_t flags;
	int cons, prog;

	NFE_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
	    BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->txq.next; cons != sc->txq.cur;
	    NFE_INC(cons, NFE_TX_RING_COUNT)) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[cons];
			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[cons];
			flags = le16toh(desc32->flags);
		}

		if (flags & NFE_TX_VALID)
			break;

		prog++;
		sc->txq.queued--;
		data = &sc->txq.data[cons];

		if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V1) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0)
				continue;
			if ((flags & NFE_TX_ERROR_V2) != 0) {
				device_printf(sc->nfe_dev,
				    "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		/* last fragment of the mbuf chain transmitted */
		KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__));
		bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map);
		m_freem(data->m);
		data->m = NULL;
	}

	if (prog > 0) {
		sc->nfe_force_tx = 0;
		sc->txq.next = cons;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (sc->txq.queued == 0)
			sc->nfe_watchdog_timer = 0;
	}
}
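
/*
 * Encapsulate an mbuf chain into the Tx ring.  The chain is DMA-mapped
 * (and collapsed first if it exceeds NFE_MAX_SCATTER fragments), one
 * descriptor is filled per DMA segment, and the NFE_TX_VALID bit of the
 * *first* descriptor is set only after the whole chain has been written,
 * so the chip can never pick up a half-built chain.
 */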
static int
nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
{
	struct nfe_desc32 *desc32 = NULL;
	struct nfe_desc64 *desc64 = NULL;
	bus_dmamap_t map;
	bus_dma_segment_t segs[NFE_MAX_SCATTER];
	int error, i, nsegs, prod, si;
	uint32_t tso_segsz;
	uint16_t cflags, flags;
	struct mbuf *m;

	prod = si = sc->txq.cur;
	map = sc->txq.data[prod].tx_data_map;

	error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map,
		    *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) {
		bus_dmamap_unload(sc->txq.tx_data_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	cflags = flags = 0;
	tso_segsz = 0;
	if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			cflags |= NFE_TX_IP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= NFE_TX_TCP_UDP_CSUM;
	}
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		    NFE_TX_TSO_SHIFT;
		cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM);
		cflags |= NFE_TX_TSO;
	}

	for (i = 0; i < nsegs; i++) {
		if (sc->nfe_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[prod];
			desc64->physaddr[0] =
			    htole32(NFE_ADDR_HI(segs[i].ds_addr));
			desc64->physaddr[1] =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc64->vtag = 0;
			desc64->length = htole16(segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
		} else {
			desc32 = &sc->txq.desc32[prod];
			desc32->physaddr =
			    htole32(NFE_ADDR_LO(segs[i].ds_addr));
			desc32->length = htole16(segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		NFE_INC(prod, NFE_TX_RING_COUNT);
	}

	/*
	 * the whole mbuf chain has been DMA mapped, fix last/first descriptor.
	 * csum flags, vtag and TSO belong to the first fragment only.
	 */
	if (sc->nfe_flags & NFE_40BIT_ADDR) {
		desc64->flags |= htole16(NFE_TX_LASTFRAG_V2);
		desc64 = &sc->txq.desc64[si];
		if ((m->m_flags & M_VLANTAG) != 0)
			desc64->vtag = htole32(NFE_TX_VTAG |
			    m->m_pkthdr.ether_vtag);
		if (tso_segsz != 0) {
			/*
			 * The following indicates the descriptor element
			 * is a 32bit quantity.
			 */
			desc64->length |= htole16((uint16_t)tso_segsz);
			desc64->flags |= htole16(tso_segsz >> 16);
		}
		/*
		 * finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc64->flags |= htole16(NFE_TX_VALID | cflags);
	} else {
		if (sc->nfe_flags & NFE_JUMBO_SUP)
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V2);
		else
			desc32->flags |= htole16(NFE_TX_LASTFRAG_V1);
		desc32 = &sc->txq.desc32[si];
		if (tso_segsz != 0) {
			/*
			 * The following indicates the descriptor element
			 * is a 32bit quantity.
			 */
			desc32->length |= htole16((uint16_t)tso_segsz);
			desc32->flags |= htole16(tso_segsz >> 16);
		}
		/*
		 * finally, set the valid/checksum/TSO bit in the first
		 * descriptor.
		 */
		desc32->flags |= htole16(NFE_TX_VALID | cflags);
	}

	sc->txq.cur = prod;
	prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT;
	sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map;
	sc->txq.data[prod].tx_data_map = map;
	sc->txq.data[prod].m = m;

	bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE);

	return (0);
}
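
/*
 * Program the hardware multicast filter.  Instead of a hash table the
 * chip takes an (addr, mask) pair: a frame is accepted when its
 * destination address agrees with addr on every bit set in mask.  addr
 * is the bitwise AND of all subscribed group addresses and mask keeps
 * only the bits on which all groups agree.  Hypothetical example: with
 * groups 01:00:5e:00:00:01 and 01:00:5e:00:00:05 this yields
 * addr = 01:00:5e:00:00:01 and mask = ff:ff:ff:ff:ff:fb, which matches
 * both groups (and unavoidably a few others).
 */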
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = sc->nfe_ifp;
	struct ifmultiaddr *ifma;
	int i;
	uint32_t filter;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	NFE_LOCK_ASSERT(sc);

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		u_char *addrp;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		addrp = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			u_int8_t mcaddr = addrp[i];

			addr[i] &= mcaddr;
			mask[i] &= ~mcaddr;
		}
	}
	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] <<  8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] <<  8 | mask[4]);

	filter = NFE_READ(sc, NFE_RXFILTER);
	filter &= NFE_PFF_RX_PAUSE;
	filter |= NFE_RXFILTER_MAGIC;
	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PFF_PROMISC : NFE_PFF_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}
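
/*
 * Deferred start: nfe_tx_task() is the handler behind sc->nfe_tx_task,
 * so paths that cannot call nfe_start() directly (e.g. the watchdog
 * below) can kick transmission by enqueueing the task instead.
 */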
static void
nfe_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	nfe_start(ifp);
}

static void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	int enq;

	NFE_LOCK(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->nfe_link == 0) {
		NFE_UNLOCK(sc);
		return;
	}

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m0);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->nfe_watchdog_timer = 5;
	}

	NFE_UNLOCK(sc);
}
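
/*
 * Watchdog, driven from nfe_tick().  Recovery is staged: first assume a
 * lost Tx completion interrupt and reclaim descriptors; next assume a
 * lost Tx start command and re-kick the chip (up to three times); only
 * then declare the hardware wedged and reinitialize it.
 */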
static void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer)
		return;

	/* Check if we've lost Tx completion interrupt. */
	nfe_txeof(sc);
	if (sc->txq.queued == 0) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
		return;
	}
	/* Check if we've lost start Tx command. */
	sc->nfe_force_tx++;
	if (sc->nfe_force_tx <= 3) {
		/*
		 * If this is the case for watchdog timeout, the following
		 * code should go to nfe_txeof().
		 */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);
		return;
	}
	sc->nfe_force_tx = 0;

	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ifp->if_oerrors++;
	nfe_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task);
}
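
/*
 * nfe_init() is the unlocked wrapper installed as the ifnet if_init
 * hook; the real work is done in nfe_init_locked() with the softc
 * mutex held.
 */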
static void
nfe_init(void *xsc)
{
	struct nfe_softc *sc = xsc;

	NFE_LOCK(sc);
	nfe_init_locked(sc);
	NFE_UNLOCK(sc);
}
static void
nfe_init_locked(void *xsc)
{
	struct nfe_softc *sc = xsc;
	struct ifnet *ifp = sc->nfe_ifp;
	struct mii_data *mii;
	uint32_t val;
	int error;

	NFE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nfe_miibus);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	nfe_stop(ifp);

	sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS;

	nfe_init_tx_ring(sc, &sc->txq);
	if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN))
		error = nfe_init_jrx_ring(sc, &sc->jrxq);
	else
		error = nfe_init_rx_ring(sc, &sc->rxq);
	if (error != 0) {
		device_printf(sc->nfe_dev,
		    "initialization failed: no memory for rx buffers\n");
		nfe_stop(ifp);
		return;
	}

	val = 0;
	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0)
		val |= NFE_MAC_ADDR_INORDER;
	NFE_WRITE(sc, NFE_TX_UNK, val);
	NFE_WRITE(sc, NFE_STATUS, 0);

	if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0)
		NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->nfe_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->nfe_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;

	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
	else
		NFE_WRITE(sc, NFE_VTAG_CTL, 0);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, IF_LLADDR(ifp));

	/* tell MAC where rings are in memory */
	if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->jrxq.jphysaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->jrxq.jphysaddr));
	} else {
		NFE_WRITE(sc, NFE_RX_RING_ADDR_HI,
		    NFE_ADDR_HI(sc->rxq.physaddr));
		NFE_WRITE(sc, NFE_RX_RING_ADDR_LO,
		    NFE_ADDR_LO(sc->rxq.physaddr));
	}
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr));
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr));

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize);

	/* force MAC to wakeup */
	val = NFE_READ(sc, NFE_PWR_STATE);
	if ((val & NFE_PWR_WAKEUP) == 0)
		NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP);
	DELAY(10);
	val = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID);

#ifdef notyet
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		nfe_disable_intr(sc);
	else
#endif
	nfe_enable_intr(sc); /* enable interrupts */

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->nfe_link = 0;
	mii_mediachg(mii);

	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}
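
/*
 * Stop the interface: halt Tx and Rx, mask interrupts, and release every
 * mbuf still sitting in the rings so a later nfe_init_locked() starts
 * from a clean slate.
 */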
static void
nfe_stop(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct nfe_rx_ring *rx_ring;
	struct nfe_jrx_ring *jrx_ring;
	struct nfe_tx_ring *tx_ring;
	struct nfe_rx_data *rdata;
	struct nfe_tx_data *tdata;
	int i;

	NFE_LOCK_ASSERT(sc);

	sc->nfe_watchdog_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->nfe_stat_ch);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	nfe_disable_intr(sc);

	sc->nfe_link = 0;

	/* free Rx and Tx mbufs still in the queues. */
	rx_ring = &sc->rxq;
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		rdata = &rx_ring->data[i];
		if (rdata->m != NULL) {
			bus_dmamap_sync(rx_ring->rx_data_tag,
			    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rx_ring->rx_data_tag,
			    rdata->rx_data_map);
			m_freem(rdata->m);
			rdata->m = NULL;
		}
	}

	if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) {
		jrx_ring = &sc->jrxq;
		for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) {
			rdata = &jrx_ring->jdata[i];
			if (rdata->m != NULL) {
				bus_dmamap_sync(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(jrx_ring->jrx_data_tag,
				    rdata->rx_data_map);
				m_freem(rdata->m);
				rdata->m = NULL;
			}
		}
	}

	tx_ring = &sc->txq;
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		tdata = &tx_ring->data[i];
		if (tdata->m != NULL) {
			bus_dmamap_sync(tx_ring->tx_data_tag,
			    tdata->tx_data_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(tx_ring->tx_data_tag,
			    tdata->tx_data_map);
			m_freem(tdata->m);
			tdata->m = NULL;
		}
	}
}
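
/*
 * ifmedia callbacks: both defer to the MII layer under the driver lock.
 */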
static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	NFE_LOCK(sc);
	mii = device_get_softc(sc->nfe_miibus);
	mii_mediachg(mii);
	NFE_UNLOCK(sc);

	return (0);
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->nfe_miibus);

	NFE_LOCK(sc);
	mii_pollstat(mii);
	NFE_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static void
nfe_tick(void *xsc)
{
	struct nfe_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = (struct nfe_softc *)xsc;

	NFE_LOCK_ASSERT(sc);

	ifp = sc->nfe_ifp;

	mii = device_get_softc(sc->nfe_miibus);
	mii_tick(mii);
	nfe_watchdog(ifp);
	callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc);
}
static int
nfe_shutdown(device_t dev)
{
	struct nfe_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	NFE_LOCK(sc);
	ifp = sc->nfe_ifp;
	nfe_stop(ifp);
	/* nfe_reset(sc); */
	NFE_UNLOCK(sc);

	return (0);
}
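
/*
 * Read the station address out of the chip.  Parts without the
 * NFE_CORRECT_MACADDR quirk flag store it in reversed byte order, which
 * is why nfe_init_locked() sets NFE_MAC_ADDR_INORDER only for the newer
 * chips.
 */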
static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t val;

	if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (val >> 8) & 0xff;
		addr[1] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (val >> 24) & 0xff;
		addr[3] = (val >> 16) & 0xff;
		addr[4] = (val >> 8) & 0xff;
		addr[5] = (val & 0xff);
	} else {
		val = NFE_READ(sc, NFE_MACADDR_LO);
		addr[5] = (val >> 8) & 0xff;
		addr[4] = (val & 0xff);

		val = NFE_READ(sc, NFE_MACADDR_HI);
		addr[3] = (val >> 24) & 0xff;
		addr[2] = (val >> 16) & 0xff;
		addr[1] = (val >> 8) & 0xff;
		addr[0] = (val & 0xff);
	}
}
static void
nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr)
{

	NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 |
	    addr[1] << 8 | addr[0]);
}
/*
 * Map a single buffer address.
 */
static void
nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nfe_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));

	ctx = (struct nfe_dmamap_arg *)arg;
	ctx->nfe_busaddr = segs[0].ds_addr;
}
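
/*
 * Range-checked integer sysctl handler.  A wrapper such as
 * sysctl_hw_nfe_proc_limit() below bakes in driver-specific bounds; a
 * typical registration (sketch, assuming the process_limit tunable from
 * nfe_attach()) would look like:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
 *	    "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I",
 *	    "max number of Rx events to process");
 */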
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN,
	    NFE_PROC_MAX));
}