/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/age/if_agereg.h>
#include <dev/age/if_agevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define AGE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(age, pci, 1, 1, 1);
MODULE_DEPEND(age, ether, 1, 1, 1);
MODULE_DEPEND(age, miibus, 1, 1, 1);

static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.age.msi_disable", &msi_disable);
TUNABLE_INT("hw.age.msix_disable", &msix_disable);
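
/*
 * Usage sketch (standard loader-tunable handling is assumed): the two
 * knobs above can be set from loader.conf before the driver attaches,
 * e.g.
 *
 *	hw.age.msi_disable="1"
 *	hw.age.msix_disable="1"
 */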
/*
 * Devices supported by this driver.
 */
static struct age_dev {
        uint16_t        age_vendorid;
        uint16_t        age_deviceid;

        { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
            "Attansic Technology Corp, L1 Gigabit Ethernet" },

static int age_miibus_readreg(device_t, int, int);
static int age_miibus_writereg(device_t, int, int, int);
static void age_miibus_statchg(device_t);
static void age_mediastatus(struct ifnet *, struct ifmediareq *);
static int age_mediachange(struct ifnet *);
static int age_probe(device_t);
static void age_get_macaddr(struct age_softc *);
static void age_phy_reset(struct age_softc *);
static int age_attach(device_t);
static int age_detach(device_t);
static void age_sysctl_node(struct age_softc *);
static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int age_check_boundary(struct age_softc *);
static int age_dma_alloc(struct age_softc *);
static void age_dma_free(struct age_softc *);
static int age_shutdown(device_t);
static void age_setwol(struct age_softc *);
static int age_suspend(device_t);
static int age_resume(device_t);
static int age_encap(struct age_softc *, struct mbuf **);
static void age_start(struct ifnet *);
static void age_start_locked(struct ifnet *);
static void age_watchdog(struct age_softc *);
static int age_ioctl(struct ifnet *, u_long, caddr_t);
static void age_mac_config(struct age_softc *);
static void age_link_task(void *, int);
static void age_stats_update(struct age_softc *);
static int age_intr(void *);
static void age_int_task(void *, int);
static void age_txintr(struct age_softc *, int);
static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static int age_rxintr(struct age_softc *, int, int);
static void age_tick(void *);
static void age_reset(struct age_softc *);
static void age_init(void *);
static void age_init_locked(struct age_softc *);
static void age_stop(struct age_softc *);
static void age_stop_txmac(struct age_softc *);
static void age_stop_rxmac(struct age_softc *);
static void age_init_tx_ring(struct age_softc *);
static int age_init_rx_ring(struct age_softc *);
static void age_init_rr_ring(struct age_softc *);
static void age_init_cmb_block(struct age_softc *);
static void age_init_smb_block(struct age_softc *);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *age_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static int age_newbuf(struct age_softc *, struct age_rxdesc *);
static void age_rxvlan(struct age_softc *);
static void age_rxfilter(struct age_softc *);
static int sysctl_age_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t age_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_probe, age_probe),
        DEVMETHOD(device_attach, age_attach),
        DEVMETHOD(device_detach, age_detach),
        DEVMETHOD(device_shutdown, age_shutdown),
        DEVMETHOD(device_suspend, age_suspend),
        DEVMETHOD(device_resume, age_resume),

        DEVMETHOD(miibus_readreg, age_miibus_readreg),
        DEVMETHOD(miibus_writereg, age_miibus_writereg),
        DEVMETHOD(miibus_statchg, age_miibus_statchg),

static driver_t age_driver = {
        sizeof(struct age_softc)

static devclass_t age_devclass;

DRIVER_MODULE(age, pci, age_driver, age_devclass, 0, 0);
DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec age_res_spec_mem[] = {
        { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },

static struct resource_spec age_irq_spec_legacy[] = {
        { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },

static struct resource_spec age_irq_spec_msi[] = {
        { SYS_RES_IRQ, 1, RF_ACTIVE },

static struct resource_spec age_irq_spec_msix[] = {
        { SYS_RES_IRQ, 1, RF_ACTIVE },

/*
 * Read a PHY register on the MII of the L1.
 */
age_miibus_readreg(device_t dev, int phy, int reg)

        struct age_softc *sc;

        sc = device_get_softc(dev);

        CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
            MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
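        /*
         * The MDIO block clears MDIO_OP_EXECUTE/MDIO_OP_BUSY once the
         * transaction completes; poll for that before touching the
         * data field.
         */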
        for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
                v = CSR_READ_4(sc, AGE_MDIO);
                if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)

        device_printf(sc->age_dev, "phy read timeout : %d\n", reg);

        return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);

/*
 * Write a PHY register on the MII of the L1.
 */
age_miibus_writereg(device_t dev, int phy, int reg, int val)

        struct age_softc *sc;

        sc = device_get_softc(dev);

        CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
            (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
            MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
        for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
                v = CSR_READ_4(sc, AGE_MDIO);
                if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)

        device_printf(sc->age_dev, "phy write timeout : %d\n", reg);

/*
 * Callback from MII layer when media changes.
 */
age_miibus_statchg(device_t dev)

        struct age_softc *sc;

        sc = device_get_softc(dev);
        taskqueue_enqueue(taskqueue_swi, &sc->age_link_task);

/*
 * Get the current interface media status.
 */
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)

        struct age_softc *sc;
        struct mii_data *mii;

        mii = device_get_softc(sc->age_miibus);

        ifmr->ifm_status = mii->mii_media_status;
        ifmr->ifm_active = mii->mii_media_active;

/*
 * Set hardware to newly-selected media.
 */
age_mediachange(struct ifnet *ifp)

        struct age_softc *sc;
        struct mii_data *mii;
        struct mii_softc *miisc;

        mii = device_get_softc(sc->age_miibus);
        LIST_FOREACH(miisc, &mii->mii_phys, mii_list)

        error = mii_mediachg(mii);

age_probe(device_t dev)

        uint16_t vendor, devid;

        vendor = pci_get_vendor(dev);
        devid = pci_get_device(dev);
        for (i = 0; i < sizeof(age_devs) / sizeof(age_devs[0]);
                if (vendor == sp->age_vendorid &&
                    devid == sp->age_deviceid) {
                        device_set_desc(dev, sp->age_name);
                        return (BUS_PROBE_DEFAULT);

age_get_macaddr(struct age_softc *sc)

        reg = CSR_READ_4(sc, AGE_SPI_CTRL);
        if ((reg & SPI_VPD_ENB) != 0) {
                /* Get VPD stored in TWSI EEPROM. */
                CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);

        if (pci_find_cap(sc->age_dev, PCIY_VPD, &vpdc) == 0) {
                /*
                 * PCI VPD capability found, let TWSI reload the EEPROM.
                 * This will set the ethernet address of the controller.
                 */
                CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) |
                    TWSI_CTRL_SW_LD_START);
                for (i = 100; i > 0; i--) {
                        reg = CSR_READ_4(sc, AGE_TWSI_CTRL);
                        if ((reg & TWSI_CTRL_SW_LD_START) == 0)

                        device_printf(sc->age_dev,
                            "reloading EEPROM timeout!\n");

                device_printf(sc->age_dev,
                    "PCI VPD capability not found!\n");

        ea[0] = CSR_READ_4(sc, AGE_PAR0);
        ea[1] = CSR_READ_4(sc, AGE_PAR1);
        sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
        sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
        sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
        sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
        sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
        sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
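        /*
         * Worked illustration (not values read from real hardware): for
         * station address 00:11:22:33:44:55 the controller would report
         * PAR1 = 0x00000011 and PAR0 = 0x22334455, which the shifts
         * above unpack byte by byte.
         */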
age_phy_reset(struct age_softc *sc)

        CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
        CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);

#define ATPHY_DBG_ADDR		0x1D
#define ATPHY_DBG_DATA		0x1E
#define ATPHY_CDTC		0x16
#define PHY_CDTC_ENB		0x0001
#define PHY_CDTC_POFF		8
#define ATPHY_CDTS		0x1C
#define PHY_CDTS_STAT_OK	0x0000
#define PHY_CDTS_STAT_SHORT	0x0100
#define PHY_CDTS_STAT_OPEN	0x0200
#define PHY_CDTS_STAT_INVAL	0x0300
#define PHY_CDTS_STAT_MASK	0x0300

        /* Check power saving mode.  Magic from Linux. */
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
        for (linkup = 0, pn = 0; pn < 4; pn++) {
                age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTC,
                    (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
                for (i = 200; i > 0; i--) {
                        reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
                        if ((reg & PHY_CDTC_ENB) == 0)

                reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
                if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {

        age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR,
            BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);

        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_DATA, 0x124E);
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
        reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_DATA, reg | 0x03);

        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_DATA, 0x024E);

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK

age_attach(device_t dev)

        struct age_softc *sc;
        int error, i, msic, msixc, pmc;

        sc = device_get_softc(dev);

        mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
        TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
        TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);

        /* Map the device. */
        pci_enable_busmaster(dev);
        sc->age_res_spec = age_res_spec_mem;
        sc->age_irq_spec = age_irq_spec_legacy;
        error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
                device_printf(dev, "cannot allocate memory resources.\n");

        /* Set PHY address. */
        sc->age_phyaddr = AGE_PHY_ADDR;

        /* Reset the ethernet controller. */

        /* Get PCI and chip id/revision. */
        sc->age_rev = pci_get_revid(dev);
        sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
            MASTER_CHIP_REV_SHIFT;
        device_printf(dev, "PCI device revision : 0x%04x\n",
        device_printf(dev, "Chip id/revision : 0x%04x\n",
        /*
         * Uninitialized hardware returns an invalid chip id/revision
         * as well as 0xFFFFFFFF for the Tx/Rx fifo length.  It seems
         * that an unplugged cable puts the hardware into an automatic
         * power down mode, which in turn returns an invalid chip
         * revision.
         */
        if (sc->age_chip_rev == 0xFFFF) {
                device_printf(dev, "invalid chip revision : 0x%04x -- "
                    "not initialized?\n", sc->age_chip_rev);
        device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
            CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
            CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));

        /* Allocate IRQ resources. */
        msixc = pci_msix_count(dev);
        msic = pci_msi_count(dev);
        device_printf(dev, "MSIX count : %d\n", msixc);
        device_printf(dev, "MSI count : %d\n", msic);

        /* Prefer MSIX over MSI. */
        if (msix_disable == 0 || msi_disable == 0) {
                if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
                    pci_alloc_msix(dev, &msixc) == 0) {
                        if (msixc == AGE_MSIX_MESSAGES) {
                                device_printf(dev, "Using %d MSIX messages.\n",
                                sc->age_flags |= AGE_FLAG_MSIX;
                                sc->age_irq_spec = age_irq_spec_msix;
                                pci_release_msi(dev);
                if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
                    msic == AGE_MSI_MESSAGES &&
                    pci_alloc_msi(dev, &msic) == 0) {
                        if (msic == AGE_MSI_MESSAGES) {
                                device_printf(dev, "Using %d MSI messages.\n",
                                sc->age_flags |= AGE_FLAG_MSI;
                                sc->age_irq_spec = age_irq_spec_msi;
                        pci_release_msi(dev);

        error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
                device_printf(dev, "cannot allocate IRQ resources.\n");

        /* Get DMA parameters from PCIe device control register. */
        if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
                sc->age_flags |= AGE_FLAG_PCIE;
                burst = pci_read_config(dev, i + 0x08, 2);
                /* Max read request size. */
                sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
                    DMA_CFG_RD_BURST_SHIFT;
                /* Max payload size. */
                sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
                    DMA_CFG_WR_BURST_SHIFT;
                device_printf(dev, "Read request size : %d bytes.\n",
                    128 << ((burst >> 12) & 0x07));
                device_printf(dev, "TLP payload size : %d bytes.\n",
                    128 << ((burst >> 5) & 0x07));
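                /*
                 * The 3-bit fields above encode powers of two starting
                 * at 128 bytes, e.g. a field value of 2 corresponds to
                 * 128 << 2 = 512 bytes.
                 */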
        sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
        sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;

        /* Create device sysctl node. */
        if ((error = age_dma_alloc(sc)) != 0)
        /* Load station address. */

        ifp = sc->age_ifp = if_alloc(IFT_ETHER);
                device_printf(dev, "cannot allocate ifnet structure.\n");

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = age_ioctl;
        ifp->if_start = age_start;
        ifp->if_init = age_init;
        ifp->if_snd.ifq_drv_maxlen = AGE_TX_RING_CNT - 1;
        IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
        IFQ_SET_READY(&ifp->if_snd);
        ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
        ifp->if_hwassist = AGE_CSUM_FEATURES | CSUM_TSO;
        if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
                sc->age_flags |= AGE_FLAG_PMCAP;
                ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
        ifp->if_capenable = ifp->if_capabilities;

        /* Set up MII bus. */
        error = mii_attach(dev, &sc->age_miibus, ifp, age_mediachange,
            age_mediastatus, BMSR_DEFCAPMASK, sc->age_phyaddr, MII_OFFSET_ANY,
                device_printf(dev, "attaching PHYs failed\n");

        ether_ifattach(ifp, sc->age_eaddr);

        /* VLAN capability setup. */
        ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
            IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
        ifp->if_capenable = ifp->if_capabilities;

        /* Tell the upper layer(s) we support long frames. */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

        /* Create local taskq. */
        sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
            taskqueue_thread_enqueue, &sc->age_tq);
        if (sc->age_tq == NULL) {
                device_printf(dev, "could not create taskqueue.\n");

        taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
            device_get_nameunit(sc->age_dev));

        if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
                msic = AGE_MSIX_MESSAGES;
        else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
                msic = AGE_MSI_MESSAGES;
        for (i = 0; i < msic; i++) {
                error = bus_setup_intr(dev, sc->age_irq[i],
                    INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc,
                    &sc->age_intrhand[i]);

                device_printf(dev, "could not set up interrupt handler.\n");
                taskqueue_free(sc->age_tq);

age_detach(device_t dev)

        struct age_softc *sc;

        sc = device_get_softc(dev);

        if (device_is_attached(dev)) {
                sc->age_flags |= AGE_FLAG_DETACH;
                callout_drain(&sc->age_tick_ch);
                taskqueue_drain(sc->age_tq, &sc->age_int_task);
                taskqueue_drain(taskqueue_swi, &sc->age_link_task);

        if (sc->age_tq != NULL) {
                taskqueue_drain(sc->age_tq, &sc->age_int_task);
                taskqueue_free(sc->age_tq);

        if (sc->age_miibus != NULL) {
                device_delete_child(dev, sc->age_miibus);
                sc->age_miibus = NULL;
        bus_generic_detach(dev);

        if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
                msic = AGE_MSIX_MESSAGES;
        else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
                msic = AGE_MSI_MESSAGES;
        for (i = 0; i < msic; i++) {
                if (sc->age_intrhand[i] != NULL) {
                        bus_teardown_intr(dev, sc->age_irq[i],
                            sc->age_intrhand[i]);
                        sc->age_intrhand[i] = NULL;

        bus_release_resources(dev, sc->age_irq_spec, sc->age_irq);
        if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0)
                pci_release_msi(dev);
        bus_release_resources(dev, sc->age_res_spec, sc->age_res);
        mtx_destroy(&sc->age_mtx);

age_sysctl_node(struct age_softc *sc)

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
            "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats,

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
            "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
            sysctl_hw_age_int_mod, "I", "age interrupt moderation");

        /* Pull in device tunables. */
        sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
        error = resource_int_value(device_get_name(sc->age_dev),
            device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
        if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
            sc->age_int_mod > AGE_IM_TIMER_MAX) {
                device_printf(sc->age_dev,
                    "int_mod value out of range; using default: %d\n",
                    AGE_IM_TIMER_DEFAULT);
                sc->age_int_mod = AGE_IM_TIMER_DEFAULT;

        SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
            "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->age_process_limit,
            0, sysctl_hw_age_proc_limit, "I",
            "max number of Rx events to process");

        /* Pull in device tunables. */
        sc->age_process_limit = AGE_PROC_DEFAULT;
        error = resource_int_value(device_get_name(sc->age_dev),
            device_get_unit(sc->age_dev), "process_limit",
            &sc->age_process_limit);
        if (sc->age_process_limit < AGE_PROC_MIN ||
            sc->age_process_limit > AGE_PROC_MAX) {
                device_printf(sc->age_dev,
                    "process_limit value out of range; "
                    "using default: %d\n", AGE_PROC_DEFAULT);
                sc->age_process_limit = AGE_PROC_DEFAULT;
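
/*
 * Usage sketch for the nodes above (assuming device unit 0; the names
 * follow the SYSCTL_ADD_PROC() and resource_int_value() calls, and the
 * numeric values here are arbitrary examples):
 *
 *	sysctl dev.age.0.stats=1	# run the statistics handler
 *	sysctl dev.age.0.int_mod=100	# interrupt moderation timer
 *	sysctl dev.age.0.process_limit=64
 *
 * The int_mod/process_limit values may also be preset from device
 * hints, e.g. hint.age.0.int_mod="100".
 */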
struct age_dmamap_arg {
        bus_addr_t      age_busaddr;

age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)

        struct age_dmamap_arg *ctx;

        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        ctx = (struct age_dmamap_arg *)arg;
        ctx->age_busaddr = segs[0].ds_addr;
/*
 * The Attansic L1 controller has a single register to specify the high
 * address part of DMA blocks.  So all descriptor structures and DMA
 * memory blocks must share the same high address within the given 4GB
 * address space (i.e. crossing a 4GB boundary is not allowed).
 */
age_check_boundary(struct age_softc *sc)

        bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
        bus_addr_t cmb_block_end, smb_block_end;

        /* Tx/Rx descriptor queue should reside within 4GB boundary. */
        tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
        rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
        rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
        cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
        smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;

        if ((AGE_ADDR_HI(tx_ring_end) !=
            AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
            (AGE_ADDR_HI(rx_ring_end) !=
            AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
            (AGE_ADDR_HI(rr_ring_end) !=
            AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
            (AGE_ADDR_HI(cmb_block_end) !=
            AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
            (AGE_ADDR_HI(smb_block_end) !=
            AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))

        if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
            (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
            (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
            (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
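
        /*
         * Illustration: a ring starting at 0xFFFFF000 and ending past
         * 0x100000000 has AGE_ADDR_HI(start) == 0 but
         * AGE_ADDR_HI(end) == 1, so the first test above fails and
         * age_dma_alloc() falls back to 32-bit DMA addressing.
         */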
age_dma_alloc(struct age_softc *sc)

        struct age_txdesc *txd;
        struct age_rxdesc *rxd;
        struct age_dmamap_arg ctx;

        lowaddr = BUS_SPACE_MAXADDR;

        /* Create parent ring/DMA block tag. */
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->age_dev),	/* parent */
            1, 0,				/* alignment, boundary */
            lowaddr,				/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
            BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_parent_tag);
                device_printf(sc->age_dev,
                    "could not create parent DMA tag.\n");

        /* Create tag for Tx ring. */
        error = bus_dma_tag_create(
            sc->age_cdata.age_parent_tag,	/* parent */
            AGE_TX_RING_ALIGN, 0,		/* alignment, boundary */
            BUS_SPACE_MAXADDR,			/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            AGE_TX_RING_SZ,			/* maxsize */
            AGE_TX_RING_SZ,			/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_tx_ring_tag);
                device_printf(sc->age_dev,
                    "could not create Tx ring DMA tag.\n");

        /* Create tag for Rx ring. */
        error = bus_dma_tag_create(
            sc->age_cdata.age_parent_tag,	/* parent */
            AGE_RX_RING_ALIGN, 0,		/* alignment, boundary */
            BUS_SPACE_MAXADDR,			/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            AGE_RX_RING_SZ,			/* maxsize */
            AGE_RX_RING_SZ,			/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_rx_ring_tag);
                device_printf(sc->age_dev,
                    "could not create Rx ring DMA tag.\n");

        /* Create tag for Rx return ring. */
        error = bus_dma_tag_create(
            sc->age_cdata.age_parent_tag,	/* parent */
            AGE_RR_RING_ALIGN, 0,		/* alignment, boundary */
            BUS_SPACE_MAXADDR,			/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            AGE_RR_RING_SZ,			/* maxsize */
            AGE_RR_RING_SZ,			/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_rr_ring_tag);
                device_printf(sc->age_dev,
                    "could not create Rx return ring DMA tag.\n");
        /* Create tag for coalescing message block. */
        error = bus_dma_tag_create(
            sc->age_cdata.age_parent_tag,	/* parent */
            AGE_CMB_ALIGN, 0,			/* alignment, boundary */
            BUS_SPACE_MAXADDR,			/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            AGE_CMB_BLOCK_SZ,			/* maxsize */
            AGE_CMB_BLOCK_SZ,			/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_cmb_block_tag);
                device_printf(sc->age_dev,
                    "could not create CMB DMA tag.\n");

        /* Create tag for statistics message block. */
        error = bus_dma_tag_create(
            sc->age_cdata.age_parent_tag,	/* parent */
            AGE_SMB_ALIGN, 0,			/* alignment, boundary */
            BUS_SPACE_MAXADDR,			/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            AGE_SMB_BLOCK_SZ,			/* maxsize */
            AGE_SMB_BLOCK_SZ,			/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_smb_block_tag);
                device_printf(sc->age_dev,
                    "could not create SMB DMA tag.\n");

        /* Allocate DMA'able memory and load the DMA map. */
        error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
            (void **)&sc->age_rdata.age_tx_ring,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->age_cdata.age_tx_ring_map);
                device_printf(sc->age_dev,
                    "could not allocate DMA'able memory for Tx ring.\n");

        error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
            sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
            AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
        if (error != 0 || ctx.age_busaddr == 0) {
                device_printf(sc->age_dev,
                    "could not load DMA'able memory for Tx ring.\n");
        sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;

        error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
            (void **)&sc->age_rdata.age_rx_ring,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->age_cdata.age_rx_ring_map);
                device_printf(sc->age_dev,
                    "could not allocate DMA'able memory for Rx ring.\n");

        ctx.age_busaddr = 0;
        error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
            sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
            AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
        if (error != 0 || ctx.age_busaddr == 0) {
                device_printf(sc->age_dev,
                    "could not load DMA'able memory for Rx ring.\n");
        sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;

        /* Rx return ring */
        error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
            (void **)&sc->age_rdata.age_rr_ring,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->age_cdata.age_rr_ring_map);
                device_printf(sc->age_dev,
                    "could not allocate DMA'able memory for Rx return ring.\n");

        ctx.age_busaddr = 0;
        error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
            sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
            AGE_RR_RING_SZ, age_dmamap_cb,
        if (error != 0 || ctx.age_busaddr == 0) {
                device_printf(sc->age_dev,
                    "could not load DMA'able memory for Rx return ring.\n");
        sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;

        error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
            (void **)&sc->age_rdata.age_cmb_block,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->age_cdata.age_cmb_block_map);
                device_printf(sc->age_dev,
                    "could not allocate DMA'able memory for CMB block.\n");

        ctx.age_busaddr = 0;
        error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
            sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
            AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
        if (error != 0 || ctx.age_busaddr == 0) {
                device_printf(sc->age_dev,
                    "could not load DMA'able memory for CMB block.\n");
        sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;

        error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
            (void **)&sc->age_rdata.age_smb_block,
            BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
            &sc->age_cdata.age_smb_block_map);
                device_printf(sc->age_dev,
                    "could not allocate DMA'able memory for SMB block.\n");

        ctx.age_busaddr = 0;
        error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
            sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
            AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
        if (error != 0 || ctx.age_busaddr == 0) {
                device_printf(sc->age_dev,
                    "could not load DMA'able memory for SMB block.\n");
        sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr;
        /*
         * All ring buffers and DMA blocks must share the same high
         * 32 bits of their 64-bit DMA addresses.
         */
        if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
            (error = age_check_boundary(sc)) != 0) {
                device_printf(sc->age_dev, "4GB boundary crossed, "
                    "switching to 32bit DMA addressing mode.\n");
                /* Limit DMA address space to 32bit and try again. */
                lowaddr = BUS_SPACE_MAXADDR_32BIT;

        /*
         * Create Tx/Rx buffer parent tag.
         * L1 supports full 64bit DMA addressing in Tx/Rx buffers, so it
         * needs a separate parent DMA tag.
         *
         * It seems that enabling 64bit DMA causes data corruption, so
         * limit the DMA address space to 32bit.
         */
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->age_dev),	/* parent */
            1, 0,				/* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
            BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_buffer_tag);
                device_printf(sc->age_dev,
                    "could not create parent buffer DMA tag.\n");

        /* Create tag for Tx buffers. */
        error = bus_dma_tag_create(
            sc->age_cdata.age_buffer_tag,	/* parent */
            1, 0,				/* alignment, boundary */
            BUS_SPACE_MAXADDR,			/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            AGE_TSO_MAXSIZE,			/* maxsize */
            AGE_MAXTXSEGS,			/* nsegments */
            AGE_TSO_MAXSEGSIZE,			/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_tx_tag);
                device_printf(sc->age_dev, "could not create Tx DMA tag.\n");

        /* Create tag for Rx buffers. */
        error = bus_dma_tag_create(
            sc->age_cdata.age_buffer_tag,	/* parent */
            AGE_RX_BUF_ALIGN, 0,		/* alignment, boundary */
            BUS_SPACE_MAXADDR,			/* lowaddr */
            BUS_SPACE_MAXADDR,			/* highaddr */
            NULL, NULL,				/* filter, filterarg */
            MCLBYTES,				/* maxsize */
            MCLBYTES,				/* maxsegsize */
            NULL, NULL,				/* lockfunc, lockarg */
            &sc->age_cdata.age_rx_tag);
                device_printf(sc->age_dev, "could not create Rx DMA tag.\n");

        /* Create DMA maps for Tx buffers. */
        for (i = 0; i < AGE_TX_RING_CNT; i++) {
                txd = &sc->age_cdata.age_txdesc[i];
                txd->tx_dmamap = NULL;
                error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0,
                        device_printf(sc->age_dev,
                            "could not create Tx dmamap.\n");

        /* Create DMA maps for Rx buffers. */
        if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
            &sc->age_cdata.age_rx_sparemap)) != 0) {
                device_printf(sc->age_dev,
                    "could not create spare Rx dmamap.\n");
        for (i = 0; i < AGE_RX_RING_CNT; i++) {
                rxd = &sc->age_cdata.age_rxdesc[i];
                rxd->rx_dmamap = NULL;
                error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
                        device_printf(sc->age_dev,
                            "could not create Rx dmamap.\n");

age_dma_free(struct age_softc *sc)

        struct age_txdesc *txd;
        struct age_rxdesc *rxd;

        if (sc->age_cdata.age_tx_tag != NULL) {
                for (i = 0; i < AGE_TX_RING_CNT; i++) {
                        txd = &sc->age_cdata.age_txdesc[i];
                        if (txd->tx_dmamap != NULL) {
                                bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
                                txd->tx_dmamap = NULL;
                bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
                sc->age_cdata.age_tx_tag = NULL;

        if (sc->age_cdata.age_rx_tag != NULL) {
                for (i = 0; i < AGE_RX_RING_CNT; i++) {
                        rxd = &sc->age_cdata.age_rxdesc[i];
                        if (rxd->rx_dmamap != NULL) {
                                bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
                                rxd->rx_dmamap = NULL;
                if (sc->age_cdata.age_rx_sparemap != NULL) {
                        bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
                            sc->age_cdata.age_rx_sparemap);
                        sc->age_cdata.age_rx_sparemap = NULL;
                bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
                sc->age_cdata.age_rx_tag = NULL;

        if (sc->age_cdata.age_tx_ring_tag != NULL) {
                if (sc->age_cdata.age_tx_ring_map != NULL)
                        bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
                            sc->age_cdata.age_tx_ring_map);
                if (sc->age_cdata.age_tx_ring_map != NULL &&
                    sc->age_rdata.age_tx_ring != NULL)
                        bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
                            sc->age_rdata.age_tx_ring,
                            sc->age_cdata.age_tx_ring_map);
                sc->age_rdata.age_tx_ring = NULL;
                sc->age_cdata.age_tx_ring_map = NULL;
                bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
                sc->age_cdata.age_tx_ring_tag = NULL;

        if (sc->age_cdata.age_rx_ring_tag != NULL) {
                if (sc->age_cdata.age_rx_ring_map != NULL)
                        bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
                            sc->age_cdata.age_rx_ring_map);
                if (sc->age_cdata.age_rx_ring_map != NULL &&
                    sc->age_rdata.age_rx_ring != NULL)
                        bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
                            sc->age_rdata.age_rx_ring,
                            sc->age_cdata.age_rx_ring_map);
                sc->age_rdata.age_rx_ring = NULL;
                sc->age_cdata.age_rx_ring_map = NULL;
                bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
                sc->age_cdata.age_rx_ring_tag = NULL;

        /* Rx return ring. */
        if (sc->age_cdata.age_rr_ring_tag != NULL) {
                if (sc->age_cdata.age_rr_ring_map != NULL)
                        bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
                            sc->age_cdata.age_rr_ring_map);
                if (sc->age_cdata.age_rr_ring_map != NULL &&
                    sc->age_rdata.age_rr_ring != NULL)
                        bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
                            sc->age_rdata.age_rr_ring,
                            sc->age_cdata.age_rr_ring_map);
                sc->age_rdata.age_rr_ring = NULL;
                sc->age_cdata.age_rr_ring_map = NULL;
                bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
                sc->age_cdata.age_rr_ring_tag = NULL;

        if (sc->age_cdata.age_cmb_block_tag != NULL) {
                if (sc->age_cdata.age_cmb_block_map != NULL)
                        bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
                            sc->age_cdata.age_cmb_block_map);
                if (sc->age_cdata.age_cmb_block_map != NULL &&
                    sc->age_rdata.age_cmb_block != NULL)
                        bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
                            sc->age_rdata.age_cmb_block,
                            sc->age_cdata.age_cmb_block_map);
                sc->age_rdata.age_cmb_block = NULL;
                sc->age_cdata.age_cmb_block_map = NULL;
                bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
                sc->age_cdata.age_cmb_block_tag = NULL;

        if (sc->age_cdata.age_smb_block_tag != NULL) {
                if (sc->age_cdata.age_smb_block_map != NULL)
                        bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
                            sc->age_cdata.age_smb_block_map);
                if (sc->age_cdata.age_smb_block_map != NULL &&
                    sc->age_rdata.age_smb_block != NULL)
                        bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
                            sc->age_rdata.age_smb_block,
                            sc->age_cdata.age_smb_block_map);
                sc->age_rdata.age_smb_block = NULL;
                sc->age_cdata.age_smb_block_map = NULL;
                bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
                sc->age_cdata.age_smb_block_tag = NULL;

        if (sc->age_cdata.age_buffer_tag != NULL) {
                bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
                sc->age_cdata.age_buffer_tag = NULL;
        if (sc->age_cdata.age_parent_tag != NULL) {
                bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
                sc->age_cdata.age_parent_tag = NULL;

/*
 * Make sure the interface is stopped at reboot time.
 */
age_shutdown(device_t dev)

        return (age_suspend(dev));

age_setwol(struct age_softc *sc)

        struct mii_data *mii;

        AGE_LOCK_ASSERT(sc);

        if (pci_find_cap(sc->age_dev, PCIY_PMG, &pmc) != 0) {
                CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
                /*
                 * No PME capability, PHY power down.
                 *
                 * For an unknown reason, powering down the PHY resulted
                 * in unexpected behavior such as inaccessibility of the
                 * hardware on a freshly rebooted system.  Disable
                 * powering down the PHY until more information is
                 * available for Attansic/Atheros PHY hardware.
                 */
                age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
                    MII_BMCR, BMCR_PDOWN);

        if ((ifp->if_capenable & IFCAP_WOL) != 0) {
                /*
                 * Note, this driver resets the link speed to 10/100Mbps
                 * with auto-negotiation, but we don't know whether that
                 * operation will succeed, as we have no control after
                 * powering off.  If the renegotiation fails, WOL may not
                 * work.  Running at 1Gbps draws more power than the
                 * 375mA at 3.3V specified in the PCI specification, and
                 * that would result in a complete shutdown of power to
                 * the ethernet controller.
                 *
                 * Save the currently negotiated media
                 * speed/duplex/flow-control to the softc and restore the
                 * same link again after resuming.  PHY handling such as
                 * power down/resetting to 100Mbps may be better handled
                 * in the suspend method of the phy driver.
                 */
                mii = device_get_softc(sc->age_miibus);

                if ((mii->mii_media_status & IFM_AVALID) != 0) {
                        switch (IFM_SUBTYPE(mii->mii_media_active)) {
                age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
                age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
                    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
                    ANAR_10 | ANAR_CSMA);
                age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
                    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
                /* Poll link state until age(4) gets a 10/100 link. */
                for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
                        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                                switch (IFM_SUBTYPE(
                                    mii->mii_media_active)) {
                        pause("agelnk", hz);

                if (i == MII_ANEGTICKS_GIGE)
                        device_printf(sc->age_dev,
                            "establishing link failed, "
                            "WOL may not work!");
                        /*
                         * No link, force the MAC to have a 100Mbps,
                         * full-duplex link.  This is the last resort
                         * and may or may not work.
                         */
                        mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
                        mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;

        if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
                pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
        CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
        reg = CSR_READ_4(sc, AGE_MAC_CFG);
        reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
        reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
        if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
                reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
        if ((ifp->if_capenable & IFCAP_WOL) != 0) {
                reg |= MAC_CFG_RX_ENB;
                CSR_WRITE_4(sc, AGE_MAC_CFG, reg);

        pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
        pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
        if ((ifp->if_capenable & IFCAP_WOL) != 0)
                pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);

        /* See above for powering down PHY issues. */
        if ((ifp->if_capenable & IFCAP_WOL) == 0) {
                /* No WOL, PHY power down. */
                age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
                    MII_BMCR, BMCR_PDOWN);

age_suspend(device_t dev)

        struct age_softc *sc;

        sc = device_get_softc(dev);

age_resume(device_t dev)

        struct age_softc *sc;

        sc = device_get_softc(dev);
        if ((ifp->if_flags & IFF_UP) != 0)
                age_init_locked(sc);

age_encap(struct age_softc *sc, struct mbuf **m_head)

        struct age_txdesc *txd, *txd_last;
        struct tx_desc *desc;
        bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
        uint32_t cflags, hdrlen, ip_off, poff, vtag;
        int error, i, nsegs, prod, si;

        AGE_LOCK_ASSERT(sc);

        M_ASSERTPKTHDR((*m_head));

        if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) {
                /*
                 * L1 requires the offset of the TCP/UDP payload in its
                 * Tx descriptor to perform hardware Tx checksum
                 * offload.  Additionally, TSO requires the IP/TCP
                 * header size and modification of the IP/TCP header in
                 * order to make the TSO engine work.  This kind of
                 * operation takes many CPU cycles on FreeBSD, so a fast
                 * host CPU is needed to get smooth TSO performance.
                 */
                struct ether_header *eh;

                if (M_WRITABLE(m) == 0) {
                        /* Get a writable copy. */
                        m = m_dup(*m_head, M_NOWAIT);
                        /* Release original mbufs. */

                ip_off = sizeof(struct ether_header);
                m = m_pullup(m, ip_off);

                eh = mtod(m, struct ether_header *);
                /*
                 * Check if hardware VLAN insertion is off.
                 * Additional check for LLC/SNAP frame?
                 */
                if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
                        ip_off = sizeof(struct ether_vlan_header);
                        m = m_pullup(m, ip_off);

                m = m_pullup(m, ip_off + sizeof(struct ip));

                ip = (struct ip *)(mtod(m, char *) + ip_off);
                poff = ip_off + (ip->ip_hl << 2);
                if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
                        m = m_pullup(m, poff + sizeof(struct tcphdr));

                        tcp = (struct tcphdr *)(mtod(m, char *) + poff);
                        m = m_pullup(m, poff + (tcp->th_off << 2));
                        /*
                         * L1 requires the IP/TCP header size and offset
                         * as well as the TCP pseudo checksum, which
                         * complicates TSO configuration.  I guess this
                         * comes from adherence to the Microsoft NDIS
                         * Large Send specification, which requires
                         * insertion of a pseudo checksum by the upper
                         * stack.  The pseudo checksum that NDIS refers
                         * to doesn't include the TCP payload length, so
                         * age(4) should recompute the pseudo checksum
                         * here.  Hopefully this isn't much of a burden
                         * on modern CPUs.
                         *
                         * Reset the IP checksum and recompute the TCP
                         * pseudo checksum as the NDIS specification
                         * says.
                         */
                        ip = (struct ip *)(mtod(m, char *) + ip_off);
                        tcp = (struct tcphdr *)(mtod(m, char *) + poff);
                        tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
                            ip->ip_dst.s_addr, htons(IPPROTO_TCP));
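                        /*
                         * Note that in_pseudo() is given only the
                         * addresses and the protocol; the TCP length is
                         * deliberately left out, which is exactly the
                         * NDIS-style pseudo checksum described above.
                         */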
        si = prod = sc->age_cdata.age_tx_prod;
        txd = &sc->age_cdata.age_txdesc[prod];
        map = txd->tx_dmamap;

        error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
            *m_head, txsegs, &nsegs, 0);
        if (error == EFBIG) {
                m = m_collapse(*m_head, M_NOWAIT, AGE_MAXTXSEGS);

                error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
                    *m_head, txsegs, &nsegs, 0);
        } else if (error != 0)

        /* Check descriptor overrun. */
        if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
                bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);

        /* Configure VLAN hardware tag insertion. */
        if ((m->m_flags & M_VLANTAG) != 0) {
                vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
                vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
                cflags |= AGE_TD_INSERT_VLAN_TAG;

        if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
                /* Request TSO and set MSS. */
                cflags |= AGE_TD_TSO_IPV4;
                cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
                cflags |= ((uint32_t)m->m_pkthdr.tso_segsz <<
                    AGE_TD_TSO_MSS_SHIFT);
                /* Set IP/TCP header size. */
                cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT;
                cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT;
                /*
                 * L1 requires that the first buffer hold only the
                 * IP/TCP header data.  The TCP payload must be handled
                 * by the remaining descriptors.
                 */
                hdrlen = poff + (tcp->th_off << 2);
                desc = &sc->age_rdata.age_tx_ring[prod];
                desc->addr = htole64(txsegs[0].ds_addr);
                desc->len = htole32(AGE_TX_BYTES(hdrlen) | vtag);
                desc->flags = htole32(cflags);
                sc->age_cdata.age_tx_cnt++;
                AGE_DESC_INC(prod, AGE_TX_RING_CNT);
                if (m->m_len - hdrlen > 0) {
                        /* Handle remaining payload of the 1st fragment. */
                        desc = &sc->age_rdata.age_tx_ring[prod];
                        desc->addr = htole64(txsegs[0].ds_addr + hdrlen);
                        desc->len = htole32(AGE_TX_BYTES(m->m_len - hdrlen) |
                        desc->flags = htole32(cflags);
                        sc->age_cdata.age_tx_cnt++;
                        AGE_DESC_INC(prod, AGE_TX_RING_CNT);

                /* Handle remaining fragments. */
        } else if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
                /* Configure Tx IP/TCP/UDP checksum offload. */
                cflags |= AGE_TD_CSUM;
                if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
                        cflags |= AGE_TD_TCPCSUM;
                if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
                        cflags |= AGE_TD_UDPCSUM;
                /* Set checksum start offset. */
                cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
                /* Set checksum insertion position of TCP/UDP. */
                cflags |= ((poff + m->m_pkthdr.csum_data) <<
                    AGE_TD_CSUM_XSUMOFFSET_SHIFT);
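                /*
                 * Worked example: for an untagged IPv4/TCP frame, poff
                 * is 14 (ethernet) + 20 (IP) = 34 and csum_data is the
                 * offset of th_sum within the TCP header (16), so the
                 * hardware writes the checksum at frame offset 50.
                 */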
        for (; i < nsegs; i++) {
                desc = &sc->age_rdata.age_tx_ring[prod];
                desc->addr = htole64(txsegs[i].ds_addr);
                desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
                desc->flags = htole32(cflags);
                sc->age_cdata.age_tx_cnt++;
                AGE_DESC_INC(prod, AGE_TX_RING_CNT);

        /* Update producer index. */
        sc->age_cdata.age_tx_prod = prod;

        /* Set EOP on the last descriptor. */
        prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
        desc = &sc->age_rdata.age_tx_ring[prod];
        desc->flags |= htole32(AGE_TD_EOP);

        /* Lastly set TSO header and modify IP/TCP header for TSO operation. */
        if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
                desc = &sc->age_rdata.age_tx_ring[si];
                desc->flags |= htole32(AGE_TD_TSO_HDR);

        /* Swap dmamap of the first and the last. */
        txd = &sc->age_cdata.age_txdesc[prod];
        map = txd_last->tx_dmamap;
        txd_last->tx_dmamap = txd->tx_dmamap;
        txd->tx_dmamap = map;

        /* Sync descriptors. */
        bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
            sc->age_cdata.age_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

age_start(struct ifnet *ifp)

        struct age_softc *sc;

        age_start_locked(ifp);

age_start_locked(struct ifnet *ifp)

        struct age_softc *sc;
        struct mbuf *m_head;

        AGE_LOCK_ASSERT(sc);

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0)

        for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                /*
                 * Pack the data into the transmit ring.  If we don't
                 * have room, set the OACTIVE flag and wait for the NIC
                 * to drain the ring.
                 */
                if (age_encap(sc, &m_head)) {
                        IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;

                /*
                 * If there's a BPF listener, bounce a copy of this
                 * frame to him.
                 */
                ETHER_BPF_MTAP(ifp, m_head);
                AGE_COMMIT_MBOX(sc);
                /* Set a timeout in case the chip goes out to lunch. */
                sc->age_watchdog_timer = AGE_TX_TIMEOUT;

age_watchdog(struct age_softc *sc)

        AGE_LOCK_ASSERT(sc);

        if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer)

        if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
                if_printf(sc->age_ifp, "watchdog timeout (missed link)\n");
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                age_init_locked(sc);

        if (sc->age_cdata.age_tx_cnt == 0) {
                if_printf(sc->age_ifp,
                    "watchdog timeout (missed Tx interrupts) -- recovering\n");
                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        age_start_locked(ifp);

        if_printf(sc->age_ifp, "watchdog timeout\n");
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        age_init_locked(sc);
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                age_start_locked(ifp);

age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)

        struct age_softc *sc;
        struct mii_data *mii;

        ifr = (struct ifreq *)data;

                if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU)
                else if (ifp->if_mtu != ifr->ifr_mtu) {
                        ifp->if_mtu = ifr->ifr_mtu;
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                                age_init_locked(sc);

                if ((ifp->if_flags & IFF_UP) != 0) {
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
                                if (((ifp->if_flags ^ sc->age_if_flags)
                                    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
                                if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
                                        age_init_locked(sc);
                        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                sc->age_if_flags = ifp->if_flags;

                if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)

                mii = device_get_softc(sc->age_miibus);
                error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);

                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
                if ((mask & IFCAP_TXCSUM) != 0 &&
                    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
                                ifp->if_hwassist |= AGE_CSUM_FEATURES;
                                ifp->if_hwassist &= ~AGE_CSUM_FEATURES;
                if ((mask & IFCAP_RXCSUM) != 0 &&
                    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                        reg = CSR_READ_4(sc, AGE_MAC_CFG);
                        reg &= ~MAC_CFG_RXCSUM_ENB;
                        if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
                                reg |= MAC_CFG_RXCSUM_ENB;
                        CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
                if ((mask & IFCAP_TSO4) != 0 &&
                    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
                        ifp->if_capenable ^= IFCAP_TSO4;
                        if ((ifp->if_capenable & IFCAP_TSO4) != 0)
                                ifp->if_hwassist |= CSUM_TSO;
                                ifp->if_hwassist &= ~CSUM_TSO;

                if ((mask & IFCAP_WOL_MCAST) != 0 &&
                    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
                        ifp->if_capenable ^= IFCAP_WOL_MCAST;
                if ((mask & IFCAP_WOL_MAGIC) != 0 &&
                    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
                        ifp->if_capenable ^= IFCAP_WOL_MAGIC;
                if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
                    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
                        ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
                if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
                    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
                if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
                    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
                        if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
                                ifp->if_capenable &= ~IFCAP_VLAN_HWTSO;

                VLAN_CAPABILITIES(ifp);

                error = ether_ioctl(ifp, cmd, data);

age_mac_config(struct age_softc *sc)

        struct mii_data *mii;

        AGE_LOCK_ASSERT(sc);

        mii = device_get_softc(sc->age_miibus);
        reg = CSR_READ_4(sc, AGE_MAC_CFG);
        reg &= ~MAC_CFG_FULL_DUPLEX;
        reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
        reg &= ~MAC_CFG_SPEED_MASK;
        /* Reprogram MAC with resolved speed/duplex. */
        switch (IFM_SUBTYPE(mii->mii_media_active)) {
                reg |= MAC_CFG_SPEED_10_100;
                reg |= MAC_CFG_SPEED_1000;

        if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
                reg |= MAC_CFG_FULL_DUPLEX;
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
                        reg |= MAC_CFG_TX_FC;
                if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
                        reg |= MAC_CFG_RX_FC;

        CSR_WRITE_4(sc, AGE_MAC_CFG, reg);

age_link_task(void *arg, int pending)

        struct age_softc *sc;
        struct mii_data *mii;

        sc = (struct age_softc *)arg;

        mii = device_get_softc(sc->age_miibus);

        if (mii == NULL || ifp == NULL ||
            (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {

        sc->age_flags &= ~AGE_FLAG_LINK;
        if ((mii->mii_media_status & IFM_AVALID) != 0) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                        sc->age_flags |= AGE_FLAG_LINK;

        /* Stop Rx/Tx MACs. */

        /* Program MACs with resolved speed/duplex/flow-control. */
        if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
                reg = CSR_READ_4(sc, AGE_MAC_CFG);
                /* Restart DMA engine and Tx/Rx MAC. */
                CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
                    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
                reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
                CSR_WRITE_4(sc, AGE_MAC_CFG, reg);

age_stats_update(struct age_softc *sc)

        struct age_stats *stat;

        AGE_LOCK_ASSERT(sc);

        stat = &sc->age_stat;

        bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
            sc->age_cdata.age_smb_block_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        smb = sc->age_rdata.age_smb_block;
        if (smb->updated == 0)

        stat->rx_frames += smb->rx_frames;
        stat->rx_bcast_frames += smb->rx_bcast_frames;
        stat->rx_mcast_frames += smb->rx_mcast_frames;
        stat->rx_pause_frames += smb->rx_pause_frames;
        stat->rx_control_frames += smb->rx_control_frames;
        stat->rx_crcerrs += smb->rx_crcerrs;
        stat->rx_lenerrs += smb->rx_lenerrs;
        stat->rx_bytes += smb->rx_bytes;
        stat->rx_runts += smb->rx_runts;
        stat->rx_fragments += smb->rx_fragments;
        stat->rx_pkts_64 += smb->rx_pkts_64;
        stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
        stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
        stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
        stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
        stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
        stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
        stat->rx_pkts_truncated += smb->rx_pkts_truncated;
        stat->rx_fifo_oflows += smb->rx_fifo_oflows;
        stat->rx_desc_oflows += smb->rx_desc_oflows;
        stat->rx_alignerrs += smb->rx_alignerrs;
        stat->rx_bcast_bytes += smb->rx_bcast_bytes;
        stat->rx_mcast_bytes += smb->rx_mcast_bytes;
        stat->rx_pkts_filtered += smb->rx_pkts_filtered;

        stat->tx_frames += smb->tx_frames;
        stat->tx_bcast_frames += smb->tx_bcast_frames;
        stat->tx_mcast_frames += smb->tx_mcast_frames;
        stat->tx_pause_frames += smb->tx_pause_frames;
        stat->tx_excess_defer += smb->tx_excess_defer;
        stat->tx_control_frames += smb->tx_control_frames;
        stat->tx_deferred += smb->tx_deferred;
        stat->tx_bytes += smb->tx_bytes;
        stat->tx_pkts_64 += smb->tx_pkts_64;
        stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
        stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
        stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
        stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
        stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
        stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
        stat->tx_single_colls += smb->tx_single_colls;
        stat->tx_multi_colls += smb->tx_multi_colls;
        stat->tx_late_colls += smb->tx_late_colls;
        stat->tx_excess_colls += smb->tx_excess_colls;
        stat->tx_underrun += smb->tx_underrun;
        stat->tx_desc_underrun += smb->tx_desc_underrun;
        stat->tx_lenerrs += smb->tx_lenerrs;
        stat->tx_pkts_truncated += smb->tx_pkts_truncated;
        stat->tx_bcast_bytes += smb->tx_bcast_bytes;
        stat->tx_mcast_bytes += smb->tx_mcast_bytes;

        /* Update counters in ifnet. */
        ifp->if_opackets += smb->tx_frames;

        ifp->if_collisions += smb->tx_single_colls +
            smb->tx_multi_colls + smb->tx_late_colls +
            smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

        ifp->if_oerrors += smb->tx_excess_colls +
            smb->tx_late_colls + smb->tx_underrun +
            smb->tx_pkts_truncated;

        ifp->if_ipackets += smb->rx_frames;

        ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
            smb->rx_runts + smb->rx_pkts_truncated +
            smb->rx_fifo_oflows + smb->rx_desc_oflows +

        /* Update done, clear. */

        bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
            sc->age_cdata.age_smb_block_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        struct age_softc *sc;

        sc = (struct age_softc *)arg;

        status = CSR_READ_4(sc, AGE_INTR_STATUS);
        if (status == 0 || (status & AGE_INTRS) == 0)
                return (FILTER_STRAY);
        /* Disable interrupts. */
        CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
        taskqueue_enqueue(sc->age_tq, &sc->age_int_task);

        return (FILTER_HANDLED);
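
/*
 * age_intr() runs as a fast interrupt filter: it only acknowledges and
 * masks the interrupt (INTR_DIS_INT) and defers the real work to
 * age_int_task() on the driver taskqueue.
 */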
2138 age_int_task(void *arg, int pending)
2140 struct age_softc *sc;
2145 sc = (struct age_softc *)arg;
2149 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2150 sc->age_cdata.age_cmb_block_map,
2151 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2152 cmb = sc->age_rdata.age_cmb_block;
2153 status = le32toh(cmb->intr_status);
2154 if (sc->age_morework != 0)
2155 status |= INTR_CMB_RX;
2156 if ((status & AGE_INTRS) == 0)
2159 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
2161 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
2163 /* Let hardware know CMB was served. */
2164 cmb->intr_status = 0;
2165 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2166 sc->age_cdata.age_cmb_block_map,
2167 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2170 printf("INTR: 0x%08x\n", status);
2171 status &= ~INTR_DIS_DMA;
2172 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
2175 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2176 if ((status & INTR_CMB_RX) != 0)
2177 sc->age_morework = age_rxintr(sc, sc->age_rr_prod,
2178 sc->age_process_limit);
2179 if ((status & INTR_CMB_TX) != 0)
2180 age_txintr(sc, sc->age_tpd_cons);
2181 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
2182 if ((status & INTR_DMA_RD_TO_RST) != 0)
2183 device_printf(sc->age_dev,
2184 "DMA read error! -- resetting\n");
2185 if ((status & INTR_DMA_WR_TO_RST) != 0)
2186 device_printf(sc->age_dev,
2187 "DMA write error! -- resetting\n");
2188 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2189 age_init_locked(sc);
2191 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2192 age_start_locked(ifp);
2193 if ((status & INTR_SMB) != 0)
2194 age_stats_update(sc);
2197 /* Check whether the CMB was updated while serving the Tx/Rx/SMB handlers. */
2198 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2199 sc->age_cdata.age_cmb_block_map,
2200 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2201 status = le32toh(cmb->intr_status);
2202 if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) {
2203 taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
2209 /* Re-enable interrupts. */
2210 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
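/*
 * Note the CMB re-check just above: interrupts stay masked while
 * the task runs, so a Tx/Rx completion landing after the handlers
 * finished would otherwise be lost until the next interrupt.
 * Re-enqueueing the task instead of unmasking closes that window.
 */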
2215 age_txintr(struct age_softc *sc, int tpd_cons)
2218 struct age_txdesc *txd;
2221 AGE_LOCK_ASSERT(sc);
2225 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
2226 sc->age_cdata.age_tx_ring_map,
2227 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2230 * Go through our Tx list and free mbufs for those
2231 * frames which have been transmitted.
2233 cons = sc->age_cdata.age_tx_cons;
2234 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
2235 if (sc->age_cdata.age_tx_cnt <= 0)
2238 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2239 sc->age_cdata.age_tx_cnt--;
2240 txd = &sc->age_cdata.age_txdesc[cons];
2242 * Clear the Tx descriptors; this is not required, but it
2243 * helps debugging in case of Tx issues.
2245 txd->tx_desc->addr = 0;
2246 txd->tx_desc->len = 0;
2247 txd->tx_desc->flags = 0;
2249 if (txd->tx_m == NULL)
2251 /* Reclaim transmitted mbufs. */
2252 bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap,
2253 BUS_DMASYNC_POSTWRITE);
2254 bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap);
2260 sc->age_cdata.age_tx_cons = cons;
2263 * Disarm the watchdog timer only when there are no pending
2264 * Tx descriptors in the queue.
2266 if (sc->age_cdata.age_tx_cnt == 0)
2267 sc->age_watchdog_timer = 0;
2268 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
2269 sc->age_cdata.age_tx_ring_map,
2270 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
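/*
 * AGE_DESC_INC() advances a ring index with wraparound; it is
 * presumably defined in if_agevar.h along the lines of
 *
 *	#define AGE_DESC_INC(x, y)	((x) = ((x) + 1) % (y))
 *
 * so, e.g., with a 256-entry Tx ring, index 255 wraps back to 0.
 */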
2274 #ifndef __NO_STRICT_ALIGNMENT
2275 static struct mbuf *
2276 age_fixup_rx(struct ifnet *ifp, struct mbuf *m)
2280 uint16_t *src, *dst;
2282 src = mtod(m, uint16_t *);
2285 if (m->m_next == NULL) {
2286 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
2292 * Append a new mbuf to the received mbuf chain and copy the
2293 * Ethernet header out of the chain. This can save a lot of
2294 * CPU cycles for jumbo frames.
2296 MGETHDR(n, M_NOWAIT, MT_DATA);
2302 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
2303 m->m_data += ETHER_HDR_LEN;
2304 m->m_len -= ETHER_HDR_LEN;
2305 n->m_len = ETHER_HDR_LEN;
2306 M_MOVE_PKTHDR(n, m);
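/*
 * Background for the fixup above: on strict-alignment platforms
 * the IP header must be 32-bit aligned, but the 14-byte Ethernet
 * header leaves it 2-byte aligned.  Single-mbuf frames are shifted
 * in place; for chains, peeling the Ethernet header off into a
 * fresh mbuf (as done here) realigns the payload without copying
 * the whole jumbo frame.
 */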
2312 /* Receive a frame. */
2314 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
2316 struct age_rxdesc *rxd;
2318 struct mbuf *mp, *m;
2319 uint32_t status, index, vtag;
2323 AGE_LOCK_ASSERT(sc);
2326 status = le32toh(rxrd->flags);
2327 index = le32toh(rxrd->index);
2328 rx_cons = AGE_RX_CONS(index);
2329 nsegs = AGE_RX_NSEGS(index);
2331 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
2332 if ((status & (AGE_RRD_ERROR | AGE_RRD_LENGTH_NOK)) != 0) {
2334 * We want to pass the following frames to the upper
2335 * layer regardless of the error status of the Rx return ring:
2338 * o IP/TCP/UDP checksum is bad.
2339 * o frame length and protocol-specific length do not match.
2342 status |= AGE_RRD_IPCSUM_NOK | AGE_RRD_TCP_UDPCSUM_NOK;
2343 if ((status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
2344 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0)
2348 for (count = 0; count < nsegs; count++,
2349 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
2350 rxd = &sc->age_cdata.age_rxdesc[rx_cons];
2352 /* Add a new receive buffer to the ring. */
2353 if (age_newbuf(sc, rxd) != 0) {
2355 /* Reuse Rx buffers. */
2356 if (sc->age_cdata.age_rxhead != NULL)
2357 m_freem(sc->age_cdata.age_rxhead);
2362 * Assume we've received a full-sized frame. The
2363 * actual size is fixed up when we encounter the end of
2364 * a multi-segment frame.
2366 mp->m_len = AGE_RX_BUF_SIZE;
2368 /* Chain received mbufs. */
2369 if (sc->age_cdata.age_rxhead == NULL) {
2370 sc->age_cdata.age_rxhead = mp;
2371 sc->age_cdata.age_rxtail = mp;
2373 mp->m_flags &= ~M_PKTHDR;
2374 sc->age_cdata.age_rxprev_tail =
2375 sc->age_cdata.age_rxtail;
2376 sc->age_cdata.age_rxtail->m_next = mp;
2377 sc->age_cdata.age_rxtail = mp;
2380 if (count == nsegs - 1) {
2381 /* Last desc. for this frame. */
2382 m = sc->age_cdata.age_rxhead;
2383 m->m_flags |= M_PKTHDR;
2385 * It seems that the L1 controller has no way
2386 * to tell the hardware to strip the CRC bytes.
2388 m->m_pkthdr.len = sc->age_cdata.age_rxlen -
2391 /* Set last mbuf size. */
2392 mp->m_len = sc->age_cdata.age_rxlen -
2393 ((nsegs - 1) * AGE_RX_BUF_SIZE);
2394 /* Remove the CRC bytes in chained mbufs. */
2395 if (mp->m_len <= ETHER_CRC_LEN) {
2396 sc->age_cdata.age_rxtail =
2397 sc->age_cdata.age_rxprev_tail;
2398 sc->age_cdata.age_rxtail->m_len -=
2399 (ETHER_CRC_LEN - mp->m_len);
2400 sc->age_cdata.age_rxtail->m_next = NULL;
2403 mp->m_len -= ETHER_CRC_LEN;
2406 m->m_len = m->m_pkthdr.len;
2407 m->m_pkthdr.rcvif = ifp;
2409 * Set checksum information.
2410 * It seems that the L1 controller can compute a partial
2411 * checksum. The partial checksum value could be used
2412 * to accelerate checksum computation for fragmented
2413 * TCP/UDP packets; the upper network stack already
2414 * takes advantage of the partial checksum value in the
2415 * IP reassembly stage. But I'm not sure of the
2416 * correctness of the partial hardware checksum
2417 * assistance due to the lack of a data sheet. If it is
2418 * proven to work on the L1 I'll enable it.
2420 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2421 (status & AGE_RRD_IPV4) != 0) {
2422 if ((status & AGE_RRD_IPCSUM_NOK) == 0)
2423 m->m_pkthdr.csum_flags |=
2424 CSUM_IP_CHECKED | CSUM_IP_VALID;
2425 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
2426 (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
2427 m->m_pkthdr.csum_flags |=
2428 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2429 m->m_pkthdr.csum_data = 0xffff;
2432 * Don't mark the checksum as bad for TCP/UDP frames,
2433 * as fragmented frames may always have the
2434 * bad-checksum bit set in the descriptor status.
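/*
 * For reference: CSUM_DATA_VALID | CSUM_PSEUDO_HDR together with
 * csum_data == 0xffff is the stack's convention for "hardware
 * verified the full TCP/UDP checksum", letting the upper layers
 * skip the software checksum entirely.
 */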
2438 /* Check for VLAN tagged frames. */
2439 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2440 (status & AGE_RRD_VLAN) != 0) {
2441 vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
2442 m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag);
2443 m->m_flags |= M_VLANTAG;
2445 #ifndef __NO_STRICT_ALIGNMENT
2446 m = age_fixup_rx(ifp, m);
2452 (*ifp->if_input)(ifp, m);
2458 /* Reset mbuf chains. */
2459 AGE_RXCHAIN_RESET(sc);
2463 age_rxintr(struct age_softc *sc, int rr_prod, int count)
2465 struct rx_rdesc *rxrd;
2466 int rr_cons, nsegs, pktlen, prog;
2468 AGE_LOCK_ASSERT(sc);
2470 rr_cons = sc->age_cdata.age_rr_cons;
2471 if (rr_cons == rr_prod)
2474 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2475 sc->age_cdata.age_rr_ring_map,
2476 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2477 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
2478 sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_POSTWRITE);
2480 for (prog = 0; rr_cons != rr_prod; prog++) {
2483 rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
2484 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
2488 * Check the number of segments against the received bytes.
2489 * A non-matching value would indicate that the hardware
2490 * is still updating the Rx return descriptors.
2491 * I'm not sure whether this check is really needed.
2493 pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
2494 if (nsegs != (pktlen + (AGE_RX_BUF_SIZE - 1)) / AGE_RX_BUF_SIZE)
2497 /* Received a frame. */
2498 age_rxeof(sc, rxrd);
2499 /* Clear return ring. */
2501 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
2502 sc->age_cdata.age_rx_cons += nsegs;
2503 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
2507 /* Update the consumer index. */
2508 sc->age_cdata.age_rr_cons = rr_cons;
2510 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
2511 sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
2512 /* Sync descriptors. */
2513 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2514 sc->age_cdata.age_rr_ring_map,
2515 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2517 /* Notify the hardware that new Rx buffers are available. */
2518 AGE_COMMIT_MBOX(sc);
2521 return (count > 0 ? 0 : EAGAIN);
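/*
 * The return value feeds sc->age_morework in age_int_task():
 * EAGAIN means the process limit ran out before the return ring
 * was drained, so the task is re-enqueued instead of waiting for
 * another interrupt.
 */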
2527 struct age_softc *sc;
2528 struct mii_data *mii;
2530 sc = (struct age_softc *)arg;
2532 AGE_LOCK_ASSERT(sc);
2534 mii = device_get_softc(sc->age_miibus);
2537 callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2541 age_reset(struct age_softc *sc)
2546 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
2547 CSR_READ_4(sc, AGE_MASTER_CFG);
2549 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2550 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2556 device_printf(sc->age_dev, "reset timeout(0x%08x)!\n", reg);
2557 /* Initialize PCIe module. From Linux. */
2558 CSR_WRITE_4(sc, 0x12FC, 0x6500);
2559 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2565 struct age_softc *sc;
2567 sc = (struct age_softc *)xsc;
2569 age_init_locked(sc);
2574 age_init_locked(struct age_softc *sc)
2577 struct mii_data *mii;
2578 uint8_t eaddr[ETHER_ADDR_LEN];
2580 uint32_t reg, fsize;
2581 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
2584 AGE_LOCK_ASSERT(sc);
2587 mii = device_get_softc(sc->age_miibus);
2589 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2593 * Cancel any pending I/O.
2598 * Reset the chip to a known state.
2602 /* Initialize descriptors. */
2603 error = age_init_rx_ring(sc);
2605 device_printf(sc->age_dev, "no memory for Rx buffers.\n");
2609 age_init_rr_ring(sc);
2610 age_init_tx_ring(sc);
2611 age_init_cmb_block(sc);
2612 age_init_smb_block(sc);
2614 /* Reprogram the station address. */
2615 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2616 CSR_WRITE_4(sc, AGE_PAR0,
2617 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2618 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
2620 /* Set descriptor base addresses. */
2621 paddr = sc->age_rdata.age_tx_ring_paddr;
2622 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
2623 paddr = sc->age_rdata.age_rx_ring_paddr;
2624 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
2625 paddr = sc->age_rdata.age_rr_ring_paddr;
2626 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
2627 paddr = sc->age_rdata.age_tx_ring_paddr;
2628 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
2629 paddr = sc->age_rdata.age_cmb_block_paddr;
2630 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
2631 paddr = sc->age_rdata.age_smb_block_paddr;
2632 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
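/*
 * AGE_ADDR_HI()/AGE_ADDR_LO() split a 64-bit bus address into the
 * two 32-bit register halves, presumably along the lines of
 *
 *	#define AGE_ADDR_LO(x)	((uint64_t)(x) & 0xFFFFFFFF)
 *	#define AGE_ADDR_HI(x)	((uint64_t)(x) >> 32)
 *
 * Only one high-address register is written above, so all rings
 * apparently must share the same 4GB segment.
 */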
2633 /* Set Rx/Rx return descriptor counter. */
2634 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
2635 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
2636 DESC_RRD_CNT_MASK) |
2637 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
2638 /* Set Tx descriptor counter. */
2639 CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
2640 (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
2642 /* Tell hardware that we're ready to load descriptors. */
2643 CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
2646 * Initialize the mailbox register.
2647 * Updated producer/consumer index information is exchanged
2648 * through this mailbox register. However, the Tx producer and
2649 * the Rx return consumer/Rx producer are all shared, so
2650 * it's hard to separate the Tx and Rx code paths without
2651 * locking. If the L1 hardware had separate mailbox registers
2652 * for Tx and Rx consumer/producer management we could have
2653 * independent Tx/Rx handlers, in which case the Rx handler
2654 * could run without any locking.
2656 AGE_COMMIT_MBOX(sc);
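/*
 * AGE_COMMIT_MBOX() presumably packs the Rx producer, Rx return
 * consumer and Tx producer indices into the single AGE_MBOX
 * register in one write, roughly:
 *
 *	CSR_WRITE_4(sc, AGE_MBOX,
 *	    ((rx_cons << MBOX_RD_PROD_IDX_SHIFT) & MBOX_RD_PROD_IDX_MASK) |
 *	    ((rr_cons << MBOX_RRD_CONS_IDX_SHIFT) & MBOX_RRD_CONS_IDX_MASK) |
 *	    ((tx_prod << MBOX_TD_PROD_IDX_SHIFT) & MBOX_TD_PROD_IDX_MASK));
 *
 * (the shift/mask names are illustrative), which is why Tx and Rx
 * cannot post their indices independently.
 */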
2658 /* Configure IPG/IFG parameters. */
2659 CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
2660 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
2661 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2662 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2663 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
2665 /* Set parameters for half-duplex media. */
2666 CSR_WRITE_4(sc, AGE_HDPX_CFG,
2667 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2668 HDPX_CFG_LCOL_MASK) |
2669 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2670 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2671 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2672 HDPX_CFG_ABEBT_MASK) |
2673 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2674 HDPX_CFG_JAMIPG_MASK));
2676 /* Configure interrupt moderation timer. */
2677 CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
2678 reg = CSR_READ_4(sc, AGE_MASTER_CFG);
2679 reg &= ~MASTER_MTIMER_ENB;
2680 if (AGE_USECS(sc->age_int_mod) == 0)
2681 reg &= ~MASTER_ITIMER_ENB;
2683 reg |= MASTER_ITIMER_ENB;
2684 CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
2686 device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
2688 CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
2690 /* Set the maximum frame size, but don't let the MTU be less than ETHERMTU. */
2691 if (ifp->if_mtu < ETHERMTU)
2692 sc->age_max_frame_size = ETHERMTU;
2694 sc->age_max_frame_size = ifp->if_mtu;
2695 sc->age_max_frame_size += ETHER_HDR_LEN +
2696 sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
2697 CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
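/*
 * Worked example: with the default 1500-byte MTU this programs
 * 1500 + ETHER_HDR_LEN (14) + sizeof(struct ether_vlan_header)
 * (18) + ETHER_CRC_LEN (4) = 1536 bytes; the VLAN header already
 * includes the Ethernet header, so the reservation is apparently
 * deliberately generous.
 */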
2698 /* Configure jumbo frames. */
2699 fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
2700 CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
2701 (((fsize / sizeof(uint64_t)) <<
2702 RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
2703 ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
2704 RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
2705 ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
2706 RXQ_JUMBO_CFG_RRD_TIMER_MASK));
2708 /* Configure flow-control parameters. From Linux. */
2709 if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
2711 * Magic workaround for old L1 revisions.
2712 * It is not known which hardware revision requires this magic.
2714 CSR_WRITE_4(sc, 0x12FC, 0x6500);
2716 * Another magic workaround for flow-control mode
2717 * change. From Linux.
2719 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2723 * The relationship between the pause parameters, the FIFO
2724 * size, and the number of Rx/Rx return descriptors should be
2726 * better understood; these magic parameters came from Linux.
2728 switch (sc->age_chip_rev) {
2733 rxf_hi = AGE_RX_RING_CNT / 16;
2734 rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
2735 rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
2736 rrd_lo = AGE_RR_RING_CNT / 16;
2739 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
2743 rxf_hi = (reg * 7) / 8;
2744 if (rxf_hi < rxf_lo)
2745 rxf_hi = rxf_lo + 16;
2746 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
2748 rrd_hi = (reg * 7) / 8;
2751 if (rrd_hi < rrd_lo)
2752 rrd_hi = rrd_lo + 3;
2755 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
2756 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
2757 RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
2758 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
2759 RXQ_FIFO_PAUSE_THRESH_HI_MASK));
2760 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
2761 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
2762 RXQ_RRD_PAUSE_THRESH_LO_MASK) |
2763 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
2764 RXQ_RRD_PAUSE_THRESH_HI_MASK));
2766 /* Configure RxQ. */
2767 CSR_WRITE_4(sc, AGE_RXQ_CFG,
2768 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2769 RXQ_CFG_RD_BURST_MASK) |
2770 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
2771 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
2772 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
2773 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
2774 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
2776 /* Configure TxQ. */
2777 CSR_WRITE_4(sc, AGE_TXQ_CFG,
2778 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
2779 TXQ_CFG_TPD_BURST_MASK) |
2780 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
2781 TXQ_CFG_TX_FIFO_BURST_MASK) |
2782 ((TXQ_CFG_TPD_FETCH_DEFAULT <<
2783 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
2786 CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
2787 (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
2788 TX_JUMBO_TPD_TH_MASK) |
2789 ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
2790 TX_JUMBO_TPD_IPG_MASK));
2791 /* Configure DMA parameters. */
2792 CSR_WRITE_4(sc, AGE_DMA_CFG,
2793 DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
2794 sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
2795 sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
2797 /* Configure CMB DMA write threshold. */
2798 CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
2799 ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
2800 CMB_WR_THRESH_RRD_MASK) |
2801 ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
2802 CMB_WR_THRESH_TPD_MASK));
2804 /* Set CMB/SMB timer and enable them. */
2805 CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
2806 ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
2807 ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
2808 /* Request an SMB update every second. */
2809 CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
2810 CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
2813 * Disable all WOL bits, as WOL can interfere with normal Rx
2816 CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
2819 * Configure Tx/Rx MACs.
2820 * - Auto-padding for short frames.
2821 * - Enable CRC generation.
2822 * Start with full-duplex/1000Mbps media. The actual
2823 * reconfiguration of the MAC follows after link establishment.
2825 CSR_WRITE_4(sc, AGE_MAC_CFG,
2826 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
2827 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
2828 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2829 MAC_CFG_PREAMBLE_MASK));
2830 /* Set up the receive filter. */
2834 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2835 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2836 reg |= MAC_CFG_RXCSUM_ENB;
2838 /* Ack and clear all pending interrupts. */
2839 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
2840 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
2842 /* Finally enable Tx/Rx MAC. */
2843 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2845 sc->age_flags &= ~AGE_FLAG_LINK;
2846 /* Switch to the current media. */
2849 callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2851 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2852 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2856 age_stop(struct age_softc *sc)
2859 struct age_txdesc *txd;
2860 struct age_rxdesc *rxd;
2864 AGE_LOCK_ASSERT(sc);
2866 * Mark the interface down and cancel the watchdog timer.
2869 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2870 sc->age_flags &= ~AGE_FLAG_LINK;
2871 callout_stop(&sc->age_tick_ch);
2872 sc->age_watchdog_timer = 0;
2875 * Disable interrupts.
2877 CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
2878 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
2879 /* Stop CMB/SMB updates. */
2880 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
2881 /* Stop Rx/Tx MAC. */
2885 CSR_WRITE_4(sc, AGE_DMA_CFG,
2886 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
2888 CSR_WRITE_4(sc, AGE_TXQ_CFG,
2889 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
2890 CSR_WRITE_4(sc, AGE_RXQ_CFG,
2891 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
2892 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2893 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2898 device_printf(sc->age_dev,
2899 "stopping Rx/Tx MACs timed out (0x%08x)!\n", reg);
2901 /* Reclaim Rx buffers that have been processed. */
2902 if (sc->age_cdata.age_rxhead != NULL)
2903 m_freem(sc->age_cdata.age_rxhead);
2904 AGE_RXCHAIN_RESET(sc);
2906 * Free Rx and Tx mbufs still in the queues.
2908 for (i = 0; i < AGE_RX_RING_CNT; i++) {
2909 rxd = &sc->age_cdata.age_rxdesc[i];
2910 if (rxd->rx_m != NULL) {
2911 bus_dmamap_sync(sc->age_cdata.age_rx_tag,
2912 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2913 bus_dmamap_unload(sc->age_cdata.age_rx_tag,
2919 for (i = 0; i < AGE_TX_RING_CNT; i++) {
2920 txd = &sc->age_cdata.age_txdesc[i];
2921 if (txd->tx_m != NULL) {
2922 bus_dmamap_sync(sc->age_cdata.age_tx_tag,
2923 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2924 bus_dmamap_unload(sc->age_cdata.age_tx_tag,
2933 age_stop_txmac(struct age_softc *sc)
2938 AGE_LOCK_ASSERT(sc);
2940 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2941 if ((reg & MAC_CFG_TX_ENB) != 0) {
2942 reg &= ~MAC_CFG_TX_ENB;
2943 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2945 /* Stop Tx DMA engine. */
2946 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2947 if ((reg & DMA_CFG_RD_ENB) != 0) {
2948 reg &= ~DMA_CFG_RD_ENB;
2949 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2951 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2952 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2953 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2958 device_printf(sc->age_dev, "stopping TxMAC timeout!\n");
2962 age_stop_rxmac(struct age_softc *sc)
2967 AGE_LOCK_ASSERT(sc);
2969 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2970 if ((reg & MAC_CFG_RX_ENB) != 0) {
2971 reg &= ~MAC_CFG_RX_ENB;
2972 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2974 /* Stop Rx DMA engine. */
2975 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2976 if ((reg & DMA_CFG_WR_ENB) != 0) {
2977 reg &= ~DMA_CFG_WR_ENB;
2978 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2980 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2981 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2982 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2987 device_printf(sc->age_dev, "stopping RxMAC timeout!\n");
2991 age_init_tx_ring(struct age_softc *sc)
2993 struct age_ring_data *rd;
2994 struct age_txdesc *txd;
2997 AGE_LOCK_ASSERT(sc);
2999 sc->age_cdata.age_tx_prod = 0;
3000 sc->age_cdata.age_tx_cons = 0;
3001 sc->age_cdata.age_tx_cnt = 0;
3003 rd = &sc->age_rdata;
3004 bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
3005 for (i = 0; i < AGE_TX_RING_CNT; i++) {
3006 txd = &sc->age_cdata.age_txdesc[i];
3007 txd->tx_desc = &rd->age_tx_ring[i];
3011 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
3012 sc->age_cdata.age_tx_ring_map,
3013 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3017 age_init_rx_ring(struct age_softc *sc)
3019 struct age_ring_data *rd;
3020 struct age_rxdesc *rxd;
3023 AGE_LOCK_ASSERT(sc);
3025 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
3026 sc->age_morework = 0;
3027 rd = &sc->age_rdata;
3028 bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
3029 for (i = 0; i < AGE_RX_RING_CNT; i++) {
3030 rxd = &sc->age_cdata.age_rxdesc[i];
3032 rxd->rx_desc = &rd->age_rx_ring[i];
3033 if (age_newbuf(sc, rxd) != 0)
3037 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
3038 sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE);
3044 age_init_rr_ring(struct age_softc *sc)
3046 struct age_ring_data *rd;
3048 AGE_LOCK_ASSERT(sc);
3050 sc->age_cdata.age_rr_cons = 0;
3051 AGE_RXCHAIN_RESET(sc);
3053 rd = &sc->age_rdata;
3054 bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
3055 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
3056 sc->age_cdata.age_rr_ring_map,
3057 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3061 age_init_cmb_block(struct age_softc *sc)
3063 struct age_ring_data *rd;
3065 AGE_LOCK_ASSERT(sc);
3067 rd = &sc->age_rdata;
3068 bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
3069 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
3070 sc->age_cdata.age_cmb_block_map,
3071 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3075 age_init_smb_block(struct age_softc *sc)
3077 struct age_ring_data *rd;
3079 AGE_LOCK_ASSERT(sc);
3081 rd = &sc->age_rdata;
3082 bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
3083 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
3084 sc->age_cdata.age_smb_block_map,
3085 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3089 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
3091 struct rx_desc *desc;
3093 bus_dma_segment_t segs[1];
3097 AGE_LOCK_ASSERT(sc);
3099 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3102 m->m_len = m->m_pkthdr.len = MCLBYTES;
3103 #ifndef __NO_STRICT_ALIGNMENT
3104 m_adj(m, AGE_RX_BUF_ALIGN);
3107 if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag,
3108 sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3112 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3114 if (rxd->rx_m != NULL) {
3115 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
3116 BUS_DMASYNC_POSTREAD);
3117 bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap);
3119 map = rxd->rx_dmamap;
3120 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
3121 sc->age_cdata.age_rx_sparemap = map;
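/*
 * The map swap above is the usual busdma spare-map idiom: the new
 * mbuf is loaded into the spare map first, so a failed
 * bus_dmamap_load_mbuf_sg() leaves the old mbuf and its mapping
 * intact and the descriptor can simply be reused.
 */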
3122 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
3123 BUS_DMASYNC_PREREAD);
3126 desc = rxd->rx_desc;
3127 desc->addr = htole64(segs[0].ds_addr);
3128 desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) <<
3134 age_rxvlan(struct age_softc *sc)
3139 AGE_LOCK_ASSERT(sc);
3142 reg = CSR_READ_4(sc, AGE_MAC_CFG);
3143 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3144 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3145 reg |= MAC_CFG_VLAN_TAG_STRIP;
3146 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
3150 age_rxfilter(struct age_softc *sc)
3153 struct ifmultiaddr *ifma;
3158 AGE_LOCK_ASSERT(sc);
3162 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
3163 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3164 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3165 rxcfg |= MAC_CFG_BCAST;
3166 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3167 if ((ifp->if_flags & IFF_PROMISC) != 0)
3168 rxcfg |= MAC_CFG_PROMISC;
3169 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3170 rxcfg |= MAC_CFG_ALLMULTI;
3171 CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
3172 CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
3173 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
3177 /* Program new filter. */
3178 bzero(mchash, sizeof(mchash));
3180 if_maddr_rlock(ifp);
3181 TAILQ_FOREACH(ifma, &sc->age_ifp->if_multiaddrs, ifma_link) {
3182 if (ifma->ifma_addr->sa_family != AF_LINK)
3184 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3185 ifma->ifma_addr), ETHER_ADDR_LEN);
3186 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
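/*
 * Hash filter layout: bit 31 of the big-endian CRC selects MAR0
 * or MAR1 and bits 30-26 select the bit within that 32-bit word,
 * so each multicast address sets exactly one of 64 filter bits.
 */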
3188 if_maddr_runlock(ifp);
3190 CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
3191 CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
3192 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
3196 sysctl_age_stats(SYSCTL_HANDLER_ARGS)
3198 struct age_softc *sc;
3199 struct age_stats *stats;
3203 error = sysctl_handle_int(oidp, &result, 0, req);
3205 if (error != 0 || req->newptr == NULL)
3211 sc = (struct age_softc *)arg1;
3212 stats = &sc->age_stat;
3213 printf("%s statistics:\n", device_get_nameunit(sc->age_dev));
3214 printf("Transmit good frames : %ju\n",
3215 (uintmax_t)stats->tx_frames);
3216 printf("Transmit good broadcast frames : %ju\n",
3217 (uintmax_t)stats->tx_bcast_frames);
3218 printf("Transmit good multicast frames : %ju\n",
3219 (uintmax_t)stats->tx_mcast_frames);
3220 printf("Transmit pause control frames : %u\n",
3221 stats->tx_pause_frames);
3222 printf("Transmit control frames : %u\n",
3223 stats->tx_control_frames);
3224 printf("Transmit frames with excessive deferrals : %u\n",
3225 stats->tx_excess_defer);
3226 printf("Transmit deferrals : %u\n",
3227 stats->tx_deferred);
3228 printf("Transmit good octets : %ju\n",
3229 (uintmax_t)stats->tx_bytes);
3230 printf("Transmit good broadcast octets : %ju\n",
3231 (uintmax_t)stats->tx_bcast_bytes);
3232 printf("Transmit good multicast octets : %ju\n",
3233 (uintmax_t)stats->tx_mcast_bytes);
3234 printf("Transmit frames 64 bytes : %ju\n",
3235 (uintmax_t)stats->tx_pkts_64);
3236 printf("Transmit frames 65 to 127 bytes : %ju\n",
3237 (uintmax_t)stats->tx_pkts_65_127);
3238 printf("Transmit frames 128 to 255 bytes : %ju\n",
3239 (uintmax_t)stats->tx_pkts_128_255);
3240 printf("Transmit frames 256 to 511 bytes : %ju\n",
3241 (uintmax_t)stats->tx_pkts_256_511);
3242 printf("Transmit frames 512 to 1024 bytes : %ju\n",
3243 (uintmax_t)stats->tx_pkts_512_1023);
3244 printf("Transmit frames 1024 to 1518 bytes : %ju\n",
3245 (uintmax_t)stats->tx_pkts_1024_1518);
3246 printf("Transmit frames 1519 to MTU bytes : %ju\n",
3247 (uintmax_t)stats->tx_pkts_1519_max);
3248 printf("Transmit single collisions : %u\n",
3249 stats->tx_single_colls);
3250 printf("Transmit multiple collisions : %u\n",
3251 stats->tx_multi_colls);
3252 printf("Transmit late collisions : %u\n",
3253 stats->tx_late_colls);
3254 printf("Transmit abort due to excessive collisions : %u\n",
3255 stats->tx_excess_colls);
3256 printf("Transmit underruns due to FIFO underruns : %u\n",
3257 stats->tx_underrun);
3258 printf("Transmit descriptor write-back errors : %u\n",
3259 stats->tx_desc_underrun);
3260 printf("Transmit frames with length mismatched frame size : %u\n",
3262 printf("Transmit frames with truncated due to MTU size : %u\n",
3265 printf("Receive good frames : %ju\n",
3266 (uintmax_t)stats->rx_frames);
3267 printf("Receive good broadcast frames : %ju\n",
3268 (uintmax_t)stats->rx_bcast_frames);
3269 printf("Receive good multicast frames : %ju\n",
3270 (uintmax_t)stats->rx_mcast_frames);
3271 printf("Receive pause control frames : %u\n",
3272 stats->rx_pause_frames);
3273 printf("Receive control frames : %u\n",
3274 stats->rx_control_frames);
3275 printf("Receive CRC errors : %u\n",
3277 printf("Receive frames with length errors : %u\n",
3279 printf("Receive good octets : %ju\n",
3280 (uintmax_t)stats->rx_bytes);
3281 printf("Receive good broadcast octets : %ju\n",
3282 (uintmax_t)stats->rx_bcast_bytes);
3283 printf("Receive good multicast octets : %ju\n",
3284 (uintmax_t)stats->rx_mcast_bytes);
3285 printf("Receive frames too short : %u\n",
3287 printf("Receive fragmented frames : %ju\n",
3288 (uintmax_t)stats->rx_fragments);
3289 printf("Receive frames 64 bytes : %ju\n",
3290 (uintmax_t)stats->rx_pkts_64);
3291 printf("Receive frames 65 to 127 bytes : %ju\n",
3292 (uintmax_t)stats->rx_pkts_65_127);
3293 printf("Receive frames 128 to 255 bytes : %ju\n",
3294 (uintmax_t)stats->rx_pkts_128_255);
3295 printf("Receive frames 256 to 511 bytes : %ju\n",
3296 (uintmax_t)stats->rx_pkts_256_511);
3297 printf("Receive frames 512 to 1024 bytes : %ju\n",
3298 (uintmax_t)stats->rx_pkts_512_1023);
3299 printf("Receive frames 1024 to 1518 bytes : %ju\n",
3300 (uintmax_t)stats->rx_pkts_1024_1518);
3301 printf("Receive frames 1519 to MTU bytes : %ju\n",
3302 (uintmax_t)stats->rx_pkts_1519_max);
3303 printf("Receive frames too long : %ju\n",
3304 (uint64_t)stats->rx_pkts_truncated);
3305 printf("Receive frames with FIFO overflow : %u\n",
3306 stats->rx_fifo_oflows);
3307 printf("Receive frames with return descriptor overflow : %u\n",
3308 stats->rx_desc_oflows);
3309 printf("Receive frames with alignment errors : %u\n",
3310 stats->rx_alignerrs);
3311 printf("Receive frames dropped due to address filtering : %ju\n",
3312 (uint64_t)stats->rx_pkts_filtered);
3318 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3324 value = *(int *)arg1;
3325 error = sysctl_handle_int(oidp, &value, 0, req);
3326 if (error || req->newptr == NULL)
3328 if (value < low || value > high)
3330 *(int *)arg1 = value;
3336 sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS)
3338 return (sysctl_int_range(oidp, arg1, arg2, req,
3339 AGE_PROC_MIN, AGE_PROC_MAX));
3343 sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
3346 return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,