/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/age/if_agereg.h>
#include <dev/age/if_agevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#ifndef	IFCAP_VLAN_HWTSO
#define	IFCAP_VLAN_HWTSO	0
#endif

#define	AGE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
MODULE_DEPEND(age, pci, 1, 1, 1);
MODULE_DEPEND(age, ether, 1, 1, 1);
MODULE_DEPEND(age, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.age.msi_disable", &msi_disable);
TUNABLE_INT("hw.age.msix_disable", &msix_disable);
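/*
 * Usage example (sketch): both knobs are boot-time tunables, so they can
 * be preset from loader(8) before the driver attaches, e.g. in
 * /boot/loader.conf:
 *
 *	hw.age.msi_disable="1"		# fall back from MSI to legacy INTx
 *	hw.age.msix_disable="1"		# fall back from MSI-X to MSI/INTx
 */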
/*
 * Devices supported by this driver.
 */
static struct age_dev {
	uint16_t	age_vendorid;
	uint16_t	age_deviceid;
	const char	*age_name;
} age_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
	    "Attansic Technology Corp, L1 Gigabit Ethernet" },
};
static int age_miibus_readreg(device_t, int, int);
static int age_miibus_writereg(device_t, int, int, int);
static void age_miibus_statchg(device_t);
static void age_mediastatus(struct ifnet *, struct ifmediareq *);
static int age_mediachange(struct ifnet *);
static int age_read_vpd_word(struct age_softc *, uint32_t, uint32_t,
    uint32_t *);
static int age_probe(device_t);
static void age_get_macaddr(struct age_softc *);
static void age_phy_reset(struct age_softc *);
static int age_attach(device_t);
static int age_detach(device_t);
static void age_sysctl_node(struct age_softc *);
static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int age_check_boundary(struct age_softc *);
static int age_dma_alloc(struct age_softc *);
static void age_dma_free(struct age_softc *);
static int age_shutdown(device_t);
static void age_setwol(struct age_softc *);
static int age_suspend(device_t);
static int age_resume(device_t);
static int age_encap(struct age_softc *, struct mbuf **);
static void age_tx_task(void *, int);
static void age_start(struct ifnet *);
static void age_watchdog(struct age_softc *);
static int age_ioctl(struct ifnet *, u_long, caddr_t);
static void age_mac_config(struct age_softc *);
static void age_link_task(void *, int);
static void age_stats_update(struct age_softc *);
static int age_intr(void *);
static void age_int_task(void *, int);
static void age_txintr(struct age_softc *, int);
static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static int age_rxintr(struct age_softc *, int, int);
static void age_tick(void *);
static void age_reset(struct age_softc *);
static void age_init(void *);
static void age_init_locked(struct age_softc *);
static void age_stop(struct age_softc *);
static void age_stop_txmac(struct age_softc *);
static void age_stop_rxmac(struct age_softc *);
static void age_init_tx_ring(struct age_softc *);
static int age_init_rx_ring(struct age_softc *);
static void age_init_rr_ring(struct age_softc *);
static void age_init_cmb_block(struct age_softc *);
static void age_init_smb_block(struct age_softc *);
static int age_newbuf(struct age_softc *, struct age_rxdesc *);
static void age_rxvlan(struct age_softc *);
static void age_rxfilter(struct age_softc *);
static int sysctl_age_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);
static device_method_t age_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		age_probe),
	DEVMETHOD(device_attach,	age_attach),
	DEVMETHOD(device_detach,	age_detach),
	DEVMETHOD(device_shutdown,	age_shutdown),
	DEVMETHOD(device_suspend,	age_suspend),
	DEVMETHOD(device_resume,	age_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	age_miibus_readreg),
	DEVMETHOD(miibus_writereg,	age_miibus_writereg),
	DEVMETHOD(miibus_statchg,	age_miibus_statchg),

	{ NULL, NULL }
};

static driver_t age_driver = {
	"age",
	age_methods,
	sizeof(struct age_softc)
};

static devclass_t age_devclass;

DRIVER_MODULE(age, pci, age_driver, age_devclass, 0, 0);
DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, 0, 0);
static struct resource_spec age_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_msix[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};
/*
 * Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);
	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}
/*
 * Write a PHY register on the MII of the L1.
 */
static int
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);
	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->age_dev, "phy write timeout : %d\n", reg);

	return (0);
}
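/*
 * Usage example (sketch): the MII layer invokes these helpers through the
 * miibus method table registered above; a raw status read amounts to
 *
 *	bmsr = age_miibus_readreg(sc->age_dev, sc->age_phyaddr, MII_BMSR);
 *
 * i.e. one AGE_MDIO command write followed by polling until MDIO_OP_BUSY
 * clears.
 */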
/*
 * Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->age_link_task);
}
/*
 * Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	AGE_LOCK(sc);
	mii = device_get_softc(sc->age_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	AGE_UNLOCK(sc);
}
/*
 * Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	AGE_LOCK(sc);
	mii = device_get_softc(sc->age_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	AGE_UNLOCK(sc);

	return (error);
}
static int
age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset,
    uint32_t *word)
{
	int i;

	pci_write_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, offset, 2);
	for (i = AGE_TIMEOUT; i > 0; i--) {
		DELAY(80);
		if ((pci_read_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, 2) &
		    0x8000) == 0x8000)
			break;
	}
	if (i == 0) {
		device_printf(sc->age_dev, "VPD read timeout!\n");
		*word = 0;
		return (ETIMEDOUT);
	}

	*word = pci_read_config(sc->age_dev, vpdc + PCIR_VPD_DATA, 4);
	return (0);
}
static int
age_probe(device_t dev)
{
	struct age_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = age_devs;
	for (i = 0; i < sizeof(age_devs) / sizeof(age_devs[0]);
	    i++, sp++) {
		if (vendor == sp->age_vendorid &&
		    devid == sp->age_deviceid) {
			device_set_desc(dev, sp->age_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}
static void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], off, reg, word;
	int vpd_error, match, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	ea[0] = ea[1] = 0;
	if ((vpd_error = pci_find_extcap(sc->age_dev, PCIY_VPD, &vpdc)) == 0) {
		/*
		 * The PCI VPD capability exists, but it does not seem to be
		 * in the standard form stated in the PCI VPD specification,
		 * so the driver cannot use pci_get_vpd_readonly(9) with the
		 * keyword 'NA'.  Search the VPD data starting at address
		 * 0x0100 instead.  The data should be used as initializers
		 * to set the AGE_PAR0 and AGE_PAR1 registers, like other
		 * PCI configuration registers.
		 */
		word = 0;
		match = 0;
		for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END;
		    off += sizeof(uint32_t)) {
			vpd_error = age_read_vpd_word(sc, vpdc, off, &word);
			if (vpd_error != 0)
				break;
			if (match != 0) {
				switch (reg) {
				case AGE_PAR0:
					ea[0] = word;
					break;
				case AGE_PAR1:
					ea[1] = word;
					break;
				default:
					break;
				}
				match = 0;
			} else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) {
				match = 1;
				reg = word >> 16;
			} else
				break;
		}
		if (off >= AGE_VPD_REG_CONF_END)
			vpd_error = ENOENT;
		if (vpd_error == 0) {
			/*
			 * Don't blindly trust the ethernet address obtained
			 * from VPD.  Check whether the ethernet address is
			 * valid.  Otherwise fall back to reading the
			 * AGE_PAR0/AGE_PAR1 registers.
			 */
			ea[1] &= 0xFFFF;
			if ((ea[0] == 0 && ea[1] == 0) ||
			    (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
				if (bootverbose)
					device_printf(sc->age_dev,
					    "invalid ethernet address "
					    "returned from VPD.\n");
				vpd_error = EINVAL;
			}
		}
		if (vpd_error != 0 && (bootverbose))
			device_printf(sc->age_dev, "VPD access failure!\n");
	} else {
		if (bootverbose)
			device_printf(sc->age_dev,
			    "PCI VPD capability not found!\n");
	}
	/*
	 * It seems that the L1 also provides a way to extract the ethernet
	 * address via the SPI flash interface.  Because SPI flash memory
	 * devices from different vendors vary in the instruction codes they
	 * use for the read ID instruction, it is very hard to get the
	 * instruction codes without detailed information on the flash
	 * memory device used on the ethernet controller.  To simplify the
	 * code, just read the AGE_PAR0/AGE_PAR1 registers to get the
	 * ethernet address, which is supposed to be set by hardware during
	 * power-on reset.
	 */
	if (vpd_error != 0) {
		/*
		 * VPD is mapped to SPI flash memory or BIOS set it.
		 */
		ea[0] = CSR_READ_4(sc, AGE_PAR0);
		ea[1] = CSR_READ_4(sc, AGE_PAR1);
	}

	ea[1] &= 0xFFFF;
	if ((ea[0] == 0 && ea[1] == 0) ||
	    (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
		device_printf(sc->age_dev,
		    "generating fake ethernet address.\n");
		ea[0] = arc4random();
		/* Set OUI to ASUSTek COMPUTER INC. */
		sc->age_eaddr[0] = 0x00;
		sc->age_eaddr[1] = 0x1B;
		sc->age_eaddr[2] = 0xFC;
		sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
		sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
		sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
	} else {
		sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
		sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
		sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
		sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
		sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
		sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
	}
}
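/*
 * Worked example (sketch): with AGE_PAR1 = 0x0000001B and
 * AGE_PAR0 = 0xFC112233, the assignments above yield the station address
 * 00:1B:FC:11:22:33, i.e. PAR1 supplies the two most significant bytes
 * and PAR0 the remaining four.
 */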
static void
age_phy_reset(struct age_softc *sc)
{

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(1000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(1000);
}
static int
age_attach(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;
	uint16_t burst;
	int error, i, msic, msixc, pmc;

	sc = device_get_softc(dev);
	sc->age_dev = dev;

	mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
	TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
	TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->age_res_spec = age_res_spec_mem;
	sc->age_irq_spec = age_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = pci_get_revid(dev);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->age_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->age_chip_rev);
	}

	/*
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for the Tx/Rx FIFO length.  It seems that
	 * an unplugged cable puts the hardware into an automatic
	 * power-down mode, which in turn returns an invalid chip revision.
	 */
	if (sc->age_chip_rev == 0xFFFF) {
		device_printf(dev, "invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->age_chip_rev);
		error = ENXIO;
		goto fail;
	}

	if (bootverbose)
		device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
		    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
		    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));
	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == AGE_MSIX_MESSAGES) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->age_flags |= AGE_FLAG_MSIX;
				sc->age_irq_spec = age_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
		    msic == AGE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == AGE_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->age_flags |= AGE_FLAG_MSI;
				sc->age_irq_spec = age_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Get DMA parameters from PCIe device control register. */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->age_flags |= AGE_FLAG_PCIE;
		burst = pci_read_config(dev, i + 0x08, 2);
		/* Max read request size. */
		sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
	} else {
		sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}
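	/*
	 * Note (sketch): the read above decodes the PCIe Device Control
	 * register (offset 0x08 within the PCIe capability); bits 14:12
	 * encode the maximum read request size and bits 7:5 the maximum
	 * payload size, both as 128 << n, so an encoding of 2 means
	 * 512 bytes.
	 */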
	/* Create device sysctl node. */
	age_sysctl_node(sc);

	if ((error = age_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp = sc->age_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_init = age_init;
	ifp->if_snd.ifq_drv_maxlen = AGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	ifp->if_hwassist = AGE_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		sc->age_flags |= AGE_FLAG_PMCAP;
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->age_miibus, age_mediachange,
	    age_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->age_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Create local taskq. */
	TASK_INIT(&sc->age_tx_task, 1, age_tx_task, ifp);
	sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->age_tq);
	if (sc->age_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->age_dev));

	if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
		msic = AGE_MSIX_MESSAGES;
	else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
		msic = AGE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->age_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc,
		    &sc->age_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->age_tq);
		sc->age_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		age_detach(dev);

	return (error);
}
static int
age_detach(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->age_ifp;
	if (device_is_attached(dev)) {
		AGE_LOCK(sc);
		sc->age_flags |= AGE_FLAG_DETACH;
		age_stop(sc);
		AGE_UNLOCK(sc);
		callout_drain(&sc->age_tick_ch);
		taskqueue_drain(sc->age_tq, &sc->age_int_task);
		taskqueue_drain(sc->age_tq, &sc->age_tx_task);
		taskqueue_drain(taskqueue_swi, &sc->age_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->age_tq != NULL) {
		taskqueue_drain(sc->age_tq, &sc->age_int_task);
		taskqueue_free(sc->age_tq);
		sc->age_tq = NULL;
	}

	if (sc->age_miibus != NULL) {
		device_delete_child(dev, sc->age_miibus);
		sc->age_miibus = NULL;
	}
	bus_generic_detach(dev);
	age_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->age_ifp = NULL;
	}

	if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
		msic = AGE_MSIX_MESSAGES;
	else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
		msic = AGE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->age_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->age_irq[i],
			    sc->age_intrhand[i]);
			sc->age_intrhand[i] = NULL;
		}
	}

	bus_release_resources(dev, sc->age_irq_spec, sc->age_irq);
	if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->age_res_spec, sc->age_res);
	mtx_destroy(&sc->age_mtx);

	return (0);
}
static void
age_sysctl_node(struct age_softc *sc)
{
	int error;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats,
	    "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
	    sysctl_hw_age_int_mod, "I", "age interrupt moderation");

	/* Pull in device tunables. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
	if (error == 0) {
		if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
		    sc->age_int_mod > AGE_IM_TIMER_MAX) {
			device_printf(sc->age_dev,
			    "int_mod value out of range; using default: %d\n",
			    AGE_IM_TIMER_DEFAULT);
			sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
		}
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->age_process_limit,
	    0, sysctl_hw_age_proc_limit, "I",
	    "max number of Rx events to process");

	/* Pull in device tunables. */
	sc->age_process_limit = AGE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "process_limit",
	    &sc->age_process_limit);
	if (error == 0) {
		if (sc->age_process_limit < AGE_PROC_MIN ||
		    sc->age_process_limit > AGE_PROC_MAX) {
			device_printf(sc->age_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", AGE_PROC_DEFAULT);
			sc->age_process_limit = AGE_PROC_DEFAULT;
		}
	}
}
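/*
 * Usage example (sketch): the handlers above appear under the device's
 * sysctl tree, e.g. `sysctl dev.age.0.int_mod=100' at run time, while
 * resource_int_value() lets the same values be preset as device hints,
 * e.g. hint.age.0.int_mod="100" in /boot/device.hints.
 */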
struct age_dmamap_arg {
	bus_addr_t	age_busaddr;
};

static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct age_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct age_dmamap_arg *)arg;
	ctx->age_busaddr = segs[0].ds_addr;
}
/*
 * The Attansic L1 controller has a single register to specify the high
 * address part of DMA blocks.  All descriptor structures and DMA memory
 * blocks must therefore share the same high address within a given 4GB
 * address space (i.e. crossing a 4GB boundary is not allowed).
 */
static int
age_check_boundary(struct age_softc *sc)
{
	bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
	bus_addr_t cmb_block_end, smb_block_end;

	/* Tx/Rx descriptor queues should reside within the 4GB boundary. */
	tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
	rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
	rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
	cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
	smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;

	if ((AGE_ADDR_HI(tx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
	    (AGE_ADDR_HI(rx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
	    (AGE_ADDR_HI(rr_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
	    (AGE_ADDR_HI(cmb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
	    (AGE_ADDR_HI(smb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
		return (EFBIG);

	if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
		return (EFBIG);

	return (0);
}
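/*
 * Worked example (sketch): a 16KB ring loaded at bus address 0xFFFFF000
 * ends at 0x100002FFF, so AGE_ADDR_HI() differs between its first and
 * last byte; the checks above catch that and make age_dma_alloc() retry
 * with 32-bit addressing, where a 4GB crossing is impossible.
 */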
static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	bus_addr_t lowaddr;
	struct age_dmamap_arg ctx;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;

again:
	/* Create parent ring/DMA block tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    lowaddr,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_parent_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx return ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_RR_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_RR_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_RR_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_CMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_CMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_CMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_cmb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}

	/* Create tag for statistics message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag, /* parent */
	    AGE_SMB_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_SMB_BLOCK_SZ,		/* maxsize */
	    1,				/* nsegments */
	    AGE_SMB_BLOCK_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_smb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}
	/* Allocate DMA'able memory and load the DMA map. */
	error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
	    (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_tx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
	    AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;
	/* Rx ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
	    (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_rx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
	    sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
	    AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;
	/* Rx return ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
	    (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_rr_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
	    sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
	    AGE_RR_RING_SZ, age_dmamap_cb,
	    &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;
	/* CMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
	    (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for CMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
	    sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
	    AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for CMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;
	/* SMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
	    (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_smb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for SMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
	    AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for SMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr;
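	/*
	 * Note (sketch): every block above follows the same bus_dma(9)
	 * pattern: bus_dmamem_alloc() returns the kernel virtual address,
	 * bus_dmamap_load() programs the map, and age_dmamap_cb() captures
	 * the single segment's bus address into struct age_dmamap_arg,
	 * which is then recorded as the ring's physical base address.
	 */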
	/*
	 * All ring buffers and DMA blocks should have the same
	 * high address part of the 64-bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = age_check_boundary(sc)) != 0) {
		device_printf(sc->age_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		age_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx/Rx buffer parent tag.
	 * The L1 supports full 64-bit DMA addressing for Tx/Rx buffers,
	 * so it needs a separate parent DMA tag.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_buffer_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    AGE_TSO_MAXSIZE,		/* maxsize */
	    AGE_MAXTXSEGS,		/* nsegments */
	    AGE_TSO_MAXSEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
	    &sc->age_cdata.age_rx_sparemap)) != 0) {
		device_printf(sc->age_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}
static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	if (sc->age_cdata.age_tx_tag != NULL) {
		for (i = 0; i < AGE_TX_RING_CNT; i++) {
			txd = &sc->age_cdata.age_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
		sc->age_cdata.age_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->age_cdata.age_rx_tag != NULL) {
		for (i = 0; i < AGE_RX_RING_CNT; i++) {
			rxd = &sc->age_cdata.age_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->age_cdata.age_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
			    sc->age_cdata.age_rx_sparemap);
			sc->age_cdata.age_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
		sc->age_cdata.age_rx_tag = NULL;
	}
	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_tag != NULL) {
		if (sc->age_cdata.age_tx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
			    sc->age_cdata.age_tx_ring_map);
		if (sc->age_cdata.age_tx_ring_map != NULL &&
		    sc->age_rdata.age_tx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
			    sc->age_rdata.age_tx_ring,
			    sc->age_cdata.age_tx_ring_map);
		sc->age_rdata.age_tx_ring = NULL;
		sc->age_cdata.age_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
		sc->age_cdata.age_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_tag != NULL) {
		if (sc->age_cdata.age_rx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
			    sc->age_cdata.age_rx_ring_map);
		if (sc->age_cdata.age_rx_ring_map != NULL &&
		    sc->age_rdata.age_rx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
			    sc->age_rdata.age_rx_ring,
			    sc->age_cdata.age_rx_ring_map);
		sc->age_rdata.age_rx_ring = NULL;
		sc->age_cdata.age_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
		sc->age_cdata.age_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_tag != NULL) {
		if (sc->age_cdata.age_rr_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
			    sc->age_cdata.age_rr_ring_map);
		if (sc->age_cdata.age_rr_ring_map != NULL &&
		    sc->age_rdata.age_rr_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
			    sc->age_rdata.age_rr_ring,
			    sc->age_cdata.age_rr_ring_map);
		sc->age_rdata.age_rr_ring = NULL;
		sc->age_cdata.age_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
		sc->age_cdata.age_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->age_cdata.age_cmb_block_tag != NULL) {
		if (sc->age_cdata.age_cmb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
			    sc->age_cdata.age_cmb_block_map);
		if (sc->age_cdata.age_cmb_block_map != NULL &&
		    sc->age_rdata.age_cmb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
			    sc->age_rdata.age_cmb_block,
			    sc->age_cdata.age_cmb_block_map);
		sc->age_rdata.age_cmb_block = NULL;
		sc->age_cdata.age_cmb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
		sc->age_cdata.age_cmb_block_tag = NULL;
	}
	/* SMB block */
	if (sc->age_cdata.age_smb_block_tag != NULL) {
		if (sc->age_cdata.age_smb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
			    sc->age_cdata.age_smb_block_map);
		if (sc->age_cdata.age_smb_block_map != NULL &&
		    sc->age_rdata.age_smb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
			    sc->age_rdata.age_smb_block,
			    sc->age_cdata.age_smb_block_map);
		sc->age_rdata.age_smb_block = NULL;
		sc->age_cdata.age_smb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
		sc->age_cdata.age_smb_block_tag = NULL;
	}

	if (sc->age_cdata.age_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
		sc->age_cdata.age_buffer_tag = NULL;
	}
	if (sc->age_cdata.age_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
		sc->age_cdata.age_parent_tag = NULL;
	}
}
/*
 * Make sure the interface is stopped at reboot time.
 */
static int
age_shutdown(device_t dev)
{

	return (age_suspend(dev));
}
static void
age_setwol(struct age_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int aneg, i, pmc;

	AGE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->age_dev, PCIY_PMG, &pmc) != 0) {
		CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
		/*
		 * No PME capability, PHY power down.
		 * XXX
		 * Due to an unknown reason, powering down the PHY resulted
		 * in unexpected results such as inaccessibility of the
		 * hardware on a freshly rebooted system.  Disable
		 * powering down the PHY until I get more information on
		 * Attansic/Atheros PHY hardware.
		 */
#ifdef notyet
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
#endif
		return;
	}

	ifp = sc->age_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/*
		 * Note, this driver resets the link speed to 10/100Mbps by
		 * auto-negotiation, but we don't know whether that operation
		 * will succeed, as we have no control after powering off.
		 * If the renegotiation fails, WOL may not work.  Running at
		 * 1Gbps draws more power than the 375mA at 3.3V specified in
		 * the PCI specification, and that could result in power to
		 * the ethernet controller being cut entirely.
		 *
		 * TODO
		 * Save the currently negotiated media speed/duplex/
		 * flow-control to the softc and restore the same link again
		 * after resuming.  PHY handling such as power down/resetting
		 * to 100Mbps may be better handled in the suspend method of
		 * the phy driver.
		 */
		mii = device_get_softc(sc->age_miibus);
		mii_pollstat(mii);
		aneg = 0;
		if ((mii->mii_media_status & IFM_AVALID) != 0) {
			switch (IFM_SUBTYPE(mii->mii_media_active)) {
			case IFM_10_T:
			case IFM_100_TX:
				goto got_link;
			case IFM_1000_T:
				aneg++;
				/* FALLTHROUGH */
			default:
				break;
			}
		}
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_100T2CR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
		    ANAR_10 | ANAR_CSMA);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
		DELAY(1000);
		if (aneg != 0) {
			/* Poll link state until age(4) gets a 10/100 link. */
			for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
				mii_pollstat(mii);
				if ((mii->mii_media_status & IFM_AVALID) != 0) {
					switch (IFM_SUBTYPE(
					    mii->mii_media_active)) {
					case IFM_10_T:
					case IFM_100_TX:
						age_mac_config(sc);
						goto got_link;
					default:
						break;
					}
				}
				AGE_UNLOCK(sc);
				pause("agelnk", hz);
				AGE_LOCK(sc);
			}
			if (i == MII_ANEGTICKS_GIGE)
				device_printf(sc->age_dev,
				    "establishing link failed, "
				    "WOL may not work!");
		}
		/*
		 * No link, force the MAC to have a 100Mbps, full-duplex link.
		 * This is the last resort and may/may not work.
		 */
		mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
		mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
		age_mac_config(sc);
	}

got_link:
	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
	reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		reg |= MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}

	/* Request PME. */
	pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	/* See above for powering down PHY issues. */
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
}
static int
age_suspend(device_t dev)
{
	struct age_softc *sc;

	sc = device_get_softc(dev);

	AGE_LOCK(sc);
	age_stop(sc);
	age_setwol(sc);
	AGE_UNLOCK(sc);

	return (0);
}

static int
age_resume(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;
	uint16_t cmd;

	sc = device_get_softc(dev);

	AGE_LOCK(sc);
	/*
	 * Clear the INTx emulation disable bit, which some hardware
	 * has set after the resume event.  From Linux.
	 */
	cmd = pci_read_config(sc->age_dev, PCIR_COMMAND, 2);
	if ((cmd & 0x0400) != 0) {
		cmd &= ~0x0400;
		pci_write_config(sc->age_dev, PCIR_COMMAND, cmd, 2);
	}
	ifp = sc->age_ifp;
	if ((ifp->if_flags & IFF_UP) != 0)
		age_init_locked(sc);
	AGE_UNLOCK(sc);

	return (0);
}
static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, ip_off, poff, vtag;
	int error, i, nsegs, prod, si;

	AGE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	ip = NULL;
	tcp = NULL;
	cflags = vtag = 0;
	ip_off = poff = 0;
	if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) {
		/*
		 * The L1 requires the offset of the TCP/UDP payload in its
		 * Tx descriptor to perform hardware Tx checksum offload.
		 * Additionally, TSO requires the IP/TCP header size and
		 * modification of the IP/TCP header in order to make the
		 * TSO engine work.  This kind of operation takes many CPU
		 * cycles on FreeBSD, so a fast host CPU is required for
		 * smooth TSO performance.
		 */
		struct ether_header *eh;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			/* Release original mbufs. */
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(m, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/*
		 * Check if hardware VLAN insertion is off.
		 * Additional check for LLC/SNAP frame?
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, poff + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			/*
			 * The L1 requires the IP/TCP header size and offset
			 * as well as the TCP pseudo checksum, which
			 * complicates TSO configuration.  I guess this comes
			 * from the adherence to the Microsoft NDIS Large
			 * Send specification, which requires insertion of
			 * the pseudo checksum by the upper stack.  The
			 * pseudo checksum that NDIS refers to doesn't
			 * include the TCP payload length, so age(4) should
			 * recompute the pseudo checksum here.  Hopefully
			 * this wouldn't be much of a burden on modern CPUs.
			 * Reset the IP checksum and recompute the TCP pseudo
			 * checksum as the NDIS specification said.
			 */
			ip->ip_sum = 0;
			if (poff + (tcp->th_off << 2) == m->m_pkthdr.len)
				tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htons((tcp->th_off << 2) + IPPROTO_TCP));
			else
				tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
		*m_head = m;
	}

	si = prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
	    *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, AGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
		    *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= ((poff + m->m_pkthdr.csum_data) <<
		    AGE_TD_CSUM_XSUMOFFSET_SHIFT);
	}

	/* Configure TSO. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			/* Not TSO but IP/TCP checksum offload. */
			cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
			/* Clear TSO in order not to set AGE_TD_TSO_HDR. */
			m->m_pkthdr.csum_flags &= ~CSUM_TSO;
		} else {
			/* Request TSO and set MSS. */
			cflags |= AGE_TD_TSO_IPV4;
			cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
			cflags |= ((uint32_t)m->m_pkthdr.tso_segsz <<
			    AGE_TD_TSO_MSS_SHIFT);
		}
		/* Set IP/TCP header size. */
		cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT;
		cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT;
	}

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

	/* Lastly set TSO header and modify IP/TCP header for TSO operation. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		desc = &sc->age_rdata.age_tx_ring[si];
		desc->flags |= htole32(AGE_TD_TSO_HDR);
	}

	/* Swap dmamap of the first and the last. */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
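/*
 * Worked example (sketch) of the pseudo checksum logic above: for a
 * header-only segment (poff plus the TCP header length equals the packet
 * length) the hardware wants the checksum to include the TCP length
 * word, while for a segment carrying TSO payload it wants the
 * length-less NDIS form; that is the only difference between the two
 * in_pseudo() calls.
 */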
static void
age_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	age_start(ifp);
}
static void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	AGE_LOCK(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0) {
		AGE_UNLOCK(sc);
		return;
	}

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->age_watchdog_timer = AGE_TX_TIMEOUT;
	}

	AGE_UNLOCK(sc);
}
static void
age_watchdog(struct age_softc *sc)
{
	struct ifnet *ifp;

	AGE_LOCK_ASSERT(sc);

	if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer)
		return;

	ifp = sc->age_ifp;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		if_printf(sc->age_ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		age_init_locked(sc);
		return;
	}
	if (sc->age_cdata.age_tx_cnt == 0) {
		if_printf(sc->age_ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->age_tq, &sc->age_tx_task);
		return;
	}
	if_printf(sc->age_ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	age_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->age_tq, &sc->age_tx_task);
}
static int
age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct age_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	uint32_t reg;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			AGE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				age_init_locked(sc);
			AGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		AGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->age_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					age_rxfilter(sc);
			} else {
				if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
					age_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				age_stop(sc);
		}
		sc->age_if_flags = ifp->if_flags;
		AGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		AGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			age_rxfilter(sc);
		AGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->age_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		AGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, AGE_MAC_CFG);
			reg &= ~MAC_CFG_RXCSUM_ENB;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				reg |= MAC_CFG_RXCSUM_ENB;
			CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;

		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			age_rxvlan(sc);
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		/*
		 * VLAN hardware tagging is required to do checksum
		 * offload or TSO on a VLAN interface.  Checksum offload
		 * on a VLAN interface also requires hardware assistance
		 * from the parent interface.
		 */
		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
			ifp->if_capenable &=
			    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
		AGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static void
age_mac_config(struct age_softc *sc)
{
	struct mii_data *mii;
	uint32_t reg;

	AGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->age_miibus);
	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~MAC_CFG_FULL_DUPLEX;
	reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC);
	reg &= ~MAC_CFG_SPEED_MASK;
	/* Reprogram MAC with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
	case IFM_100_TX:
		reg |= MAC_CFG_SPEED_10_100;
		break;
	case IFM_1000_T:
		reg |= MAC_CFG_SPEED_1000;
		break;
	}
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		reg |= MAC_CFG_FULL_DUPLEX;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			reg |= MAC_CFG_TX_FC;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			reg |= MAC_CFG_RX_FC;
	}

	CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
}
static void
age_link_task(void *arg, int pending)
{
	struct age_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t reg;

	sc = (struct age_softc *)arg;

	AGE_LOCK(sc);
	mii = device_get_softc(sc->age_miibus);
	ifp = sc->age_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		AGE_UNLOCK(sc);
		return;
	}

	sc->age_flags &= ~AGE_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
			sc->age_flags |= AGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Rx/Tx MACs. */
	age_stop_rxmac(sc);
	age_stop_txmac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->age_flags & AGE_FLAG_LINK) != 0) {
		age_mac_config(sc);
		reg = CSR_READ_4(sc, AGE_MAC_CFG);
		/* Restart DMA engine and Tx/Rx MAC. */
		CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) |
		    DMA_CFG_RD_ENB | DMA_CFG_WR_ENB);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}

	AGE_UNLOCK(sc);
}
static void
age_stats_update(struct age_softc *sc)
{
	struct age_stats *stat;
	struct smb *smb;
	struct ifnet *ifp;

	AGE_LOCK_ASSERT(sc);

	stat = &sc->age_stat;

	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	smb = sc->age_rdata.age_smb_block;
	if (smb->updated == 0)
		return;

	ifp = sc->age_ifp;
	/* Rx stats. */
	stat->rx_frames += smb->rx_frames;
	stat->rx_bcast_frames += smb->rx_bcast_frames;
	stat->rx_mcast_frames += smb->rx_mcast_frames;
	stat->rx_pause_frames += smb->rx_pause_frames;
	stat->rx_control_frames += smb->rx_control_frames;
	stat->rx_crcerrs += smb->rx_crcerrs;
	stat->rx_lenerrs += smb->rx_lenerrs;
	stat->rx_bytes += smb->rx_bytes;
	stat->rx_runts += smb->rx_runts;
	stat->rx_fragments += smb->rx_fragments;
	stat->rx_pkts_64 += smb->rx_pkts_64;
	stat->rx_pkts_65_127 += smb->rx_pkts_65_127;
	stat->rx_pkts_128_255 += smb->rx_pkts_128_255;
	stat->rx_pkts_256_511 += smb->rx_pkts_256_511;
	stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023;
	stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518;
	stat->rx_pkts_1519_max += smb->rx_pkts_1519_max;
	stat->rx_pkts_truncated += smb->rx_pkts_truncated;
	stat->rx_fifo_oflows += smb->rx_fifo_oflows;
	stat->rx_desc_oflows += smb->rx_desc_oflows;
	stat->rx_alignerrs += smb->rx_alignerrs;
	stat->rx_bcast_bytes += smb->rx_bcast_bytes;
	stat->rx_mcast_bytes += smb->rx_mcast_bytes;
	stat->rx_pkts_filtered += smb->rx_pkts_filtered;

	/* Tx stats. */
	stat->tx_frames += smb->tx_frames;
	stat->tx_bcast_frames += smb->tx_bcast_frames;
	stat->tx_mcast_frames += smb->tx_mcast_frames;
	stat->tx_pause_frames += smb->tx_pause_frames;
	stat->tx_excess_defer += smb->tx_excess_defer;
	stat->tx_control_frames += smb->tx_control_frames;
	stat->tx_deferred += smb->tx_deferred;
	stat->tx_bytes += smb->tx_bytes;
	stat->tx_pkts_64 += smb->tx_pkts_64;
	stat->tx_pkts_65_127 += smb->tx_pkts_65_127;
	stat->tx_pkts_128_255 += smb->tx_pkts_128_255;
	stat->tx_pkts_256_511 += smb->tx_pkts_256_511;
	stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023;
	stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518;
	stat->tx_pkts_1519_max += smb->tx_pkts_1519_max;
	stat->tx_single_colls += smb->tx_single_colls;
	stat->tx_multi_colls += smb->tx_multi_colls;
	stat->tx_late_colls += smb->tx_late_colls;
	stat->tx_excess_colls += smb->tx_excess_colls;
	stat->tx_underrun += smb->tx_underrun;
	stat->tx_desc_underrun += smb->tx_desc_underrun;
	stat->tx_lenerrs += smb->tx_lenerrs;
	stat->tx_pkts_truncated += smb->tx_pkts_truncated;
	stat->tx_bcast_bytes += smb->tx_bcast_bytes;
	stat->tx_mcast_bytes += smb->tx_mcast_bytes;

	/* Update counters in ifnet. */
	ifp->if_opackets += smb->tx_frames;

	ifp->if_collisions += smb->tx_single_colls +
	    smb->tx_multi_colls + smb->tx_late_colls +
	    smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT;

	ifp->if_oerrors += smb->tx_excess_colls +
	    smb->tx_late_colls + smb->tx_underrun +
	    smb->tx_pkts_truncated;

	ifp->if_ipackets += smb->rx_frames;

	ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs +
	    smb->rx_runts + smb->rx_pkts_truncated +
	    smb->rx_fifo_oflows + smb->rx_desc_oflows +
	    smb->rx_alignerrs;

	/* Update done, clear. */
	smb->updated = 0;

	bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
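/*
 * Note (sketch): the statistics block is written by the hardware, so the
 * update protocol above is: sync POSTREAD, bail out unless smb->updated
 * is set, fold the counters into the softc and ifnet, clear smb->updated
 * and sync PREREAD so the chip can publish its next snapshot.
 */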
static int
age_intr(void *arg)
{
	struct age_softc *sc;
	uint32_t status;

	sc = (struct age_softc *)arg;

	status = CSR_READ_4(sc, AGE_INTR_STATUS);
	if (status == 0 || (status & AGE_INTRS) == 0)
		return (FILTER_STRAY);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
	taskqueue_enqueue(sc->age_tq, &sc->age_int_task);

	return (FILTER_HANDLED);
}
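/*
 * Note (sketch): age_intr() is registered as an interrupt filter, so it
 * only acknowledges/masks the interrupt and defers the real work to
 * age_int_task() on the driver taskqueue; returning FILTER_HANDLED tells
 * the kernel that no ithread needs to run for this device.
 */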
2175 age_int_task(void *arg, int pending)
2177 struct age_softc *sc;
2182 sc = (struct age_softc *)arg;
2186 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2187 sc->age_cdata.age_cmb_block_map,
2188 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2189 cmb = sc->age_rdata.age_cmb_block;
2190 status = le32toh(cmb->intr_status);
2191 if (sc->age_morework != 0)
2192 status |= INTR_CMB_RX;
2193 if ((status & AGE_INTRS) == 0)
2196 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >>
2198 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >>
2200 /* Let hardware know CMB was served. */
2201 cmb->intr_status = 0;
2202 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2203 sc->age_cdata.age_cmb_block_map,
2204 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2207 printf("INTR: 0x%08x\n", status);
2208 status &= ~INTR_DIS_DMA;
2209 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT);
2210 #endif
2211 ifp = sc->age_ifp;
2212 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2213 if ((status & INTR_CMB_RX) != 0)
2214 sc->age_morework = age_rxintr(sc, sc->age_rr_prod,
2215 sc->age_process_limit);
2216 if ((status & INTR_CMB_TX) != 0)
2217 age_txintr(sc, sc->age_tpd_cons);
2218 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) {
2219 if ((status & INTR_DMA_RD_TO_RST) != 0)
2220 device_printf(sc->age_dev,
2221 "DMA read error! -- resetting\n");
2222 if ((status & INTR_DMA_WR_TO_RST) != 0)
2223 device_printf(sc->age_dev,
2224 "DMA write error! -- resetting\n");
2225 age_init_locked(sc);
2226 }
2227 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2228 taskqueue_enqueue(sc->age_tq, &sc->age_tx_task);
2229 if ((status & INTR_SMB) != 0)
2230 age_stats_update(sc);
2231 }
2233 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. */
2234 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
2235 sc->age_cdata.age_cmb_block_map,
2236 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2237 status = le32toh(cmb->intr_status);
2238 if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) {
2239 taskqueue_enqueue(sc->age_tq, &sc->age_int_task);
2240 AGE_UNLOCK(sc);
2241 return;
2242 }
2244 done:
2245 /* Re-enable interrupts. */
2246 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
2247 AGE_UNLOCK(sc);
2248 }
2250 static void
2251 age_txintr(struct age_softc *sc, int tpd_cons)
2252 {
2253 struct ifnet *ifp;
2254 struct age_txdesc *txd;
2255 int cons, prog;
2257 AGE_LOCK_ASSERT(sc);
2259 ifp = sc->age_ifp;
2261 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
2262 sc->age_cdata.age_tx_ring_map,
2263 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2265 /*
2266 * Go through our Tx list and free mbufs for those
2267 * frames which have been transmitted.
2268 */
2269 cons = sc->age_cdata.age_tx_cons;
2270 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) {
2271 if (sc->age_cdata.age_tx_cnt <= 0)
2272 break;
2273 prog++;
2274 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2275 sc->age_cdata.age_tx_cnt--;
2276 txd = &sc->age_cdata.age_txdesc[cons];
2277 /*
2278 * Clear Tx descriptors; it's not required but it helps
2279 * debugging in case of Tx issues.
2280 */
2281 txd->tx_desc->addr = 0;
2282 txd->tx_desc->len = 0;
2283 txd->tx_desc->flags = 0;
2285 if (txd->tx_m == NULL)
2286 continue;
2287 /* Reclaim transmitted mbufs. */
2288 bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap,
2289 BUS_DMASYNC_POSTWRITE);
2290 bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap);
2291 m_freem(txd->tx_m);
2292 txd->tx_m = NULL;
2293 }
2295 if (prog > 0) {
2296 sc->age_cdata.age_tx_cons = cons;
2298 /*
2299 * Unarm watchdog timer only when there are no pending
2300 * Tx descriptors in queue.
2301 */
2302 if (sc->age_cdata.age_tx_cnt == 0)
2303 sc->age_watchdog_timer = 0;
2304 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
2305 sc->age_cdata.age_tx_ring_map,
2306 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2307 }
2308 }
2310 /* Receive a frame. */
2311 static void
2312 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd)
2313 {
2314 struct age_rxdesc *rxd;
2315 struct rx_desc *desc;
2316 struct ifnet *ifp;
2317 struct mbuf *mp, *m;
2318 uint32_t status, index, vtag;
2319 int count, nsegs, pktlen;
2320 int rx_cons;
2322 AGE_LOCK_ASSERT(sc);
2324 ifp = sc->age_ifp;
2325 status = le32toh(rxrd->flags);
2326 index = le32toh(rxrd->index);
2327 rx_cons = AGE_RX_CONS(index);
2328 nsegs = AGE_RX_NSEGS(index);
2330 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len));
2331 if ((status & AGE_RRD_ERROR) != 0 &&
2332 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE |
2333 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) {
2334 /*
2335 * We want to pass the following frames to upper
2336 * layer regardless of error status of Rx return
2337 * ring:
2338 *
2339 *   o IP/TCP/UDP checksum is bad.
2340 *   o frame length and protocol specific length
2341 *     does not match.
2342 */
2343 sc->age_cdata.age_rx_cons += nsegs;
2344 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
2345 return;
2346 }
2348 pktlen = 0;
2349 for (count = 0; count < nsegs; count++,
2350 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) {
2351 rxd = &sc->age_cdata.age_rxdesc[rx_cons];
2352 mp = rxd->rx_m;
2353 desc = rxd->rx_desc;
2354 /* Add a new receive buffer to the ring. */
2355 if (age_newbuf(sc, rxd) != 0) {
2356 ifp->if_iqdrops++;
2357 /* Reuse Rx buffers. */
2358 if (sc->age_cdata.age_rxhead != NULL) {
2359 m_freem(sc->age_cdata.age_rxhead);
2360 AGE_RXCHAIN_RESET(sc);
2361 }
2362 break;
2363 }
2365 /* The length of the first mbuf is computed last. */
2367 mp->m_len = AGE_RX_BYTES(le32toh(desc->len));
2368 pktlen += mp->m_len;
2371 /* Chain received mbufs. */
2372 if (sc->age_cdata.age_rxhead == NULL) {
2373 sc->age_cdata.age_rxhead = mp;
2374 sc->age_cdata.age_rxtail = mp;
2376 mp->m_flags &= ~M_PKTHDR;
2377 sc->age_cdata.age_rxprev_tail =
2378 sc->age_cdata.age_rxtail;
2379 sc->age_cdata.age_rxtail->m_next = mp;
2380 sc->age_cdata.age_rxtail = mp;
2383 if (count == nsegs - 1) {
2384 /*
2385 * It seems that the L1 controller has no way to tell the
2386 * hardware to strip CRC bytes, so remove them in software.
2387 */
2388 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN;
2390 /* Remove the CRC bytes in chained mbufs. */
2391 pktlen -= ETHER_CRC_LEN;
2392 if (mp->m_len <= ETHER_CRC_LEN) {
2393 sc->age_cdata.age_rxtail =
2394 sc->age_cdata.age_rxprev_tail;
2395 sc->age_cdata.age_rxtail->m_len -=
2396 (ETHER_CRC_LEN - mp->m_len);
2397 sc->age_cdata.age_rxtail->m_next = NULL;
2398 m_freem(mp);
2399 } else {
2400 mp->m_len -= ETHER_CRC_LEN;
2401 }
2404 m = sc->age_cdata.age_rxhead;
2405 m->m_flags |= M_PKTHDR;
2406 m->m_pkthdr.rcvif = ifp;
2407 m->m_pkthdr.len = sc->age_cdata.age_rxlen;
2408 /* Set the first mbuf length. */
2409 m->m_len = sc->age_cdata.age_rxlen - pktlen;
2412 * Set checksum information.
2413 * It seems that L1 controller can compute partial
2414 * checksum. The partial checksum value can be used
2415 * to accelerate checksum computation for fragmented
2416 * TCP/UDP packets. Upper network stack already
2417 * takes advantage of the partial checksum value in
2418 * IP reassembly stage. But I'm not sure the
2419 * correctness of the partial hardware checksum
2420 * assistance due to lack of data sheet. If it is
2421 * proven to work on L1 I'll enable it.
2423 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2424 (status & AGE_RRD_IPV4) != 0) {
2425 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2426 if ((status & AGE_RRD_IPCSUM_NOK) == 0)
2427 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2428 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) &&
2429 (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) {
2430 m->m_pkthdr.csum_flags |=
2431 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2432 m->m_pkthdr.csum_data = 0xffff;
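/*
 * Setting csum_data to 0xffff together with CSUM_DATA_VALID |
 * CSUM_PSEUDO_HDR is the standard FreeBSD idiom for "hardware
 * verified the full TCP/UDP checksum", so the stack skips its
 * own verification.
 */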
2433 }
2434 /*
2435 * Don't mark the checksum bad for TCP/UDP frames;
2436 * fragmented frames may always have the bad-checksum
2437 * bit set in the descriptor status.
2438 */
2439 }
2441 /* Check for VLAN tagged frames. */
2442 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2443 (status & AGE_RRD_VLAN) != 0) {
2444 vtag = AGE_RX_VLAN(le32toh(rxrd->vtags));
2445 m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag);
2446 m->m_flags |= M_VLANTAG;
2447 }
2450 /* Pass it on. */
2451 (*ifp->if_input)(ifp, m);
2454 /* Reset mbuf chains. */
2455 AGE_RXCHAIN_RESET(sc);
2456 }
2457 }
2459 if (count != nsegs) {
2460 sc->age_cdata.age_rx_cons += nsegs;
2461 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
2462 } else
2463 sc->age_cdata.age_rx_cons = rx_cons;
2464 }
2466 static int
2467 age_rxintr(struct age_softc *sc, int rr_prod, int count)
2468 {
2469 struct rx_rdesc *rxrd;
2470 int rr_cons, nsegs, pktlen, prog;
2472 AGE_LOCK_ASSERT(sc);
2474 rr_cons = sc->age_cdata.age_rr_cons;
2475 if (rr_cons == rr_prod)
2476 return (0);
2478 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2479 sc->age_cdata.age_rr_ring_map,
2480 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2482 for (prog = 0; rr_cons != rr_prod; prog++) {
2483 if (count-- <= 0)
2484 break;
2485 rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
2486 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
2487 if (nsegs == 0)
2488 break;
2489 /*
2490 * Check number of segments against received bytes.
2491 * Non-matching value would indicate that hardware
2492 * is still trying to update Rx return descriptors.
2493 * I'm not sure whether this check is really needed.
2494 */
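/*
 * Worked example, assuming standard 2KB clusters: each Rx buffer
 * holds MCLBYTES - ETHER_ALIGN = 2048 - 2 = 2046 bytes, so a
 * 3000 byte frame must be reported as (3000 + 2045) / 2046 = 2
 * segments by the check below.
 */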
2495 pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
2496 if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
2497 (MCLBYTES - ETHER_ALIGN)))
2498 break;
2501 /* Received a frame. */
2502 age_rxeof(sc, rxrd);
2503 /* Clear return ring. */
2504 rxrd->index = 0;
2505 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
2506 }
2508 if (prog > 0) {
2509 /* Update the consumer index. */
2510 sc->age_cdata.age_rr_cons = rr_cons;
2512 /* Sync descriptors. */
2513 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2514 sc->age_cdata.age_rr_ring_map,
2515 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2517 /* Notify hardware availability of new Rx buffers. */
2518 AGE_COMMIT_MBOX(sc);
2519 }
2521 return (count > 0 ? 0 : EAGAIN);
2522 }
2524 static void
2525 age_tick(void *arg)
2526 {
2527 struct age_softc *sc;
2528 struct mii_data *mii;
2530 sc = (struct age_softc *)arg;
2532 AGE_LOCK_ASSERT(sc);
2534 mii = device_get_softc(sc->age_miibus);
2535 mii_tick(mii);
2537 callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2538 }
2540 static void
2541 age_reset(struct age_softc *sc)
2542 {
2543 uint32_t reg;
2544 int i;
2546 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
2547 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2548 DELAY(1);
2549 if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0)
2550 break;
2551 }
2552 if (i == 0)
2553 device_printf(sc->age_dev, "master reset timeout!\n");
2555 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2556 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2557 break;
2558 DELAY(10);
2559 }
2561 if (i == 0)
2562 device_printf(sc->age_dev, "reset timeout(0x%08x)!\n", reg);
2563 /* Initialize PCIe module. From Linux. */
2564 CSR_WRITE_4(sc, 0x12FC, 0x6500);
2565 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2566 }
2568 static void
2569 age_init(void *xsc)
2570 {
2571 struct age_softc *sc;
2573 sc = (struct age_softc *)xsc;
2574 AGE_LOCK(sc);
2575 age_init_locked(sc);
2576 AGE_UNLOCK(sc);
2577 }
2579 static void
2580 age_init_locked(struct age_softc *sc)
2581 {
2582 struct ifnet *ifp;
2583 struct mii_data *mii;
2584 uint8_t eaddr[ETHER_ADDR_LEN];
2585 bus_addr_t paddr;
2586 uint32_t reg, fsize;
2587 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
2588 int error;
2590 AGE_LOCK_ASSERT(sc);
2592 ifp = sc->age_ifp;
2593 mii = device_get_softc(sc->age_miibus);
2595 /*
2596 * Cancel any pending I/O.
2597 */
2598 age_stop(sc);
2600 /*
2601 * Reset the chip to a known state.
2602 */
2603 age_reset(sc);
2605 /* Initialize descriptors. */
2606 error = age_init_rx_ring(sc);
2607 if (error != 0) {
2608 device_printf(sc->age_dev, "no memory for Rx buffers.\n");
2609 age_stop(sc);
2610 return;
2611 }
2612 age_init_rr_ring(sc);
2613 age_init_tx_ring(sc);
2614 age_init_cmb_block(sc);
2615 age_init_smb_block(sc);
2617 /* Reprogram the station address. */
2618 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2619 CSR_WRITE_4(sc, AGE_PAR0,
2620 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2621 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
2623 /* Set descriptor base addresses. */
2624 paddr = sc->age_rdata.age_tx_ring_paddr;
2625 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
2626 paddr = sc->age_rdata.age_rx_ring_paddr;
2627 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
2628 paddr = sc->age_rdata.age_rr_ring_paddr;
2629 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
2630 paddr = sc->age_rdata.age_tx_ring_paddr;
2631 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
2632 paddr = sc->age_rdata.age_cmb_block_paddr;
2633 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
2634 paddr = sc->age_rdata.age_smb_block_paddr;
2635 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
2636 /* Set Rx/Rx return descriptor counter. */
2637 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
2638 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
2639 DESC_RRD_CNT_MASK) |
2640 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
2641 /* Set Tx descriptor counter. */
2642 CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
2643 (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
2645 /* Tell hardware that we're ready to load descriptors. */
2646 CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
2648 /*
2649 * Initialize mailbox register.
2650 * Updated producer/consumer index information is exchanged
2651 * through this mailbox register. However, the Tx producer and
2652 * the Rx return consumer/Rx producer share the register, so
2653 * it's hard to separate the Tx and Rx code paths without
2654 * locking. If the L1 hardware had separate mailbox registers
2655 * for Tx and Rx consumer/producer management, we could have
2656 * independent Tx/Rx handlers, and in turn the Rx handler
2657 * could run without any locking.
2658 */
2659 AGE_COMMIT_MBOX(sc);
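/*
 * AGE_COMMIT_MBOX() (if_agevar.h) is assumed to pack the Rx
 * producer, Rx return consumer and Tx producer indices into the
 * single mailbox register, which is why one lock must cover both
 * the Tx and Rx paths.
 */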
2661 /* Configure IPG/IFG parameters. */
2662 CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
2663 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
2664 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2665 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2666 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
2668 /* Set parameters for half-duplex media. */
2669 CSR_WRITE_4(sc, AGE_HDPX_CFG,
2670 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2671 HDPX_CFG_LCOL_MASK) |
2672 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2673 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2674 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2675 HDPX_CFG_ABEBT_MASK) |
2676 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2677 HDPX_CFG_JAMIPG_MASK));
2679 /* Configure interrupt moderation timer. */
2680 CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
2681 reg = CSR_READ_4(sc, AGE_MASTER_CFG);
2682 reg &= ~MASTER_MTIMER_ENB;
2683 if (AGE_USECS(sc->age_int_mod) == 0)
2684 reg &= ~MASTER_ITIMER_ENB;
2685 else
2686 reg |= MASTER_ITIMER_ENB;
2687 CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
2688 if (bootverbose)
2689 device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
2690 sc->age_int_mod);
2691 CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
2693 /* Set maximum frame size, but don't let the MTU be less than ETHERMTU. */
2694 if (ifp->if_mtu < ETHERMTU)
2695 sc->age_max_frame_size = ETHERMTU;
2696 else
2697 sc->age_max_frame_size = ifp->if_mtu;
2698 sc->age_max_frame_size += ETHER_HDR_LEN +
2699 sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
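/*
 * Example: with the default 1500 byte MTU this programs
 * 1500 + 14 (ETHER_HDR_LEN) + 18 (ether_vlan_header) + 4 (CRC) =
 * 1536 bytes, leaving headroom for a VLAN tagged frame.
 */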
2700 CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
2701 /* Configure jumbo frame. */
2702 fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
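/*
 * The jumbo size threshold below is expressed in 8-byte units,
 * hence the division by sizeof(uint64_t); e.g. a 1536 byte frame
 * size yields 1536 / 8 = 192.
 */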
2703 CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
2704 (((fsize / sizeof(uint64_t)) <<
2705 RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
2706 ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
2707 RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
2708 ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
2709 RXQ_JUMBO_CFG_RRD_TIMER_MASK));
2711 /* Configure flow-control parameters. From Linux. */
2712 if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
2713 /*
2714 * Magic workaround for old-L1.
2715 * Don't know which hw revision requires this magic.
2716 */
2717 CSR_WRITE_4(sc, 0x12FC, 0x6500);
2718 /*
2719 * Another magic workaround for flow-control mode
2720 * change. From Linux.
2721 */
2722 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2723 }
2725 /*
2726 * Should understand pause parameter relationships between FIFO
2727 * size and number of Rx descriptors and Rx return descriptors.
2728 *
2729 * Magic parameters came from Linux.
2730 */
2731 switch (sc->age_chip_rev) {
2732 case 0x8001:
2733 case 0x9001:
2734 case 0x9002:
2735 case 0x9003:
2736 rxf_hi = AGE_RX_RING_CNT / 16;
2737 rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
2738 rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
2739 rrd_lo = AGE_RR_RING_CNT / 16;
2740 break;
2741 default:
2742 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
2743 rxf_lo = reg / 16;
2744 if (rxf_lo < 192)
2745 rxf_lo = 192;
2746 rxf_hi = (reg * 7) / 8;
2747 if (rxf_hi < rxf_lo)
2748 rxf_hi = rxf_lo + 16;
2749 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
2750 rrd_lo = reg / 8;
2751 rrd_hi = (reg * 7) / 8;
2752 if (rrd_lo < 2)
2753 rrd_lo = 2;
2754 if (rrd_hi < rrd_lo)
2755 rrd_hi = rrd_lo + 3;
2756 break;
2757 }
2758 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
2759 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
2760 RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
2761 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
2762 RXQ_FIFO_PAUSE_THRESH_HI_MASK));
2763 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
2764 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
2765 RXQ_RRD_PAUSE_THRESH_LO_MASK) |
2766 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
2767 RXQ_RRD_PAUSE_THRESH_HI_MASK));
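/*
 * The lo/hi marks written above form the usual flow-control
 * hysteresis: presumably the MAC emits a pause frame when Rx
 * FIFO/return descriptor usage crosses one threshold and resumes
 * at the other; exact semantics are unknown without a data sheet.
 */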
2769 /* Configure RxQ. */
2770 CSR_WRITE_4(sc, AGE_RXQ_CFG,
2771 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2772 RXQ_CFG_RD_BURST_MASK) |
2773 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
2774 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
2775 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
2776 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
2777 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
2779 /* Configure TxQ. */
2780 CSR_WRITE_4(sc, AGE_TXQ_CFG,
2781 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
2782 TXQ_CFG_TPD_BURST_MASK) |
2783 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
2784 TXQ_CFG_TX_FIFO_BURST_MASK) |
2785 ((TXQ_CFG_TPD_FETCH_DEFAULT <<
2786 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
2787 TXQ_CFG_ENB);
2789 CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
2790 (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
2791 TX_JUMBO_TPD_TH_MASK) |
2792 ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
2793 TX_JUMBO_TPD_IPG_MASK));
2794 /* Configure DMA parameters. */
2795 CSR_WRITE_4(sc, AGE_DMA_CFG,
2796 DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
2797 sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
2798 sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
2800 /* Configure CMB DMA write threshold. */
2801 CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
2802 ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
2803 CMB_WR_THRESH_RRD_MASK) |
2804 ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
2805 CMB_WR_THRESH_TPD_MASK));
2807 /* Set CMB/SMB timer and enable them. */
2808 CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
2809 ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
2810 ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
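/*
 * With AGE_USECS(2) the controller presumably coalesces Tx/Rx
 * completions for up to 2us before writing the CMB back to host
 * memory, trading a little latency for fewer DMA writes.
 */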
2811 /* Request SMB updates every second. */
2812 CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
2813 CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
2815 /*
2816 * Disable all WOL bits as WOL can interfere with normal Rx
2817 * operation.
2818 */
2819 CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
2821 /*
2822 * Configure Tx/Rx MACs.
2823 *  - Auto-padding for short frames.
2824 *  - Enable CRC generation.
2825 * Start with full-duplex/1000Mbps media. The MAC is actually
2826 * reconfigured after the link has been established.
2827 */
2828 CSR_WRITE_4(sc, AGE_MAC_CFG,
2829 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD |
2830 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 |
2831 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) &
2832 MAC_CFG_PREAMBLE_MASK));
2833 /* Set up the receive filter. */
2834 age_rxfilter(sc);
2835 age_rxvlan(sc);
2837 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2838 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2839 reg |= MAC_CFG_RXCSUM_ENB;
2841 /* Ack all pending interrupts and clear them. */
2842 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0);
2843 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS);
2845 /* Finally enable Tx/Rx MAC. */
2846 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB);
2848 sc->age_flags &= ~AGE_FLAG_LINK;
2849 /* Switch to the current media. */
2850 mii_mediachg(mii);
2852 callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2854 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2855 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2856 }
2858 static void
2859 age_stop(struct age_softc *sc)
2860 {
2861 struct ifnet *ifp;
2862 struct age_txdesc *txd;
2863 struct age_rxdesc *rxd;
2864 uint32_t reg;
2865 int i;
2867 AGE_LOCK_ASSERT(sc);
2868 /*
2869 * Mark the interface down and cancel the watchdog timer.
2870 */
2871 ifp = sc->age_ifp;
2872 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2873 sc->age_flags &= ~AGE_FLAG_LINK;
2874 callout_stop(&sc->age_tick_ch);
2875 sc->age_watchdog_timer = 0;
2877 /*
2878 * Disable interrupts.
2879 */
2880 CSR_WRITE_4(sc, AGE_INTR_MASK, 0);
2881 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF);
2882 /* Stop CMB/SMB updates. */
2883 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0);
2884 /* Stop Rx/Tx MAC. */
2885 age_stop_rxmac(sc);
2886 age_stop_txmac(sc);
2888 CSR_WRITE_4(sc, AGE_DMA_CFG,
2889 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB));
2891 CSR_WRITE_4(sc, AGE_TXQ_CFG,
2892 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB);
2893 CSR_WRITE_4(sc, AGE_RXQ_CFG,
2894 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB);
2895 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2896 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2897 break;
2898 DELAY(10);
2899 }
2900 if (i == 0)
2901 device_printf(sc->age_dev,
2902 "stopping Rx/Tx MACs timed out(0x%08x)!\n", reg);
2904 /* Reclaim Rx buffers that have been processed. */
2905 if (sc->age_cdata.age_rxhead != NULL)
2906 m_freem(sc->age_cdata.age_rxhead);
2907 AGE_RXCHAIN_RESET(sc);
2908 /*
2909 * Free RX and TX mbufs still in the queues.
2910 */
2911 for (i = 0; i < AGE_RX_RING_CNT; i++) {
2912 rxd = &sc->age_cdata.age_rxdesc[i];
2913 if (rxd->rx_m != NULL) {
2914 bus_dmamap_sync(sc->age_cdata.age_rx_tag,
2915 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2916 bus_dmamap_unload(sc->age_cdata.age_rx_tag,
2917 rxd->rx_dmamap);
2918 m_freem(rxd->rx_m);
2919 rxd->rx_m = NULL;
2920 }
2921 }
2922 for (i = 0; i < AGE_TX_RING_CNT; i++) {
2923 txd = &sc->age_cdata.age_txdesc[i];
2924 if (txd->tx_m != NULL) {
2925 bus_dmamap_sync(sc->age_cdata.age_tx_tag,
2926 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2927 bus_dmamap_unload(sc->age_cdata.age_tx_tag,
2928 txd->tx_dmamap);
2929 m_freem(txd->tx_m);
2930 txd->tx_m = NULL;
2931 }
2932 }
2933 }
2935 static void
2936 age_stop_txmac(struct age_softc *sc)
2937 {
2938 uint32_t reg;
2939 int i;
2941 AGE_LOCK_ASSERT(sc);
2943 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2944 if ((reg & MAC_CFG_TX_ENB) != 0) {
2945 reg &= ~MAC_CFG_TX_ENB;
2946 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2947 }
2948 /* Stop Tx DMA engine. */
2949 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2950 if ((reg & DMA_CFG_RD_ENB) != 0) {
2951 reg &= ~DMA_CFG_RD_ENB;
2952 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2953 }
2954 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2955 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2956 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0)
2957 break;
2958 DELAY(10);
2959 }
2960 if (i == 0)
2961 device_printf(sc->age_dev, "stopping TxMAC timeout!\n");
2962 }
2964 static void
2965 age_stop_rxmac(struct age_softc *sc)
2966 {
2967 uint32_t reg;
2968 int i;
2970 AGE_LOCK_ASSERT(sc);
2972 reg = CSR_READ_4(sc, AGE_MAC_CFG);
2973 if ((reg & MAC_CFG_RX_ENB) != 0) {
2974 reg &= ~MAC_CFG_RX_ENB;
2975 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
2976 }
2977 /* Stop Rx DMA engine. */
2978 reg = CSR_READ_4(sc, AGE_DMA_CFG);
2979 if ((reg & DMA_CFG_WR_ENB) != 0) {
2980 reg &= ~DMA_CFG_WR_ENB;
2981 CSR_WRITE_4(sc, AGE_DMA_CFG, reg);
2982 }
2983 for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2984 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) &
2985 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0)
2986 break;
2987 DELAY(10);
2988 }
2989 if (i == 0)
2990 device_printf(sc->age_dev, "stopping RxMAC timeout!\n");
2991 }
2993 static void
2994 age_init_tx_ring(struct age_softc *sc)
2995 {
2996 struct age_ring_data *rd;
2997 struct age_txdesc *txd;
2998 int i;
3000 AGE_LOCK_ASSERT(sc);
3002 sc->age_cdata.age_tx_prod = 0;
3003 sc->age_cdata.age_tx_cons = 0;
3004 sc->age_cdata.age_tx_cnt = 0;
3006 rd = &sc->age_rdata;
3007 bzero(rd->age_tx_ring, AGE_TX_RING_SZ);
3008 for (i = 0; i < AGE_TX_RING_CNT; i++) {
3009 txd = &sc->age_cdata.age_txdesc[i];
3010 txd->tx_desc = &rd->age_tx_ring[i];
3011 txd->tx_m = NULL;
3012 }
3014 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
3015 sc->age_cdata.age_tx_ring_map,
3016 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3017 }
3019 static int
3020 age_init_rx_ring(struct age_softc *sc)
3021 {
3022 struct age_ring_data *rd;
3023 struct age_rxdesc *rxd;
3024 int i;
3026 AGE_LOCK_ASSERT(sc);
3028 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1;
3029 sc->age_morework = 0;
3030 rd = &sc->age_rdata;
3031 bzero(rd->age_rx_ring, AGE_RX_RING_SZ);
3032 for (i = 0; i < AGE_RX_RING_CNT; i++) {
3033 rxd = &sc->age_cdata.age_rxdesc[i];
3034 rxd->rx_m = NULL;
3035 rxd->rx_desc = &rd->age_rx_ring[i];
3036 if (age_newbuf(sc, rxd) != 0)
3037 return (ENOBUFS);
3038 }
3040 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
3041 sc->age_cdata.age_rx_ring_map,
3042 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3044 return (0);
3045 }
3047 static void
3048 age_init_rr_ring(struct age_softc *sc)
3049 {
3050 struct age_ring_data *rd;
3052 AGE_LOCK_ASSERT(sc);
3054 sc->age_cdata.age_rr_cons = 0;
3055 AGE_RXCHAIN_RESET(sc);
3057 rd = &sc->age_rdata;
3058 bzero(rd->age_rr_ring, AGE_RR_RING_SZ);
3059 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
3060 sc->age_cdata.age_rr_ring_map,
3061 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3062 }
3064 static void
3065 age_init_cmb_block(struct age_softc *sc)
3066 {
3067 struct age_ring_data *rd;
3069 AGE_LOCK_ASSERT(sc);
3071 rd = &sc->age_rdata;
3072 bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ);
3073 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag,
3074 sc->age_cdata.age_cmb_block_map,
3075 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3076 }
3078 static void
3079 age_init_smb_block(struct age_softc *sc)
3080 {
3081 struct age_ring_data *rd;
3083 AGE_LOCK_ASSERT(sc);
3085 rd = &sc->age_rdata;
3086 bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ);
3087 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag,
3088 sc->age_cdata.age_smb_block_map,
3089 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3090 }
3092 static int
3093 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
3094 {
3095 struct rx_desc *desc;
3096 struct mbuf *m;
3097 bus_dma_segment_t segs[1];
3098 bus_dmamap_t map;
3099 int nsegs;
3101 AGE_LOCK_ASSERT(sc);
3103 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3104 if (m == NULL)
3105 return (ENOBUFS);
3106 m->m_len = m->m_pkthdr.len = MCLBYTES;
3107 m_adj(m, ETHER_ALIGN);
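/*
 * m_adj(m, ETHER_ALIGN) skips 2 bytes so that the 14 byte Ethernet
 * header leaves the following IP header 32-bit aligned.
 */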
3109 if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag,
3110 sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3111 m_freem(m);
3112 return (ENOBUFS);
3113 }
3114 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
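/*
 * Only one segment is expected here; the Rx DMA tag is presumably
 * created with nsegments = 1, and the KASSERT makes that
 * assumption explicit.
 */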
3116 if (rxd->rx_m != NULL) {
3117 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
3118 BUS_DMASYNC_POSTREAD);
3119 bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap);
3121 map = rxd->rx_dmamap;
3122 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap;
3123 sc->age_cdata.age_rx_sparemap = map;
3124 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap,
3125 BUS_DMASYNC_PREREAD);
3127 rxd->rx_m = m;
3128 desc = rxd->rx_desc;
3129 desc->addr = htole64(segs[0].ds_addr);
3130 desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) <<
3131 AGE_RD_LEN_SHIFT);
3132 return (0);
3133 }
3135 static void
3136 age_rxvlan(struct age_softc *sc)
3137 {
3138 struct ifnet *ifp;
3139 uint32_t reg;
3141 AGE_LOCK_ASSERT(sc);
3143 ifp = sc->age_ifp;
3144 reg = CSR_READ_4(sc, AGE_MAC_CFG);
3145 reg &= ~MAC_CFG_VLAN_TAG_STRIP;
3146 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3147 reg |= MAC_CFG_VLAN_TAG_STRIP;
3148 CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
3149 }
3151 static void
3152 age_rxfilter(struct age_softc *sc)
3153 {
3154 struct ifnet *ifp;
3155 struct ifmultiaddr *ifma;
3156 uint32_t crc;
3157 uint32_t mchash[2];
3158 uint32_t rxcfg;
3160 AGE_LOCK_ASSERT(sc);
3162 ifp = sc->age_ifp;
3164 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG);
3165 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC);
3166 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3167 rxcfg |= MAC_CFG_BCAST;
3168 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3169 if ((ifp->if_flags & IFF_PROMISC) != 0)
3170 rxcfg |= MAC_CFG_PROMISC;
3171 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3172 rxcfg |= MAC_CFG_ALLMULTI;
3173 CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
3174 CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
3175 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
3176 return;
3177 }
3179 /* Program new filter. */
3180 bzero(mchash, sizeof(mchash));
3182 IF_ADDR_LOCK(ifp);
3183 TAILQ_FOREACH(ifma, &sc->age_ifp->if_multiaddrs, ifma_link) {
3184 if (ifma->ifma_addr->sa_family != AF_LINK)
3185 continue;
3186 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
3187 ifma->ifma_addr), ETHER_ADDR_LEN);
3188 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3189 }
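/*
 * The hash above uses the top bits of the little-endian CRC:
 * bit 31 selects one of the two 32-bit MAR words and bits 30-26
 * select the bit within it, a 64-bit multicast filter.
 */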
3190 IF_ADDR_UNLOCK(ifp);
3192 CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
3193 CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
3194 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
3195 }
3197 static int
3198 sysctl_age_stats(SYSCTL_HANDLER_ARGS)
3199 {
3200 struct age_softc *sc;
3201 struct age_stats *stats;
3202 int error, result;
3204 result = -1;
3205 error = sysctl_handle_int(oidp, &result, 0, req);
3207 if (error != 0 || req->newptr == NULL)
3208 return (error);
3210 if (result != 1)
3211 return (error);
3213 sc = (struct age_softc *)arg1;
3214 stats = &sc->age_stat;
3215 printf("%s statistics:\n", device_get_nameunit(sc->age_dev));
3216 printf("Transmit good frames : %ju\n",
3217 (uintmax_t)stats->tx_frames);
3218 printf("Transmit good broadcast frames : %ju\n",
3219 (uintmax_t)stats->tx_bcast_frames);
3220 printf("Transmit good multicast frames : %ju\n",
3221 (uintmax_t)stats->tx_mcast_frames);
3222 printf("Transmit pause control frames : %u\n",
3223 stats->tx_pause_frames);
3224 printf("Transmit control frames : %u\n",
3225 stats->tx_control_frames);
3226 printf("Transmit frames with excessive deferrals : %u\n",
3227 stats->tx_excess_defer);
3228 printf("Transmit deferrals : %u\n",
3229 stats->tx_deferred);
3230 printf("Transmit good octets : %ju\n",
3231 (uintmax_t)stats->tx_bytes);
3232 printf("Transmit good broadcast octets : %ju\n",
3233 (uintmax_t)stats->tx_bcast_bytes);
3234 printf("Transmit good multicast octets : %ju\n",
3235 (uintmax_t)stats->tx_mcast_bytes);
3236 printf("Transmit frames 64 bytes : %ju\n",
3237 (uintmax_t)stats->tx_pkts_64);
3238 printf("Transmit frames 65 to 127 bytes : %ju\n",
3239 (uintmax_t)stats->tx_pkts_65_127);
3240 printf("Transmit frames 128 to 255 bytes : %ju\n",
3241 (uintmax_t)stats->tx_pkts_128_255);
3242 printf("Transmit frames 256 to 511 bytes : %ju\n",
3243 (uintmax_t)stats->tx_pkts_256_511);
3244 printf("Transmit frames 512 to 1024 bytes : %ju\n",
3245 (uintmax_t)stats->tx_pkts_512_1023);
3246 printf("Transmit frames 1024 to 1518 bytes : %ju\n",
3247 (uintmax_t)stats->tx_pkts_1024_1518);
3248 printf("Transmit frames 1519 to MTU bytes : %ju\n",
3249 (uintmax_t)stats->tx_pkts_1519_max);
3250 printf("Transmit single collisions : %u\n",
3251 stats->tx_single_colls);
3252 printf("Transmit multiple collisions : %u\n",
3253 stats->tx_multi_colls);
3254 printf("Transmit late collisions : %u\n",
3255 stats->tx_late_colls);
3256 printf("Transmit abort due to excessive collisions : %u\n",
3257 stats->tx_excess_colls);
3258 printf("Transmit underruns due to FIFO underruns : %u\n",
3259 stats->tx_underrun);
3260 printf("Transmit descriptor write-back errors : %u\n",
3261 stats->tx_desc_underrun);
3262 printf("Transmit frames with length mismatched frame size : %u\n",
3264 printf("Transmit frames with truncated due to MTU size : %u\n",
3267 printf("Receive good frames : %ju\n",
3268 (uintmax_t)stats->rx_frames);
3269 printf("Receive good broadcast frames : %ju\n",
3270 (uintmax_t)stats->rx_bcast_frames);
3271 printf("Receive good multicast frames : %ju\n",
3272 (uintmax_t)stats->rx_mcast_frames);
3273 printf("Receive pause control frames : %u\n",
3274 stats->rx_pause_frames);
3275 printf("Receive control frames : %u\n",
3276 stats->rx_control_frames);
3277 printf("Receive CRC errors : %u\n",
3279 printf("Receive frames with length errors : %u\n",
3281 printf("Receive good octets : %ju\n",
3282 (uintmax_t)stats->rx_bytes);
3283 printf("Receive good broadcast octets : %ju\n",
3284 (uintmax_t)stats->rx_bcast_bytes);
3285 printf("Receive good multicast octets : %ju\n",
3286 (uintmax_t)stats->rx_mcast_bytes);
3287 printf("Receive frames too short : %u\n",
3289 printf("Receive fragmented frames : %ju\n",
3290 (uintmax_t)stats->rx_fragments);
3291 printf("Receive frames 64 bytes : %ju\n",
3292 (uintmax_t)stats->rx_pkts_64);
3293 printf("Receive frames 65 to 127 bytes : %ju\n",
3294 (uintmax_t)stats->rx_pkts_65_127);
3295 printf("Receive frames 128 to 255 bytes : %ju\n",
3296 (uintmax_t)stats->rx_pkts_128_255);
3297 printf("Receive frames 256 to 511 bytes : %ju\n",
3298 (uintmax_t)stats->rx_pkts_256_511);
3299 printf("Receive frames 512 to 1024 bytes : %ju\n",
3300 (uintmax_t)stats->rx_pkts_512_1023);
3301 printf("Receive frames 1024 to 1518 bytes : %ju\n",
3302 (uintmax_t)stats->rx_pkts_1024_1518);
3303 printf("Receive frames 1519 to MTU bytes : %ju\n",
3304 (uintmax_t)stats->rx_pkts_1519_max);
3305 printf("Receive frames too long : %ju\n",
3306 (uintmax_t)stats->rx_pkts_truncated);
3307 printf("Receive frames with FIFO overflow : %u\n",
3308 stats->rx_fifo_oflows);
3309 printf("Receive frames with return descriptor overflow : %u\n",
3310 stats->rx_desc_oflows);
3311 printf("Receive frames with alignment errors : %u\n",
3312 stats->rx_alignerrs);
3313 printf("Receive frames dropped due to address filtering : %ju\n",
3314 (uintmax_t)stats->rx_pkts_filtered);
3316 return (error);
3317 }
3319 static int
3320 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3321 {
3322 int error, value;
3326 value = *(int *)arg1;
3327 error = sysctl_handle_int(oidp, &value, 0, req);
3328 if (error || req->newptr == NULL)
3329 return (error);
3330 if (value < low || value > high)
3331 return (EINVAL);
3332 *(int *)arg1 = value;
3334 return (0);
3335 }
3337 static int
3338 sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS)
3339 {
3340 return (sysctl_int_range(oidp, arg1, arg2, req,
3341 AGE_PROC_MIN, AGE_PROC_MAX));
3342 }
3344 static int
3345 sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
3346 {
3348 return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
3349 AGE_IM_TIMER_MAX));
3350 }