2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
39 #include <sys/module.h>
41 #include <sys/queue.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/taskqueue.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_vlan_var.h>
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
67 #include <machine/atomic.h>
68 #include <machine/bus.h>
69 #include <machine/in_cksum.h>
71 #include <dev/jme/if_jmereg.h>
72 #include <dev/jme/if_jmevar.h>
74 /* "device miibus" required. See GENERIC if you get errors here. */
75 #include "miibus_if.h"
77 /* Define the following to enable printing of Rx errors. */
78 #undef JME_SHOW_ERRORS
80 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
82 MODULE_DEPEND(jme, pci, 1, 1, 1);
83 MODULE_DEPEND(jme, ether, 1, 1, 1);
84 MODULE_DEPEND(jme, miibus, 1, 1, 1);
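/* Tunables: allow MSI and MSI-X to be disabled from the loader environment. */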
87 static int msi_disable = 0;
88 static int msix_disable = 0;
89 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
90 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
93 * Devices supported by this driver.
95 static struct jme_dev {
96 uint16_t jme_vendorid;
97 uint16_t jme_deviceid;
100 { VENDORID_JMICRON, DEVICEID_JMC250,
101 "JMicron Inc, JMC250 Gigabit Ethernet" },
102 { VENDORID_JMICRON, DEVICEID_JMC260,
103 "JMicron Inc, JMC260 Fast Ethernet" },
106 static int jme_miibus_readreg(device_t, int, int);
107 static int jme_miibus_writereg(device_t, int, int, int);
108 static void jme_miibus_statchg(device_t);
109 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
110 static int jme_mediachange(struct ifnet *);
111 static int jme_probe(device_t);
112 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
113 static int jme_eeprom_macaddr(struct jme_softc *);
114 static void jme_reg_macaddr(struct jme_softc *);
115 static void jme_map_intr_vector(struct jme_softc *);
116 static int jme_attach(device_t);
117 static int jme_detach(device_t);
118 static void jme_sysctl_node(struct jme_softc *);
119 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
120 static int jme_dma_alloc(struct jme_softc *);
121 static void jme_dma_free(struct jme_softc *);
122 static int jme_shutdown(device_t);
123 static void jme_setlinkspeed(struct jme_softc *);
124 static void jme_setwol(struct jme_softc *);
125 static int jme_suspend(device_t);
126 static int jme_resume(device_t);
127 static int jme_encap(struct jme_softc *, struct mbuf **);
128 static void jme_tx_task(void *, int);
129 static void jme_start(struct ifnet *);
130 static void jme_watchdog(struct jme_softc *);
131 static int jme_ioctl(struct ifnet *, u_long, caddr_t);
132 static void jme_mac_config(struct jme_softc *);
133 static void jme_link_task(void *, int);
134 static int jme_intr(void *);
135 static void jme_int_task(void *, int);
136 static void jme_txeof(struct jme_softc *);
137 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
138 static void jme_rxeof(struct jme_softc *);
139 static int jme_rxintr(struct jme_softc *, int);
140 static void jme_tick(void *);
141 static void jme_reset(struct jme_softc *);
142 static void jme_init(void *);
143 static void jme_init_locked(struct jme_softc *);
144 static void jme_stop(struct jme_softc *);
145 static void jme_stop_tx(struct jme_softc *);
146 static void jme_stop_rx(struct jme_softc *);
147 static int jme_init_rx_ring(struct jme_softc *);
148 static void jme_init_tx_ring(struct jme_softc *);
149 static void jme_init_ssb(struct jme_softc *);
150 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
151 static void jme_set_vlan(struct jme_softc *);
152 static void jme_set_filter(struct jme_softc *);
153 static void jme_stats_clear(struct jme_softc *);
154 static void jme_stats_save(struct jme_softc *);
155 static void jme_stats_update(struct jme_softc *);
156 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
157 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
158 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
159 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
160 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
161 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
164 static device_method_t jme_methods[] = {
165 /* Device interface. */
166 DEVMETHOD(device_probe, jme_probe),
167 DEVMETHOD(device_attach, jme_attach),
168 DEVMETHOD(device_detach, jme_detach),
169 DEVMETHOD(device_shutdown, jme_shutdown),
170 DEVMETHOD(device_suspend, jme_suspend),
171 DEVMETHOD(device_resume, jme_resume),
174 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
175 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
176 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
181 static driver_t jme_driver = {
184 sizeof(struct jme_softc)
187 static devclass_t jme_devclass;
189 DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
190 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
192 static struct resource_spec jme_res_spec_mem[] = {
193 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
197 static struct resource_spec jme_irq_spec_legacy[] = {
198 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
202 static struct resource_spec jme_irq_spec_msi[] = {
203 { SYS_RES_IRQ, 1, RF_ACTIVE },
204 { SYS_RES_IRQ, 2, RF_ACTIVE },
205 { SYS_RES_IRQ, 3, RF_ACTIVE },
206 { SYS_RES_IRQ, 4, RF_ACTIVE },
207 { SYS_RES_IRQ, 5, RF_ACTIVE },
208 { SYS_RES_IRQ, 6, RF_ACTIVE },
209 { SYS_RES_IRQ, 7, RF_ACTIVE },
210 { SYS_RES_IRQ, 8, RF_ACTIVE },
215 * Read a PHY register on the MII of the JMC250.
218 jme_miibus_readreg(device_t dev, int phy, int reg)
220 struct jme_softc *sc;
224 sc = device_get_softc(dev);
226 /* For FPGA version, PHY address 0 should be ignored. */
227 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
231 if (sc->jme_phyaddr != phy)
235 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
236 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
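/* Poll until the controller clears the EXECUTE bit or the access times out. */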
237 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
239 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
244 device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
248 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
252 * Write a PHY register on the MII of the JMC250.
255 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
257 struct jme_softc *sc;
260 sc = device_get_softc(dev);
262 /* For FPGA version, PHY address 0 should be ignored. */
263 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
267 if (sc->jme_phyaddr != phy)
271 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
272 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
273 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
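/* Poll until the write completes or times out. */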
274 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
276 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
281 device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
287 * Callback from MII layer when media changes.
290 jme_miibus_statchg(device_t dev)
292 struct jme_softc *sc;
294 sc = device_get_softc(dev);
295 taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
299 * Get the current interface media status.
302 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
304 struct jme_softc *sc;
305 struct mii_data *mii;
309 if ((ifp->if_flags & IFF_UP) == 0) {
313 mii = device_get_softc(sc->jme_miibus);
316 ifmr->ifm_status = mii->mii_media_status;
317 ifmr->ifm_active = mii->mii_media_active;
322 * Set hardware to newly-selected media.
325 jme_mediachange(struct ifnet *ifp)
327 struct jme_softc *sc;
328 struct mii_data *mii;
329 struct mii_softc *miisc;
334 mii = device_get_softc(sc->jme_miibus);
335 if (mii->mii_instance != 0) {
336 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
337 mii_phy_reset(miisc);
339 error = mii_mediachg(mii);
346 jme_probe(device_t dev)
350 uint16_t vendor, devid;
352 vendor = pci_get_vendor(dev);
353 devid = pci_get_device(dev);
355 for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
357 if (vendor == sp->jme_vendorid &&
358 devid == sp->jme_deviceid) {
359 device_set_desc(dev, sp->jme_name);
360 return (BUS_PROBE_DEFAULT);
368 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
374 for (i = JME_TIMEOUT; i > 0; i--) {
375 reg = CSR_READ_4(sc, JME_SMBCSR);
376 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
382 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
386 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
387 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
388 for (i = JME_TIMEOUT; i > 0; i--) {
390 reg = CSR_READ_4(sc, JME_SMBINTF);
391 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
396 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
400 reg = CSR_READ_4(sc, JME_SMBINTF);
401 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
407 jme_eeprom_macaddr(struct jme_softc *sc)
409 uint8_t eaddr[ETHER_ADDR_LEN];
410 uint8_t fup, reg, val;
415 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
416 fup != JME_EEPROM_SIG0)
418 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
419 fup != JME_EEPROM_SIG1)
423 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
425 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
426 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
427 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
429 if (reg >= JME_PAR0 &&
430 reg < JME_PAR0 + ETHER_ADDR_LEN) {
431 if (jme_eeprom_read_byte(sc, offset + 2,
434 eaddr[reg - JME_PAR0] = val;
438 /* Check for the end of EEPROM descriptor. */
439 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
441 /* Try next eeprom descriptor. */
442 offset += JME_EEPROM_DESC_BYTES;
443 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
445 if (match == ETHER_ADDR_LEN) {
446 bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
454 jme_reg_macaddr(struct jme_softc *sc)
458 /* Read station address. */
459 par0 = CSR_READ_4(sc, JME_PAR0);
460 par1 = CSR_READ_4(sc, JME_PAR1);
462 if ((par0 == 0 && par1 == 0) ||
463 (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
464 device_printf(sc->jme_dev,
465 "Failed to retrieve Ethernet address.\n");
467 sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
468 sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
469 sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
470 sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
471 sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
472 sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
477 jme_map_intr_vector(struct jme_softc *sc)
479 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
481 bzero(map, sizeof(map));
483 /* Map Tx interrupt sources to MSI/MSIX vector 2. */
484 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
485 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
486 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
487 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
488 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
489 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
490 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
491 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
492 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
493 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
494 map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
495 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
496 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
497 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
498 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
499 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
500 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
501 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
502 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
503 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
505 /* Map Rx interrupt sources to MSI/MSIX vector 1. */
506 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
507 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
508 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
509 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
510 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
511 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
512 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
513 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
514 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
515 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
516 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
517 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
518 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
519 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
520 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
521 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
522 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
523 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
524 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
525 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
526 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
527 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
528 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
529 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
530 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
531 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
532 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
533 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
534 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
535 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
536 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
537 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
539 /* All other interrupt sources remain mapped to MSI/MSIX vector 0. */
540 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
541 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
542 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
543 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
547 jme_attach(device_t dev)
549 struct jme_softc *sc;
551 struct mii_softc *miisc;
552 struct mii_data *mii;
555 int error, i, msic, msixc, pmc;
558 sc = device_get_softc(dev);
561 mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
563 callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
564 TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
565 TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
568 * Map the device. JMC250 supports both memory mapped and I/O
569 * register space access. Because I/O register access would
570 * require switching between different BARs, it is a waste of
571 * time to use I/O register space access. The JMC250 maps its
572 * entire register space into 16KB of memory space.
574 pci_enable_busmaster(dev);
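/* Default to memory-mapped register access and a single legacy INTx vector. */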
575 sc->jme_res_spec = jme_res_spec_mem;
576 sc->jme_irq_spec = jme_irq_spec_legacy;
577 error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
579 device_printf(dev, "cannot allocate memory resources.\n");
583 /* Allocate IRQ resources. */
584 msixc = pci_msix_count(dev);
585 msic = pci_msi_count(dev);
587 device_printf(dev, "MSIX count : %d\n", msixc);
588 device_printf(dev, "MSI count : %d\n", msic);
591 /* Prefer MSIX over MSI. */
592 if (msix_disable == 0 || msi_disable == 0) {
593 if (msix_disable == 0 && msixc == JME_MSIX_MESSAGES &&
594 pci_alloc_msix(dev, &msixc) == 0) {
595 if (msic == JME_MSIX_MESSAGES) {
596 device_printf(dev, "Using %d MSIX messages.\n",
598 sc->jme_flags |= JME_FLAG_MSIX;
599 sc->jme_irq_spec = jme_irq_spec_msi;
601 pci_release_msi(dev);
603 if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
604 msic == JME_MSI_MESSAGES &&
605 pci_alloc_msi(dev, &msic) == 0) {
606 if (msic == JME_MSI_MESSAGES) {
607 device_printf(dev, "Using %d MSI messages.\n",
609 sc->jme_flags |= JME_FLAG_MSI;
610 sc->jme_irq_spec = jme_irq_spec_msi;
612 pci_release_msi(dev);
614 /* Map interrupt vectors 0, 1 and 2. */
615 if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
616 (sc->jme_flags & JME_FLAG_MSIX) != 0)
617 jme_map_intr_vector(sc);
620 error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
622 device_printf(dev, "cannot allocate IRQ resources.\n");
626 sc->jme_rev = pci_get_device(dev);
627 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
628 sc->jme_flags |= JME_FLAG_FASTETH;
629 sc->jme_flags |= JME_FLAG_NOJUMBO;
631 reg = CSR_READ_4(sc, JME_CHIPMODE);
632 sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
633 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
635 sc->jme_flags |= JME_FLAG_FPGA;
637 device_printf(dev, "PCI device revision : 0x%04x\n",
639 device_printf(dev, "Chip revision : 0x%02x\n",
641 if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
642 device_printf(dev, "FPGA revision : 0x%04x\n",
643 (reg & CHIPMODE_FPGA_REV_MASK) >>
644 CHIPMODE_FPGA_REV_SHIFT);
646 if (sc->jme_chip_rev == 0xFF) {
647 device_printf(dev, "Unknown chip revision : 0x%02x\n",
653 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
654 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
655 CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
656 sc->jme_flags |= JME_FLAG_DMA32BIT;
657 sc->jme_flags |= JME_FLAG_TXCLK;
658 sc->jme_flags |= JME_FLAG_HWMIB;
661 /* Reset the ethernet controller. */
664 /* Get station address. */
665 reg = CSR_READ_4(sc, JME_SMBCSR);
666 if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
667 error = jme_eeprom_macaddr(sc);
668 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
669 if (error != 0 && (bootverbose))
670 device_printf(sc->jme_dev,
671 "ethernet hardware address not found in EEPROM.\n");
677 * The integrated JR0211 has a fixed PHY address whereas the FPGA
678 * version requires PHY probing to get the correct PHY address.
680 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
681 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
682 GPREG0_PHY_ADDR_MASK;
684 device_printf(dev, "PHY is at address %d.\n",
689 /* Set max allowable DMA size. */
690 if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
691 sc->jme_flags |= JME_FLAG_PCIE;
692 burst = pci_read_config(dev, i + 0x08, 2);
694 device_printf(dev, "Read request size : %d bytes.\n",
695 128 << ((burst >> 12) & 0x07));
696 device_printf(dev, "TLP payload size : %d bytes.\n",
697 128 << ((burst >> 5) & 0x07));
699 switch ((burst >> 12) & 0x07) {
701 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
704 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
707 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
710 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
712 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
713 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
715 /* Create coalescing sysctl node. */
717 if ((error = jme_dma_alloc(sc)) != 0)
720 ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
722 device_printf(dev, "cannot allocate ifnet structure.\n");
728 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
729 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
730 ifp->if_ioctl = jme_ioctl;
731 ifp->if_start = jme_start;
732 ifp->if_init = jme_init;
733 ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
734 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
735 IFQ_SET_READY(&ifp->if_snd);
736 /* JMC250 supports Tx/Rx checksum offload as well as TSO. */
737 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
738 ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
739 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
740 sc->jme_flags |= JME_FLAG_PMCAP;
741 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
743 ifp->if_capenable = ifp->if_capabilities;
745 /* Set up MII bus. */
746 if ((error = mii_phy_probe(dev, &sc->jme_miibus, jme_mediachange,
747 jme_mediastatus)) != 0) {
748 device_printf(dev, "no PHY found!\n");
753 * Force PHY to FPGA mode.
755 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
756 mii = device_get_softc(sc->jme_miibus);
757 if (mii->mii_instance != 0) {
758 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
759 if (miisc->mii_phy != 0) {
760 sc->jme_phyaddr = miisc->mii_phy;
764 if (sc->jme_phyaddr != 0) {
765 device_printf(sc->jme_dev,
766 "FPGA PHY is at %d\n", sc->jme_phyaddr);
768 jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
774 ether_ifattach(ifp, sc->jme_eaddr);
776 /* VLAN capability setup */
777 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
779 ifp->if_capenable = ifp->if_capabilities;
781 /* Tell the upper layer(s) we support long frames. */
782 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
784 /* Create local taskq. */
785 TASK_INIT(&sc->jme_tx_task, 1, jme_tx_task, ifp);
786 sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
787 taskqueue_thread_enqueue, &sc->jme_tq);
788 if (sc->jme_tq == NULL) {
789 device_printf(dev, "could not create taskqueue.\n");
794 taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
795 device_get_nameunit(sc->jme_dev));
797 if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
798 msic = JME_MSIX_MESSAGES;
799 else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
800 msic = JME_MSI_MESSAGES;
803 for (i = 0; i < msic; i++) {
804 error = bus_setup_intr(dev, sc->jme_irq[i],
805 INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
806 &sc->jme_intrhand[i]);
812 device_printf(dev, "could not set up interrupt handler.\n");
813 taskqueue_free(sc->jme_tq);
827 jme_detach(device_t dev)
829 struct jme_softc *sc;
833 sc = device_get_softc(dev);
836 if (device_is_attached(dev)) {
838 sc->jme_flags |= JME_FLAG_DETACH;
841 callout_drain(&sc->jme_tick_ch);
842 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
843 taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
844 taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
848 if (sc->jme_tq != NULL) {
849 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
850 taskqueue_free(sc->jme_tq);
854 if (sc->jme_miibus != NULL) {
855 device_delete_child(dev, sc->jme_miibus);
856 sc->jme_miibus = NULL;
858 bus_generic_detach(dev);
867 if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
868 msic = JME_MSIX_MESSAGES;
869 else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
870 msic = JME_MSI_MESSAGES;
873 for (i = 0; i < msic; i++) {
874 if (sc->jme_intrhand[i] != NULL) {
875 bus_teardown_intr(dev, sc->jme_irq[i],
876 sc->jme_intrhand[i]);
877 sc->jme_intrhand[i] = NULL;
881 bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
882 if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
883 pci_release_msi(dev);
884 bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
885 mtx_destroy(&sc->jme_mtx);
890 #define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \
891 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
894 jme_sysctl_node(struct jme_softc *sc)
896 struct sysctl_ctx_list *ctx;
897 struct sysctl_oid_list *child, *parent;
898 struct sysctl_oid *tree;
899 struct jme_hw_stats *stats;
902 stats = &sc->jme_stats;
903 ctx = device_get_sysctl_ctx(sc->jme_dev);
904 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
906 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
907 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0,
908 sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
910 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
911 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt, 0,
912 sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
914 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
915 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to, 0,
916 sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
918 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
919 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt, 0,
920 sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
922 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
923 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit, 0,
924 sysctl_hw_jme_proc_limit, "I",
925 "max number of Rx events to process");
927 /* Pull in device tunables. */
928 sc->jme_process_limit = JME_PROC_DEFAULT;
929 error = resource_int_value(device_get_name(sc->jme_dev),
930 device_get_unit(sc->jme_dev), "process_limit",
931 &sc->jme_process_limit);
933 if (sc->jme_process_limit < JME_PROC_MIN ||
934 sc->jme_process_limit > JME_PROC_MAX) {
935 device_printf(sc->jme_dev,
936 "process_limit value out of range; "
937 "using default: %d\n", JME_PROC_DEFAULT);
938 sc->jme_process_limit = JME_PROC_DEFAULT;
942 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
943 error = resource_int_value(device_get_name(sc->jme_dev),
944 device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
946 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
947 sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
948 device_printf(sc->jme_dev,
949 "tx_coal_to value out of range; "
950 "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
951 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
955 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
956 error = resource_int_value(device_get_name(sc->jme_dev),
957 device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
959 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
960 sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
961 device_printf(sc->jme_dev,
962 "tx_coal_pkt value out of range; "
963 "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
964 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
968 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
969 error = resource_int_value(device_get_name(sc->jme_dev),
970 device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
972 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
973 sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
974 device_printf(sc->jme_dev,
975 "rx_coal_to value out of range; "
976 "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
977 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
981 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
982 error = resource_int_value(device_get_name(sc->jme_dev),
983 device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
985 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
986 sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
987 device_printf(sc->jme_dev,
988 "rx_coal_pkt value out of range; "
989 "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
990 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
994 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
997 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
998 NULL, "JME statistics");
999 parent = SYSCTL_CHILDREN(tree);
1001 /* Rx statistics. */
1002 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
1003 NULL, "Rx MAC statistics");
1004 child = SYSCTL_CHILDREN(tree);
1005 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1006 &stats->rx_good_frames, "Good frames");
1007 JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1008 &stats->rx_crc_errs, "CRC errors");
1009 JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
1010 &stats->rx_mii_errs, "MII errors");
1011 JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1012 &stats->rx_fifo_oflows, "FIFO overflows");
1013 JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
1014 &stats->rx_desc_empty, "Descriptor empty");
1015 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1016 &stats->rx_bad_frames, "Bad frames");
1018 /* Tx statistics. */
1019 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
1020 NULL, "Tx MAC statistics");
1021 child = SYSCTL_CHILDREN(tree);
1022 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1023 &stats->tx_good_frames, "Good frames");
1024 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1025 &stats->tx_bad_frames, "Bad frames");
1028 #undef JME_SYSCTL_STAT_ADD32
1030 struct jme_dmamap_arg {
1031 bus_addr_t jme_busaddr;
1035 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1037 struct jme_dmamap_arg *ctx;
1042 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1044 ctx = (struct jme_dmamap_arg *)arg;
1045 ctx->jme_busaddr = segs[0].ds_addr;
1049 jme_dma_alloc(struct jme_softc *sc)
1051 struct jme_dmamap_arg ctx;
1052 struct jme_txdesc *txd;
1053 struct jme_rxdesc *rxd;
1054 bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
1057 lowaddr = BUS_SPACE_MAXADDR;
1058 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1059 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1062 /* Create parent ring tag. */
1063 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1064 1, 0, /* algnmnt, boundary */
1065 lowaddr, /* lowaddr */
1066 BUS_SPACE_MAXADDR, /* highaddr */
1067 NULL, NULL, /* filter, filterarg */
1068 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1070 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1072 NULL, NULL, /* lockfunc, lockarg */
1073 &sc->jme_cdata.jme_ring_tag);
1075 device_printf(sc->jme_dev,
1076 "could not create parent ring DMA tag.\n");
1079 /* Create tag for Tx ring. */
1080 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1081 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */
1082 BUS_SPACE_MAXADDR, /* lowaddr */
1083 BUS_SPACE_MAXADDR, /* highaddr */
1084 NULL, NULL, /* filter, filterarg */
1085 JME_TX_RING_SIZE, /* maxsize */
1087 JME_TX_RING_SIZE, /* maxsegsize */
1089 NULL, NULL, /* lockfunc, lockarg */
1090 &sc->jme_cdata.jme_tx_ring_tag);
1092 device_printf(sc->jme_dev,
1093 "could not allocate Tx ring DMA tag.\n");
1097 /* Create tag for Rx ring. */
1098 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1099 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */
1100 lowaddr, /* lowaddr */
1101 BUS_SPACE_MAXADDR, /* highaddr */
1102 NULL, NULL, /* filter, filterarg */
1103 JME_RX_RING_SIZE, /* maxsize */
1105 JME_RX_RING_SIZE, /* maxsegsize */
1107 NULL, NULL, /* lockfunc, lockarg */
1108 &sc->jme_cdata.jme_rx_ring_tag);
1110 device_printf(sc->jme_dev,
1111 "could not allocate Rx ring DMA tag.\n");
1115 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1116 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1117 (void **)&sc->jme_rdata.jme_tx_ring,
1118 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1119 &sc->jme_cdata.jme_tx_ring_map);
1121 device_printf(sc->jme_dev,
1122 "could not allocate DMA'able memory for Tx ring.\n");
1126 ctx.jme_busaddr = 0;
1127 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1128 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1129 JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1130 if (error != 0 || ctx.jme_busaddr == 0) {
1131 device_printf(sc->jme_dev,
1132 "could not load DMA'able memory for Tx ring.\n");
1135 sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
1137 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1138 error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1139 (void **)&sc->jme_rdata.jme_rx_ring,
1140 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1141 &sc->jme_cdata.jme_rx_ring_map);
1143 device_printf(sc->jme_dev,
1144 "could not allocate DMA'able memory for Rx ring.\n");
1148 ctx.jme_busaddr = 0;
1149 error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1150 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1151 JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1152 if (error != 0 || ctx.jme_busaddr == 0) {
1153 device_printf(sc->jme_dev,
1154 "could not load DMA'able memory for Rx ring.\n");
1157 sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
1159 if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1160 /* Tx/Rx descriptor queue should reside within 4GB boundary. */
1161 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
1163 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
1165 if ((JME_ADDR_HI(tx_ring_end) !=
1166 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1167 (JME_ADDR_HI(rx_ring_end) !=
1168 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1169 device_printf(sc->jme_dev, "4GB boundary crossed, "
1170 "switching to 32bit DMA address mode.\n");
1172 /* Limit DMA address space to 32bit and try again. */
1173 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1178 lowaddr = BUS_SPACE_MAXADDR;
1179 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1180 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1181 /* Create parent buffer tag. */
1182 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1183 1, 0, /* algnmnt, boundary */
1184 lowaddr, /* lowaddr */
1185 BUS_SPACE_MAXADDR, /* highaddr */
1186 NULL, NULL, /* filter, filterarg */
1187 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1189 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1191 NULL, NULL, /* lockfunc, lockarg */
1192 &sc->jme_cdata.jme_buffer_tag);
1194 device_printf(sc->jme_dev,
1195 "could not create parent buffer DMA tag.\n");
1199 /* Create shadow status block tag. */
1200 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1201 JME_SSB_ALIGN, 0, /* algnmnt, boundary */
1202 BUS_SPACE_MAXADDR, /* lowaddr */
1203 BUS_SPACE_MAXADDR, /* highaddr */
1204 NULL, NULL, /* filter, filterarg */
1205 JME_SSB_SIZE, /* maxsize */
1207 JME_SSB_SIZE, /* maxsegsize */
1209 NULL, NULL, /* lockfunc, lockarg */
1210 &sc->jme_cdata.jme_ssb_tag);
1212 device_printf(sc->jme_dev,
1213 "could not create shared status block DMA tag.\n");
1217 /* Create tag for Tx buffers. */
1218 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1219 1, 0, /* algnmnt, boundary */
1220 BUS_SPACE_MAXADDR, /* lowaddr */
1221 BUS_SPACE_MAXADDR, /* highaddr */
1222 NULL, NULL, /* filter, filterarg */
1223 JME_TSO_MAXSIZE, /* maxsize */
1224 JME_MAXTXSEGS, /* nsegments */
1225 JME_TSO_MAXSEGSIZE, /* maxsegsize */
1227 NULL, NULL, /* lockfunc, lockarg */
1228 &sc->jme_cdata.jme_tx_tag);
1230 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1234 /* Create tag for Rx buffers. */
1235 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1236 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
1237 BUS_SPACE_MAXADDR, /* lowaddr */
1238 BUS_SPACE_MAXADDR, /* highaddr */
1239 NULL, NULL, /* filter, filterarg */
1240 MCLBYTES, /* maxsize */
1242 MCLBYTES, /* maxsegsize */
1244 NULL, NULL, /* lockfunc, lockarg */
1245 &sc->jme_cdata.jme_rx_tag);
1247 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1252 * Allocate DMA'able memory and load the DMA map for shared
1255 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1256 (void **)&sc->jme_rdata.jme_ssb_block,
1257 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1258 &sc->jme_cdata.jme_ssb_map);
1260 device_printf(sc->jme_dev, "could not allocate DMA'able "
1261 "memory for shared status block.\n");
1265 ctx.jme_busaddr = 0;
1266 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1267 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1268 JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1269 if (error != 0 || ctx.jme_busaddr == 0) {
1270 device_printf(sc->jme_dev, "could not load DMA'able memory "
1271 "for shared status block.\n");
1274 sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
1276 /* Create DMA maps for Tx buffers. */
1277 for (i = 0; i < JME_TX_RING_CNT; i++) {
1278 txd = &sc->jme_cdata.jme_txdesc[i];
1280 txd->tx_dmamap = NULL;
1281 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1284 device_printf(sc->jme_dev,
1285 "could not create Tx dmamap.\n");
1289 /* Create DMA maps for Rx buffers. */
1290 if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1291 &sc->jme_cdata.jme_rx_sparemap)) != 0) {
1292 device_printf(sc->jme_dev,
1293 "could not create spare Rx dmamap.\n");
1296 for (i = 0; i < JME_RX_RING_CNT; i++) {
1297 rxd = &sc->jme_cdata.jme_rxdesc[i];
1299 rxd->rx_dmamap = NULL;
1300 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1303 device_printf(sc->jme_dev,
1304 "could not create Rx dmamap.\n");
1314 jme_dma_free(struct jme_softc *sc)
1316 struct jme_txdesc *txd;
1317 struct jme_rxdesc *rxd;
1321 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1322 if (sc->jme_cdata.jme_tx_ring_map)
1323 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1324 sc->jme_cdata.jme_tx_ring_map);
1325 if (sc->jme_cdata.jme_tx_ring_map &&
1326 sc->jme_rdata.jme_tx_ring)
1327 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1328 sc->jme_rdata.jme_tx_ring,
1329 sc->jme_cdata.jme_tx_ring_map);
1330 sc->jme_rdata.jme_tx_ring = NULL;
1331 sc->jme_cdata.jme_tx_ring_map = NULL;
1332 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1333 sc->jme_cdata.jme_tx_ring_tag = NULL;
1336 if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1337 if (sc->jme_cdata.jme_rx_ring_map)
1338 bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1339 sc->jme_cdata.jme_rx_ring_map);
1340 if (sc->jme_cdata.jme_rx_ring_map &&
1341 sc->jme_rdata.jme_rx_ring)
1342 bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1343 sc->jme_rdata.jme_rx_ring,
1344 sc->jme_cdata.jme_rx_ring_map);
1345 sc->jme_rdata.jme_rx_ring = NULL;
1346 sc->jme_cdata.jme_rx_ring_map = NULL;
1347 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1348 sc->jme_cdata.jme_rx_ring_tag = NULL;
1351 if (sc->jme_cdata.jme_tx_tag != NULL) {
1352 for (i = 0; i < JME_TX_RING_CNT; i++) {
1353 txd = &sc->jme_cdata.jme_txdesc[i];
1354 if (txd->tx_dmamap != NULL) {
1355 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1357 txd->tx_dmamap = NULL;
1360 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1361 sc->jme_cdata.jme_tx_tag = NULL;
1364 if (sc->jme_cdata.jme_rx_tag != NULL) {
1365 for (i = 0; i < JME_RX_RING_CNT; i++) {
1366 rxd = &sc->jme_cdata.jme_rxdesc[i];
1367 if (rxd->rx_dmamap != NULL) {
1368 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1370 rxd->rx_dmamap = NULL;
1373 if (sc->jme_cdata.jme_rx_sparemap != NULL) {
1374 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1375 sc->jme_cdata.jme_rx_sparemap);
1376 sc->jme_cdata.jme_rx_sparemap = NULL;
1378 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1379 sc->jme_cdata.jme_rx_tag = NULL;
1382 /* Shared status block. */
1383 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1384 if (sc->jme_cdata.jme_ssb_map)
1385 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1386 sc->jme_cdata.jme_ssb_map);
1387 if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
1388 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1389 sc->jme_rdata.jme_ssb_block,
1390 sc->jme_cdata.jme_ssb_map);
1391 sc->jme_rdata.jme_ssb_block = NULL;
1392 sc->jme_cdata.jme_ssb_map = NULL;
1393 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1394 sc->jme_cdata.jme_ssb_tag = NULL;
1397 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1398 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1399 sc->jme_cdata.jme_buffer_tag = NULL;
1401 if (sc->jme_cdata.jme_ring_tag != NULL) {
1402 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1403 sc->jme_cdata.jme_ring_tag = NULL;
1408 * Make sure the interface is stopped at reboot time.
1411 jme_shutdown(device_t dev)
1414 return (jme_suspend(dev));
1418 * Unlike other ethernet controllers, the JMC250 requires
1419 * explicitly resetting the link speed to 10/100Mbps, as a
1420 * gigabit link consumes more than 375mA.
1421 * Note, we reset the link speed to 10/100Mbps with
1422 * auto-negotiation, but we don't know whether that operation
1423 * will succeed as we have no control after powering
1424 * off. If the renegotiation fails, WOL may not work. Running
1425 * at 1Gbps draws more than the 375mA at 3.3V allowed by the
1426 * PCI specification, and that could result in power to the
1427 * ethernet controller being shut down completely.
1430 * Save current negotiated media speed/duplex/flow-control
1431 * to softc and restore the same link again after resuming.
1432 * PHY handling such as power down/resetting to 100Mbps
1433 * may be better handled in the suspend method of the PHY driver.
1436 jme_setlinkspeed(struct jme_softc *sc)
1438 struct mii_data *mii;
1441 JME_LOCK_ASSERT(sc);
1443 mii = device_get_softc(sc->jme_miibus);
1446 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1447 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1457 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1458 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1459 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1460 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1461 BMCR_AUTOEN | BMCR_STARTNEG);
1464 /* Poll link state until jme(4) gets a 10/100 link. */
1465 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1467 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1468 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1478 pause("jmelnk", hz);
1481 if (i == MII_ANEGTICKS_GIGE)
1482 device_printf(sc->jme_dev, "establishing link failed, "
1483 "WOL may not work!\n");
1486 * No link, force MAC to have 100Mbps, full-duplex link.
1487 * This is the last resort and may or may not work.
1489 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1490 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1495 jme_setwol(struct jme_softc *sc)
1502 JME_LOCK_ASSERT(sc);
1504 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1505 /* Remove Tx MAC/offload clock to save more power. */
1506 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1507 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1508 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1509 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1510 /* No PME capability, PHY power down. */
1511 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1512 MII_BMCR, BMCR_PDOWN);
1517 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1518 pmcs = CSR_READ_4(sc, JME_PMCS);
1519 pmcs &= ~PMCS_WOL_ENB_MASK;
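/* Enable magic-frame wakeup only if the interface requested it. */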
1520 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1521 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1522 /* Enable PME message. */
1523 gpr |= GPREG0_PME_ENB;
1524 /* For gigabit controllers, reset link speed to 10/100. */
1525 if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
1526 jme_setlinkspeed(sc);
1529 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1530 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1531 /* Remove Tx MAC/offload clock to save more power. */
1532 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1533 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1534 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1535 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1537 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1538 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1539 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1540 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1541 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1542 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1543 /* No WOL, PHY power down. */
1544 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
1545 MII_BMCR, BMCR_PDOWN);
1550 jme_suspend(device_t dev)
1552 struct jme_softc *sc;
1554 sc = device_get_softc(dev);
1565 jme_resume(device_t dev)
1567 struct jme_softc *sc;
1572 sc = device_get_softc(dev);
1575 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
1576 pmstat = pci_read_config(sc->jme_dev,
1577 pmc + PCIR_POWER_STATUS, 2);
1578 /* Disable PME and clear PME status. */
1579 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1580 pci_write_config(sc->jme_dev,
1581 pmc + PCIR_POWER_STATUS, pmstat, 2);
1584 if ((ifp->if_flags & IFF_UP) != 0) {
1585 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1586 jme_init_locked(sc);
1595 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1597 struct jme_txdesc *txd;
1598 struct jme_desc *desc;
1600 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1601 int error, i, nsegs, prod;
1602 uint32_t cflags, tso_segsz;
1604 JME_LOCK_ASSERT(sc);
1606 M_ASSERTPKTHDR((*m_head));
1608 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1610 * Due to its adherence to the NDIS specification, the JMC250
1611 * assumes the upper stack computed the TCP pseudo checksum
1612 * without including the payload length. This breaks
1613 * checksum offload for the TSO case, so recompute the TCP
1614 * pseudo checksum for the JMC250. Hopefully this isn't
1615 * much of a burden on modern CPUs.
1617 struct ether_header *eh;
1620 uint32_t ip_off, poff;
1622 if (M_WRITABLE(*m_head) == 0) {
1623 /* Get a writable copy. */
1624 m = m_dup(*m_head, M_DONTWAIT);
1632 ip_off = sizeof(struct ether_header);
1633 m = m_pullup(*m_head, ip_off);
1638 eh = mtod(m, struct ether_header *);
1639 /* Check the existence of VLAN tag. */
1640 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1641 ip_off = sizeof(struct ether_vlan_header);
1642 m = m_pullup(m, ip_off);
1648 m = m_pullup(m, ip_off + sizeof(struct ip));
1653 ip = (struct ip *)(mtod(m, char *) + ip_off);
1654 poff = ip_off + (ip->ip_hl << 2);
1655 m = m_pullup(m, poff + sizeof(struct tcphdr));
1660 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1662 * Reset IP checksum and recompute TCP pseudo
1663 * checksum that NDIS specification requires.
1666 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
1667 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1669 htons((tcp->th_off << 2) + IPPROTO_TCP));
1670 /* No need to TSO, force IP checksum offload. */
1671 (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
1672 (*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
1674 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1675 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1679 prod = sc->jme_cdata.jme_tx_prod;
1680 txd = &sc->jme_cdata.jme_txdesc[prod];
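/* Map the mbuf chain for DMA; on EFBIG, collapse the chain and retry once. */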
1682 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1683 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1684 if (error == EFBIG) {
1685 m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
1692 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1693 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1699 } else if (error != 0)
1708 * Check descriptor overrun. Leave one free descriptor.
1709 * Since we always use 64bit address mode for transmitting,
1710 * each Tx request requires one more dummy descriptor.
1712 if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
1713 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1720 /* Configure checksum offload and TSO. */
1721 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1722 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1724 cflags |= JME_TD_TSO;
1726 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1727 cflags |= JME_TD_IPCSUM;
1728 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1729 cflags |= JME_TD_TCPCSUM;
1730 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1731 cflags |= JME_TD_UDPCSUM;
1733 /* Configure VLAN. */
1734 if ((m->m_flags & M_VLANTAG) != 0) {
1735 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1736 cflags |= JME_TD_VLAN_TAG;
1739 desc = &sc->jme_rdata.jme_tx_ring[prod];
1740 desc->flags = htole32(cflags);
1741 desc->buflen = htole32(tso_segsz);
1742 desc->addr_hi = htole32(m->m_pkthdr.len);
1744 sc->jme_cdata.jme_tx_cnt++;
1745 JME_DESC_INC(prod, JME_TX_RING_CNT);
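/* The remaining descriptors carry the DMA segments of the payload. */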
1746 for (i = 0; i < nsegs; i++) {
1747 desc = &sc->jme_rdata.jme_tx_ring[prod];
1748 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1749 desc->buflen = htole32(txsegs[i].ds_len);
1750 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1751 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1752 sc->jme_cdata.jme_tx_cnt++;
1753 JME_DESC_INC(prod, JME_TX_RING_CNT);
1756 /* Update producer index. */
1757 sc->jme_cdata.jme_tx_prod = prod;
1759 * Finally, request an interrupt and give ownership of the
1760 * first descriptor to the hardware.
1762 desc = txd->tx_desc;
1763 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1766 txd->tx_ndesc = nsegs + 1;
1768 /* Sync descriptors. */
1769 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1770 BUS_DMASYNC_PREWRITE);
1771 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1772 sc->jme_cdata.jme_tx_ring_map,
1773 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1779 jme_tx_task(void *arg, int pending)
1783 ifp = (struct ifnet *)arg;
1788 jme_start(struct ifnet *ifp)
1790 struct jme_softc *sc;
1791 struct mbuf *m_head;
1798 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1801 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1802 IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) {
1807 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1808 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1812 * Pack the data into the transmit ring. If we
1813 * don't have room, set the OACTIVE flag and wait
1814 * for the NIC to drain the ring.
1816 if (jme_encap(sc, &m_head)) {
1819 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1820 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1826 * If there's a BPF listener, bounce a copy of this frame
1829 ETHER_BPF_MTAP(ifp, m_head);
1834 * Reading TXCSR takes a very long time under heavy load,
1835 * so cache the TXCSR value and write the ORed value with
1836 * the kick command to the TXCSR. This saves one register
1839 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1840 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1841 /* Set a timeout in case the chip goes out to lunch. */
1842 sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1849 jme_watchdog(struct jme_softc *sc)
1853 JME_LOCK_ASSERT(sc);
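/* Do nothing if the watchdog is not armed or has not expired yet. */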
1855 if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1859 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1860 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1862 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1863 jme_init_locked(sc);
1867 if (sc->jme_cdata.jme_tx_cnt == 0) {
1868 if_printf(sc->jme_ifp,
1869 "watchdog timeout (missed Tx interrupts) -- recovering\n");
1870 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1871 taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
1875 if_printf(sc->jme_ifp, "watchdog timeout\n");
1877 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1878 jme_init_locked(sc);
1879 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1880 taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
1884 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1886 struct jme_softc *sc;
1888 struct mii_data *mii;
1893 ifr = (struct ifreq *)data;
1897 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1898 ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1899 ifr->ifr_mtu > JME_MAX_MTU)) {
1904 if (ifp->if_mtu != ifr->ifr_mtu) {
1906 * No special configuration is required when the interface
1907 * MTU is changed, but the availability of TSO/Tx checksum
1908 * offload should be checked against the new MTU size as
1909 * the FIFO size is just 2K.
1912 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
1913 ifp->if_capenable &=
1914 ~(IFCAP_TXCSUM | IFCAP_TSO4);
1916 ~(JME_CSUM_FEATURES | CSUM_TSO);
1917 VLAN_CAPABILITIES(ifp);
1919 ifp->if_mtu = ifr->ifr_mtu;
1920 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1921 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1922 jme_init_locked(sc);
1929 if ((ifp->if_flags & IFF_UP) != 0) {
1930 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1931 if (((ifp->if_flags ^ sc->jme_if_flags)
1932 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
1935 if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
1936 jme_init_locked(sc);
1939 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1942 sc->jme_if_flags = ifp->if_flags;
1948 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1954 mii = device_get_softc(sc->jme_miibus);
1955 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
1959 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1960 if ((mask & IFCAP_TXCSUM) != 0 &&
1961 ifp->if_mtu < JME_TX_FIFO_SIZE) {
1962 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
1963 ifp->if_capenable ^= IFCAP_TXCSUM;
1964 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
1965 ifp->if_hwassist |= JME_CSUM_FEATURES;
1967 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
1970 if ((mask & IFCAP_RXCSUM) != 0 &&
1971 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
1972 ifp->if_capenable ^= IFCAP_RXCSUM;
1973 reg = CSR_READ_4(sc, JME_RXMAC);
1974 reg &= ~RXMAC_CSUM_ENB;
1975 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1976 reg |= RXMAC_CSUM_ENB;
1977 CSR_WRITE_4(sc, JME_RXMAC, reg);
1979 if ((mask & IFCAP_TSO4) != 0 &&
1980 ifp->if_mtu < JME_TX_FIFO_SIZE) {
1981 if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
1982 ifp->if_capenable ^= IFCAP_TSO4;
1983 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
1984 ifp->if_hwassist |= CSUM_TSO;
1986 ifp->if_hwassist &= ~CSUM_TSO;
1989 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
1990 (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
1991 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
1992 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
1993 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
1994 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
1995 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1996 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
1997 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2001 VLAN_CAPABILITIES(ifp);
2004 error = ether_ioctl(ifp, cmd, data);
2012 jme_mac_config(struct jme_softc *sc)
2014 struct mii_data *mii;
2015 uint32_t ghc, gpreg, rxmac, txmac, txpause;
2018 JME_LOCK_ASSERT(sc);
2020 mii = device_get_softc(sc->jme_miibus);
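/* Pulse a reset through the JME_GHC register before reprogramming the MACs. */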
2022 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2024 CSR_WRITE_4(sc, JME_GHC, 0);
2027 rxmac = CSR_READ_4(sc, JME_RXMAC);
2028 rxmac &= ~RXMAC_FC_ENB;
2029 txmac = CSR_READ_4(sc, JME_TXMAC);
2030 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2031 txpause = CSR_READ_4(sc, JME_TXPFC);
2032 txpause &= ~TXPFC_PAUSE_ENB;
2033 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2034 ghc |= GHC_FULL_DUPLEX;
2035 rxmac &= ~RXMAC_COLL_DET_ENB;
2036 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2037 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2040 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2041 txpause |= TXPFC_PAUSE_ENB;
2042 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2043 rxmac |= RXMAC_FC_ENB;
2045 /* Disable retry transmit timer/retry limit. */
2046 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2047 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2049 rxmac |= RXMAC_COLL_DET_ENB;
2050 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2051 /* Enable retry transmit timer/retry limit. */
2052 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2053 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2055 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2056 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2058 ghc |= GHC_SPEED_10;
2059 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2062 ghc |= GHC_SPEED_100;
2063 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2066 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2068 ghc |= GHC_SPEED_1000;
2069 txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2070 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2071 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2076 if (sc->jme_rev == DEVICEID_JMC250 &&
2077 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2079 * Work around an occasional packet loss issue of the JMC250 A2
2080 * when it runs on half-duplex media.
2082 gpreg = CSR_READ_4(sc, JME_GPREG1);
2083 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2084 gpreg &= ~GPREG1_HDPX_FIX;
2086 gpreg |= GPREG1_HDPX_FIX;
2087 CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2088 /* Workaround CRC errors at 100Mbps on JMC250 A2. */
2089 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2090 /* Extend interface FIFO depth. */
2091 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2094 /* Select default interface FIFO depth. */
2095 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2099 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2101 CSR_WRITE_4(sc, JME_GHC, ghc);
2102 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2103 CSR_WRITE_4(sc, JME_TXMAC, txmac);
2104 CSR_WRITE_4(sc, JME_TXPFC, txpause);
2108 jme_link_task(void *arg, int pending)
2110 struct jme_softc *sc;
2111 struct mii_data *mii;
2113 struct jme_txdesc *txd;
2117 sc = (struct jme_softc *)arg;
2120 mii = device_get_softc(sc->jme_miibus);
2122 if (mii == NULL || ifp == NULL ||
2123 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2128 sc->jme_flags &= ~JME_FLAG_LINK;
2129 if ((mii->mii_media_status & IFM_AVALID) != 0) {
2130 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2133 sc->jme_flags |= JME_FLAG_LINK;
2136 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2138 sc->jme_flags |= JME_FLAG_LINK;
2146 * Disabling the Rx/Tx MACs has a side-effect of resetting
2147 * the JME_TXNDA/JME_RXNDA registers to the first address of
2148 * the Tx/Rx descriptor rings. So the driver should reset its
2149 * internal producer/consumer pointers and reclaim any
2150 * allocated resources. Note, just saving the value of the
2151 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
2152 * and restoring them afterwards is not
2153 * sufficient to guarantee correct MAC state because
2154 * stopping the MAC can take a while and the hardware
2155 * might have updated the JME_TXNDA/JME_RXNDA registers
2156 * during the stop operation.
2158 /* Block execution of task. */
2159 taskqueue_block(sc->jme_tq);
2160 /* Disable interrupts and stop driver. */
2161 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2162 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2163 callout_stop(&sc->jme_tick_ch);
2164 sc->jme_watchdog_timer = 0;
2166 /* Stop receiver/transmitter. */
2170 /* XXX Drain all queued tasks. */
2172 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2173 taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
2176 jme_rxintr(sc, JME_RX_RING_CNT);
2177 if (sc->jme_cdata.jme_rxhead != NULL)
2178 m_freem(sc->jme_cdata.jme_rxhead);
2179 JME_RXCHAIN_RESET(sc);
2181 if (sc->jme_cdata.jme_tx_cnt != 0) {
2182 /* Remove queued packets for transmit. */
2183 for (i = 0; i < JME_TX_RING_CNT; i++) {
2184 txd = &sc->jme_cdata.jme_txdesc[i];
2185 if (txd->tx_m != NULL) {
2187 sc->jme_cdata.jme_tx_tag,
2189 BUS_DMASYNC_POSTWRITE);
2191 sc->jme_cdata.jme_tx_tag,
2202 * Reuse configured Rx descriptors and reset the
2203 * producer/consumer index.
2205 sc->jme_cdata.jme_rx_cons = 0;
2206 atomic_set_int(&sc->jme_morework, 0);
2207 jme_init_tx_ring(sc);
2208 /* Initialize shadow status block. */
2211 /* Program MAC with resolved speed/duplex/flow-control. */
2212 if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2214 jme_stats_clear(sc);
2216 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2217 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2219 /* Set Tx ring address to the hardware. */
2220 paddr = JME_TX_RING_ADDR(sc, 0);
2221 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2222 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2224 /* Set Rx ring address to the hardware. */
2225 paddr = JME_RX_RING_ADDR(sc, 0);
2226 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2227 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2229 /* Restart receiver/transmitter. */
2230 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2232 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2235 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2236 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2237 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2238 /* Unblock execution of task. */
2239 taskqueue_unblock(sc->jme_tq);
2240 /* Reenable interrupts. */
2241 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2249 struct jme_softc *sc;
2252 sc = (struct jme_softc *)arg;
2254 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
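	/*
	 * Descriptive note (added): a status of 0 means the interrupt is not
	 * ours, while all-ones usually means the hardware has disappeared
	 * (e.g. surprise removal), so both cases are treated as stray below.
	 */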
2255 if (status == 0 || status == 0xFFFFFFFF)
2256 return (FILTER_STRAY);
2257 /* Disable interrupts. */
2258 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2259 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2261 return (FILTER_HANDLED);
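
/*
 * Descriptive note (added): jme_intr() runs as a fast interrupt filter.
 * It only masks further interrupts and defers the real work to
 * jme_int_task() on the driver taskqueue, which re-enables interrupts
 * once processing is finished.
 */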
2265 jme_int_task(void *arg, int pending)
2267 struct jme_softc *sc;
2272 sc = (struct jme_softc *)arg;
2275 status = CSR_READ_4(sc, JME_INTR_STATUS);
2276 more = atomic_readandclear_int(&sc->jme_morework);
2278 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2281 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2283 /* Reset PCC counter/timer and Ack interrupts. */
2284 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2285 if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2286 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2287 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2288 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2289 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2291 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2292 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2293 more = jme_rxintr(sc, sc->jme_process_limit);
2295 atomic_set_int(&sc->jme_morework, 1);
2297 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2299 * Notify the hardware of the availability of new Rx
2301 * buffers. Reading RXCSR takes a very long time under
2302 * heavy load, so cache the RXCSR value and write
2303 * the ORed value with the kick command to
2304 * the RXCSR. This saves one register access
2307 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2308 RXCSR_RX_ENB | RXCSR_RXQ_START);
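			/*
			 * Illustrative sketch (not in the original source):
			 * without the cached sc->jme_rxcsr the kick above
			 * would need a read-modify-write of the register,
			 * e.g.
			 *
			 *	reg = CSR_READ_4(sc, JME_RXCSR);
			 *	CSR_WRITE_4(sc, JME_RXCSR, reg |
			 *	    RXCSR_RX_ENB | RXCSR_RXQ_START);
			 *
			 * Reusing the cached value avoids the slow RXCSR
			 * read in this hot path.
			 */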
2311 * Reclaiming Tx buffers is deferred so that jme(4) can run
2312 * without holding locks.
2314 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2315 taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
2318 if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2319 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2323 /* Reenable interrupts. */
2324 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2328 jme_txeof(struct jme_softc *sc)
2331 struct jme_txdesc *txd;
2335 JME_LOCK_ASSERT(sc);
2339 cons = sc->jme_cdata.jme_tx_cons;
2340 if (cons == sc->jme_cdata.jme_tx_prod)
2343 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2344 sc->jme_cdata.jme_tx_ring_map,
2345 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2348 * Go through our Tx list and free mbufs for those
2349 * frames which have been transmitted.
2351 for (; cons != sc->jme_cdata.jme_tx_prod;) {
2352 txd = &sc->jme_cdata.jme_txdesc[cons];
2353 status = le32toh(txd->tx_desc->flags);
2354 if ((status & JME_TD_OWN) == JME_TD_OWN)
2357 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2361 if ((status & JME_TD_COLLISION) != 0)
2362 ifp->if_collisions +=
2363 le32toh(txd->tx_desc->buflen) &
2364 JME_TD_BUF_LEN_MASK;
2367 * Only the first descriptor of a multi-descriptor
2368 * transmission is updated, so the driver has to skip the
2369 * entire buffer chain for the transmitted frame. In other
2370 * words, the JME_TD_OWN bit is valid only in the first
2371 * descriptor of a multi-descriptor transmission.
2373 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2374 sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2375 JME_DESC_INC(cons, JME_TX_RING_CNT);
2378 /* Reclaim transferred mbufs. */
2379 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2380 BUS_DMASYNC_POSTWRITE);
2381 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2383 KASSERT(txd->tx_m != NULL,
2384 ("%s: freeing NULL mbuf!\n", __func__));
2387 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2388 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2389 ("%s: Active Tx desc counter was garbled\n", __func__));
2391 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2393 sc->jme_cdata.jme_tx_cons = cons;
2394 /* Disarm the watchdog timer when there are no pending descriptors in the queue. */
2395 if (sc->jme_cdata.jme_tx_cnt == 0)
2396 sc->jme_watchdog_timer = 0;
2398 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2399 sc->jme_cdata.jme_tx_ring_map,
2400 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
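
/*
 * Illustrative sketch, assuming JME_DESC_INC() is the usual ring-index
 * advance macro from if_jmevar.h, e.g.:
 *
 *	#define JME_DESC_INC(x, y)	((x) = ((x) + 1) % (y))
 *
 * so the consumer index in jme_txeof() simply wraps around
 * JME_TX_RING_CNT.
 */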
2403 static __inline void
2404 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2406 struct jme_desc *desc;
2408 desc = &sc->jme_rdata.jme_rx_ring[cons];
2409 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2410 desc->buflen = htole32(MCLBYTES);
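	/*
	 * Descriptive note (added): discarding an Rx buffer simply re-arms
	 * the descriptor (OWN bit and full cluster length) so the hardware
	 * reuses the mbuf cluster that is already attached; no new
	 * allocation is needed.
	 */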
2413 /* Receive a frame. */
2415 jme_rxeof(struct jme_softc *sc)
2418 struct jme_desc *desc;
2419 struct jme_rxdesc *rxd;
2420 struct mbuf *mp, *m;
2421 uint32_t flags, status;
2422 int cons, count, nsegs;
2426 cons = sc->jme_cdata.jme_rx_cons;
2427 desc = &sc->jme_rdata.jme_rx_ring[cons];
2428 flags = le32toh(desc->flags);
2429 status = le32toh(desc->buflen);
2430 nsegs = JME_RX_NSEGS(status);
2431 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2432 if ((status & JME_RX_ERR_STAT) != 0) {
2434 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2435 #ifdef JME_SHOW_ERRORS
2436 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2437 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2439 sc->jme_cdata.jme_rx_cons += nsegs;
2440 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2444 for (count = 0; count < nsegs; count++,
2445 JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2446 rxd = &sc->jme_cdata.jme_rxdesc[cons];
2448 /* Add a new receive buffer to the ring. */
2449 if (jme_newbuf(sc, rxd) != 0) {
2452 for (; count < nsegs; count++) {
2453 jme_discard_rxbuf(sc, cons);
2454 JME_DESC_INC(cons, JME_RX_RING_CNT);
2456 if (sc->jme_cdata.jme_rxhead != NULL) {
2457 m_freem(sc->jme_cdata.jme_rxhead);
2458 JME_RXCHAIN_RESET(sc);
2464 * Assume we've received a full-sized frame.
2465 * The actual size is fixed up when we encounter the end of
2466 * a multi-segmented frame.
2468 mp->m_len = MCLBYTES;
2470 /* Chain received mbufs. */
2471 if (sc->jme_cdata.jme_rxhead == NULL) {
2472 sc->jme_cdata.jme_rxhead = mp;
2473 sc->jme_cdata.jme_rxtail = mp;
2476 * The receive processor can accept a maximum frame
2477 * size of 65535 bytes.
2479 mp->m_flags &= ~M_PKTHDR;
2480 sc->jme_cdata.jme_rxtail->m_next = mp;
2481 sc->jme_cdata.jme_rxtail = mp;
2484 if (count == nsegs - 1) {
2485 /* Last desc. for this frame. */
2486 m = sc->jme_cdata.jme_rxhead;
2487 m->m_flags |= M_PKTHDR;
2488 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
2490 /* Set first mbuf size. */
2491 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2492 /* Set last mbuf size. */
2493 mp->m_len = sc->jme_cdata.jme_rxlen -
2494 ((MCLBYTES - JME_RX_PAD_BYTES) +
2495 (MCLBYTES * (nsegs - 2)));
2497 m->m_len = sc->jme_cdata.jme_rxlen;
2498 m->m_pkthdr.rcvif = ifp;
2501 * Account for the 10 bytes of auto padding which is used
2502 * to align the IP header on a 32-bit boundary. Also note
2503 * that the CRC bytes are automatically stripped by the hardware.
2506 m->m_data += JME_RX_PAD_BYTES;
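			/*
			 * Worked example (illustrative, not from the
			 * original source): assume MCLBYTES is 2048 and the
			 * hardware reports a 3000-byte frame (including the
			 * 10 pad bytes), i.e. nsegs = 2 and jme_rxlen =
			 * 2990.  The first mbuf then carries 2048 - 10 =
			 * 2038 bytes, the last one carries 2990 - 2038 =
			 * 952 bytes, and advancing m_data past the 10 pad
			 * bytes leaves the IP header at offset 10 + 14 = 24
			 * from the start of the cluster, a 32-bit aligned
			 * offset.
			 */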
2508 /* Set checksum information. */
2509 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2510 (flags & JME_RD_IPV4) != 0) {
2511 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2512 if ((flags & JME_RD_IPCSUM) != 0)
2513 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2514 if (((flags & JME_RD_MORE_FRAG) == 0) &&
2515 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2516 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2517 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2518 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2519 m->m_pkthdr.csum_flags |=
2520 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2521 m->m_pkthdr.csum_data = 0xffff;
2525 /* Check for VLAN tagged packets. */
2526 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2527 (flags & JME_RD_VLAN_TAG) != 0) {
2528 m->m_pkthdr.ether_vtag =
2529 flags & JME_RD_VLAN_MASK;
2530 m->m_flags |= M_VLANTAG;
2535 (*ifp->if_input)(ifp, m);
2537 /* Reset mbuf chains. */
2538 JME_RXCHAIN_RESET(sc);
2542 sc->jme_cdata.jme_rx_cons += nsegs;
2543 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2547 jme_rxintr(struct jme_softc *sc, int count)
2549 struct jme_desc *desc;
2550 int nsegs, prog, pktlen;
2552 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2553 sc->jme_cdata.jme_rx_ring_map,
2554 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2556 for (prog = 0; count > 0; prog++) {
2557 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2558 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2560 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2562 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2564 * Check the number of segments against the received bytes.
2565 * A non-matching value would indicate that the hardware
2566 * is still trying to update the Rx descriptors. I'm not
2567 * sure whether this check is needed.
2569 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2570 if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
2573 /* Received a frame. */
2579 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2580 sc->jme_cdata.jme_rx_ring_map,
2581 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2583 return (count > 0 ? 0 : EAGAIN);
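
/*
 * Descriptive note (added): returning 0 means the processing limit was not
 * exhausted; EAGAIN tells jme_int_task() that more Rx descriptors may still
 * be pending so it should record the fact and poll again.
 */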
2589 struct jme_softc *sc;
2590 struct mii_data *mii;
2592 sc = (struct jme_softc *)arg;
2594 JME_LOCK_ASSERT(sc);
2596 mii = device_get_softc(sc->jme_miibus);
2599 * Reclaim Tx buffers that have been completed. It's not
2600 * strictly needed here, but it releases allocated mbuf chains
2601 * sooner and limits the maximum reclaim delay to one hz tick.
2604 jme_stats_update(sc);
2606 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2610 jme_reset(struct jme_softc *sc)
2613 /* Stop receiver, transmitter. */
2616 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2618 CSR_WRITE_4(sc, JME_GHC, 0);
2624 struct jme_softc *sc;
2626 sc = (struct jme_softc *)xsc;
2628 jme_init_locked(sc);
2633 jme_init_locked(struct jme_softc *sc)
2636 struct mii_data *mii;
2637 uint8_t eaddr[ETHER_ADDR_LEN];
2642 JME_LOCK_ASSERT(sc);
2645 mii = device_get_softc(sc->jme_miibus);
2647 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2650 * Cancel any pending I/O.
2655 * Reset the chip to a known state.
2659 /* Init descriptors. */
2660 error = jme_init_rx_ring(sc);
2662 device_printf(sc->jme_dev,
2663 "%s: initialization failed: no memory for Rx buffers.\n",
2668 jme_init_tx_ring(sc);
2669 /* Initialize shadow status block. */
2672 /* Reprogram the station address. */
2673 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2674 CSR_WRITE_4(sc, JME_PAR0,
2675 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2676 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
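	/*
	 * Worked example (illustrative): for station address
	 * 00:1d:92:aa:bb:cc, eaddr[] = { 0x00, 0x1d, 0x92, 0xaa, 0xbb, 0xcc },
	 * so JME_PAR0 is written as 0xaa921d00 and JME_PAR1 as 0x0000ccbb;
	 * the address is effectively stored little-endian across the two
	 * registers.
	 */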
2679 * Configure Tx queue.
2680 * Tx priority queue weight value : 0
2681 * Tx FIFO threshold for processing next packet : 16QW
2682 * Maximum Tx DMA length : 512
2683 * Allow Tx DMA burst.
2685 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2686 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2687 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2688 sc->jme_txcsr |= sc->jme_tx_dma_size;
2689 sc->jme_txcsr |= TXCSR_DMA_BURST;
2690 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2692 /* Set Tx descriptor counter. */
2693 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2695 /* Set Tx ring address to the hardware. */
2696 paddr = JME_TX_RING_ADDR(sc, 0);
2697 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2698 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2700 /* Configure TxMAC parameters. */
2701 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2702 reg |= TXMAC_THRESH_1_PKT;
2703 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2704 CSR_WRITE_4(sc, JME_TXMAC, reg);
2707 * Configure Rx queue.
2708 * FIFO full threshold for transmitting Tx pause packet : 128T
2709 * FIFO threshold for processing next packet : 128QW
2711 * Max Rx DMA length : 128
2712 * Rx descriptor retry : 32
2713 * Rx descriptor retry time gap : 256ns
2714 * Don't receive runt/bad frame.
2716 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2718 * Since the Rx FIFO size is 4K bytes, receiving frames larger
2719 * than 4K bytes will suffer from Rx FIFO overruns. So
2720 * decrease the FIFO threshold to reduce FIFO overruns for
2721 * frames larger than 4000 bytes.
2722 * For best performance with standard MTU sized frames use the
2723 * maximum allowable FIFO threshold, 128QW. Note that these
2724 * values do not hold on chips with full mask revision >= 2;
2725 * for those controllers 64QW and 128QW are not valid values.
2727 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2728 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2730 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2731 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2732 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2734 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2736 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2737 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2738 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2739 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
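	/*
	 * Worked example (illustrative): with the standard 1500-byte MTU the
	 * largest frame is 1500 + 14 + 4 + 4 = 1522 bytes (header, VLAN tag,
	 * CRC), well below the 4K Rx FIFO, so full mask revision < 2
	 * controllers keep the 128QW threshold; a jumbo MTU such as 9000
	 * exceeds the FIFO size and falls back to 16QW.
	 */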
2741 /* Set Rx descriptor counter. */
2742 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2744 /* Set Rx ring address to the hardware. */
2745 paddr = JME_RX_RING_ADDR(sc, 0);
2746 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2747 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2749 /* Clear receive filter. */
2750 CSR_WRITE_4(sc, JME_RXMAC, 0);
2751 /* Set up the receive filter. */
2756 * Disable all WOL bits as WOL can interfere with normal Rx
2757 * operation. Also clear the WOL detection status bits.
2759 reg = CSR_READ_4(sc, JME_PMCS);
2760 reg &= ~PMCS_WOL_ENB_MASK;
2761 CSR_WRITE_4(sc, JME_PMCS, reg);
2763 reg = CSR_READ_4(sc, JME_RXMAC);
2765 * Pad 10 bytes right before the received frame. This greatly
2766 * helps Rx performance on strict-alignment architectures as
2767 * the driver does not need to copy the frame to align the payload.
2769 reg |= RXMAC_PAD_10BYTES;
2770 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2771 reg |= RXMAC_CSUM_ENB;
2772 CSR_WRITE_4(sc, JME_RXMAC, reg);
2774 /* Configure general purpose reg0 */
2775 reg = CSR_READ_4(sc, JME_GPREG0);
2776 reg &= ~GPREG0_PCC_UNIT_MASK;
2777 /* Set PCC timer resolution to micro-seconds unit. */
2778 reg |= GPREG0_PCC_UNIT_US;
2780 * Disable all shadow register posting as we have to read the
2781 * JME_INTR_STATUS register in jme_int_task. It also seems
2782 * hard to synchronize the interrupt status between the
2783 * hardware and software with shadow posting due to the
2784 * requirements of bus_dmamap_sync(9).
2786 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2787 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2788 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2789 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2790 /* Disable posting of DW0. */
2791 reg &= ~GPREG0_POST_DW0_ENB;
2792 /* Clear PME message. */
2793 reg &= ~GPREG0_PME_ENB;
2794 /* Set PHY address. */
2795 reg &= ~GPREG0_PHY_ADDR_MASK;
2796 reg |= sc->jme_phyaddr;
2797 CSR_WRITE_4(sc, JME_GPREG0, reg);
2799 /* Configure Tx queue 0 packet completion coalescing. */
2800 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2802 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2803 PCCTX_COAL_PKT_MASK;
2804 reg |= PCCTX_COAL_TXQ0;
2805 CSR_WRITE_4(sc, JME_PCCTX, reg);
2807 /* Configure Rx queue 0 packet completion coalescing. */
2808 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2810 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2811 PCCRX_COAL_PKT_MASK;
2812 CSR_WRITE_4(sc, JME_PCCRX0, reg);
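	/*
	 * Descriptive note (added): since GPREG0_PCC_UNIT_US is selected
	 * above, the coalescing timeouts programmed here are in
	 * microseconds; the queue completion interrupt presumably fires
	 * when either the packet count or the timeout threshold is
	 * reached, whichever happens first.
	 */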
2814 /* Configure shadow status block but don't enable posting. */
2815 paddr = sc->jme_rdata.jme_ssb_block_paddr;
2816 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2817 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2819 /* Disable Timer 1 and Timer 2. */
2820 CSR_WRITE_4(sc, JME_TIMER1, 0);
2821 CSR_WRITE_4(sc, JME_TIMER2, 0);
2823 /* Configure retry transmit period, retry limit value. */
2824 CSR_WRITE_4(sc, JME_TXTRHD,
2825 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2826 TXTRHD_RT_PERIOD_MASK) |
2827 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2828 TXTRHD_RT_LIMIT_MASK));
2831 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2833 /* Initialize the interrupt mask. */
2834 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2835 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2838 * Enabling Tx/Rx DMA engines and Rx queue processing is
2839 * done after detection of valid link in jme_link_task.
2842 sc->jme_flags &= ~JME_FLAG_LINK;
2843 /* Set the current media. */
2846 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2848 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2849 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2853 jme_stop(struct jme_softc *sc)
2856 struct jme_txdesc *txd;
2857 struct jme_rxdesc *rxd;
2860 JME_LOCK_ASSERT(sc);
2862 * Mark the interface down and cancel the watchdog timer.
2865 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2866 sc->jme_flags &= ~JME_FLAG_LINK;
2867 callout_stop(&sc->jme_tick_ch);
2868 sc->jme_watchdog_timer = 0;
2871 * Disable interrupts.
2873 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2874 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2876 /* Disable updating shadow status block. */
2877 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2878 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2880 /* Stop receiver, transmitter. */
2884 /* Reclaim Rx/Tx buffers that have been completed. */
2885 jme_rxintr(sc, JME_RX_RING_CNT);
2886 if (sc->jme_cdata.jme_rxhead != NULL)
2887 m_freem(sc->jme_cdata.jme_rxhead);
2888 JME_RXCHAIN_RESET(sc);
2891 * Free RX and TX mbufs still in the queues.
2893 for (i = 0; i < JME_RX_RING_CNT; i++) {
2894 rxd = &sc->jme_cdata.jme_rxdesc[i];
2895 if (rxd->rx_m != NULL) {
2896 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
2897 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2898 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2904 for (i = 0; i < JME_TX_RING_CNT; i++) {
2905 txd = &sc->jme_cdata.jme_txdesc[i];
2906 if (txd->tx_m != NULL) {
2907 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
2908 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2909 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
2916 jme_stats_update(sc);
2921 jme_stop_tx(struct jme_softc *sc)
2926 reg = CSR_READ_4(sc, JME_TXCSR);
2927 if ((reg & TXCSR_TX_ENB) == 0)
2929 reg &= ~TXCSR_TX_ENB;
2930 CSR_WRITE_4(sc, JME_TXCSR, reg);
2931 for (i = JME_TIMEOUT; i > 0; i--) {
2933 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
2937 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
2941 jme_stop_rx(struct jme_softc *sc)
2946 reg = CSR_READ_4(sc, JME_RXCSR);
2947 if ((reg & RXCSR_RX_ENB) == 0)
2949 reg &= ~RXCSR_RX_ENB;
2950 CSR_WRITE_4(sc, JME_RXCSR, reg);
2951 for (i = JME_TIMEOUT; i > 0; i--) {
2953 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
2957 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
2961 jme_init_tx_ring(struct jme_softc *sc)
2963 struct jme_ring_data *rd;
2964 struct jme_txdesc *txd;
2967 sc->jme_cdata.jme_tx_prod = 0;
2968 sc->jme_cdata.jme_tx_cons = 0;
2969 sc->jme_cdata.jme_tx_cnt = 0;
2971 rd = &sc->jme_rdata;
2972 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2973 for (i = 0; i < JME_TX_RING_CNT; i++) {
2974 txd = &sc->jme_cdata.jme_txdesc[i];
2976 txd->tx_desc = &rd->jme_tx_ring[i];
2980 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2981 sc->jme_cdata.jme_tx_ring_map,
2982 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2986 jme_init_ssb(struct jme_softc *sc)
2988 struct jme_ring_data *rd;
2990 rd = &sc->jme_rdata;
2991 bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2992 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2993 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2997 jme_init_rx_ring(struct jme_softc *sc)
2999 struct jme_ring_data *rd;
3000 struct jme_rxdesc *rxd;
3003 sc->jme_cdata.jme_rx_cons = 0;
3004 JME_RXCHAIN_RESET(sc);
3005 atomic_set_int(&sc->jme_morework, 0);
3007 rd = &sc->jme_rdata;
3008 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3009 for (i = 0; i < JME_RX_RING_CNT; i++) {
3010 rxd = &sc->jme_cdata.jme_rxdesc[i];
3012 rxd->rx_desc = &rd->jme_rx_ring[i];
3013 if (jme_newbuf(sc, rxd) != 0)
3017 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3018 sc->jme_cdata.jme_rx_ring_map,
3019 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3025 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3027 struct jme_desc *desc;
3029 bus_dma_segment_t segs[1];
3033 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3037 * The JMC250 has a 64-bit boundary alignment limitation, so jme(4)
3038 * takes advantage of the hardware's 10-byte padding feature
3039 * in order not to copy the entire frame just to align the IP header on a 32-bit boundary.
3042 m->m_len = m->m_pkthdr.len = MCLBYTES;
3044 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3045 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3049 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3051 if (rxd->rx_m != NULL) {
3052 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3053 BUS_DMASYNC_POSTREAD);
3054 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
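	/*
	 * Descriptive note (added): swap the freshly loaded spare map into
	 * this ring entry and keep the old, now unloaded map as the new
	 * spare, so a later mbuf/DMA load failure never leaves the entry
	 * without a valid mapping.
	 */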
3056 map = rxd->rx_dmamap;
3057 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3058 sc->jme_cdata.jme_rx_sparemap = map;
3059 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3060 BUS_DMASYNC_PREREAD);
3063 desc = rxd->rx_desc;
3064 desc->buflen = htole32(segs[0].ds_len);
3065 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3066 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3067 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
3073 jme_set_vlan(struct jme_softc *sc)
3078 JME_LOCK_ASSERT(sc);
3081 reg = CSR_READ_4(sc, JME_RXMAC);
3082 reg &= ~RXMAC_VLAN_ENB;
3083 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3084 reg |= RXMAC_VLAN_ENB;
3085 CSR_WRITE_4(sc, JME_RXMAC, reg);
3089 jme_set_filter(struct jme_softc *sc)
3092 struct ifmultiaddr *ifma;
3097 JME_LOCK_ASSERT(sc);
3101 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3102 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3104 /* Always accept frames destined to our station address. */
3105 rxcfg |= RXMAC_UNICAST;
3106 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3107 rxcfg |= RXMAC_BROADCAST;
3108 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3109 if ((ifp->if_flags & IFF_PROMISC) != 0)
3110 rxcfg |= RXMAC_PROMISC;
3111 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3112 rxcfg |= RXMAC_ALLMULTI;
3113 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3114 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3115 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3120 * Set up the multicast address filter by passing all multicast
3121 * addresses through a CRC generator, and then using the low-order
3122 * 6 bits as an index into the 64 bit multicast hash table. The
3123 * high order bits select the register, while the rest of the bits
3124 * select the bit within the register.
3126 rxcfg |= RXMAC_MULTICAST;
3127 bzero(mchash, sizeof(mchash));
3129 if_maddr_rlock(ifp);
3130 TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
3131 if (ifma->ifma_addr->sa_family != AF_LINK)
3133 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3134 ifma->ifma_addr), ETHER_ADDR_LEN);
3136 /* Just want the 6 least significant bits. */
3139 /* Set the corresponding bit in the hash table. */
3140 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3142 if_maddr_runlock(ifp);
3144 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3145 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3146 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
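
/*
 * Illustrative sketch (not part of the original source): membership of a
 * multicast address in the 64-bit hash above can be recomputed the same
 * way, assuming "sdl" is a struct sockaddr_dl pointer holding the address:
 *
 *	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0x3f;
 *	hit = (mchash[crc >> 5] >> (crc & 0x1f)) & 1;
 *
 * The upper bit of the 6-bit value selects MAR0/MAR1 and the low five bits
 * select the bit within that register.
 */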
3150 jme_stats_clear(struct jme_softc *sc)
3153 JME_LOCK_ASSERT(sc);
3155 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3158 /* Disable and clear counters. */
3159 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3160 /* Activate hw counters. */
3161 CSR_WRITE_4(sc, JME_STATCSR, 0);
3162 CSR_READ_4(sc, JME_STATCSR);
3163 bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3167 jme_stats_save(struct jme_softc *sc)
3170 JME_LOCK_ASSERT(sc);
3172 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3174 /* Save current counters. */
3175 bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3176 /* Disable and clear counters. */
3177 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3181 jme_stats_update(struct jme_softc *sc)
3183 struct jme_hw_stats *stat, *ostat;
3186 JME_LOCK_ASSERT(sc);
3188 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3190 stat = &sc->jme_stats;
3191 ostat = &sc->jme_ostats;
3192 stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3193 stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3194 reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3195 stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3196 STAT_RX_CRC_ERR_SHIFT;
3197 stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3198 STAT_RX_MII_ERR_SHIFT;
3199 reg = CSR_READ_4(sc, JME_STAT_RXERR);
3200 stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3201 STAT_RXERR_OFLOW_SHIFT;
3202 stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3203 STAT_RXERR_MPTY_SHIFT;
3204 reg = CSR_READ_4(sc, JME_STAT_FAIL);
3205 stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3206 stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3208 /* Account for previous counters. */
3209 stat->rx_good_frames += ostat->rx_good_frames;
3210 stat->rx_crc_errs += ostat->rx_crc_errs;
3211 stat->rx_mii_errs += ostat->rx_mii_errs;
3212 stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3213 stat->rx_desc_empty += ostat->rx_desc_empty;
3214 stat->rx_bad_frames += ostat->rx_bad_frames;
3215 stat->tx_good_frames += ostat->tx_good_frames;
3216 stat->tx_bad_frames += ostat->tx_bad_frames;
3220 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3226 value = *(int *)arg1;
3227 error = sysctl_handle_int(oidp, &value, 0, req);
3228 if (error || req->newptr == NULL)
3230 if (value < low || value > high)
3232 *(int *)arg1 = value;
3238 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3240 return (sysctl_int_range(oidp, arg1, arg2, req,
3241 PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3245 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3247 return (sysctl_int_range(oidp, arg1, arg2, req,
3248 PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3252 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3254 return (sysctl_int_range(oidp, arg1, arg2, req,
3255 PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3259 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3261 return (sysctl_int_range(oidp, arg1, arg2, req,
3262 PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3266 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3268 return (sysctl_int_range(oidp, arg1, arg2, req,
3269 JME_PROC_MIN, JME_PROC_MAX));