2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
39 #include <sys/module.h>
41 #include <sys/queue.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/taskqueue.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_vlan_var.h>
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
67 #include <machine/bus.h>
68 #include <machine/in_cksum.h>
70 #include <dev/jme/if_jmereg.h>
71 #include <dev/jme/if_jmevar.h>
73 /* "device miibus" required. See GENERIC if you get errors here. */
74 #include "miibus_if.h"
76 /* Define the following to disable printing Rx errors. */
77 #undef JME_SHOW_ERRORS
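/* Checksum features handed to the stack via if_hwassist when Tx offload is enabled. */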
79 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
81 MODULE_DEPEND(jme, pci, 1, 1, 1);
82 MODULE_DEPEND(jme, ether, 1, 1, 1);
83 MODULE_DEPEND(jme, miibus, 1, 1, 1);
86 static int msi_disable = 0;
87 static int msix_disable = 0;
88 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
89 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
92 * Devices supported by this driver.
94 static struct jme_dev {
95 uint16_t jme_vendorid;
96 uint16_t jme_deviceid;
99 { VENDORID_JMICRON, DEVICEID_JMC250,
100 "JMicron Inc, JMC25x Gigabit Ethernet" },
101 { VENDORID_JMICRON, DEVICEID_JMC260,
102 "JMicron Inc, JMC26x Fast Ethernet" },
105 static int jme_miibus_readreg(device_t, int, int);
106 static int jme_miibus_writereg(device_t, int, int, int);
107 static void jme_miibus_statchg(device_t);
108 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
109 static int jme_mediachange(struct ifnet *);
110 static int jme_probe(device_t);
111 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
112 static int jme_eeprom_macaddr(struct jme_softc *);
113 static int jme_efuse_macaddr(struct jme_softc *);
114 static void jme_reg_macaddr(struct jme_softc *);
115 static void jme_set_macaddr(struct jme_softc *, uint8_t *);
116 static void jme_map_intr_vector(struct jme_softc *);
117 static int jme_attach(device_t);
118 static int jme_detach(device_t);
119 static void jme_sysctl_node(struct jme_softc *);
120 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
121 static int jme_dma_alloc(struct jme_softc *);
122 static void jme_dma_free(struct jme_softc *);
123 static int jme_shutdown(device_t);
124 static void jme_setlinkspeed(struct jme_softc *);
125 static void jme_setwol(struct jme_softc *);
126 static int jme_suspend(device_t);
127 static int jme_resume(device_t);
128 static int jme_encap(struct jme_softc *, struct mbuf **);
129 static void jme_start(struct ifnet *);
130 static void jme_start_locked(struct ifnet *);
131 static void jme_watchdog(struct jme_softc *);
132 static int jme_ioctl(struct ifnet *, u_long, caddr_t);
133 static void jme_mac_config(struct jme_softc *);
134 static void jme_link_task(void *, int);
135 static int jme_intr(void *);
136 static void jme_int_task(void *, int);
137 static void jme_txeof(struct jme_softc *);
138 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
139 static void jme_rxeof(struct jme_softc *);
140 static int jme_rxintr(struct jme_softc *, int);
141 static void jme_tick(void *);
142 static void jme_reset(struct jme_softc *);
143 static void jme_init(void *);
144 static void jme_init_locked(struct jme_softc *);
145 static void jme_stop(struct jme_softc *);
146 static void jme_stop_tx(struct jme_softc *);
147 static void jme_stop_rx(struct jme_softc *);
148 static int jme_init_rx_ring(struct jme_softc *);
149 static void jme_init_tx_ring(struct jme_softc *);
150 static void jme_init_ssb(struct jme_softc *);
151 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
152 static void jme_set_vlan(struct jme_softc *);
153 static void jme_set_filter(struct jme_softc *);
154 static void jme_stats_clear(struct jme_softc *);
155 static void jme_stats_save(struct jme_softc *);
156 static void jme_stats_update(struct jme_softc *);
157 static void jme_phy_down(struct jme_softc *);
158 static void jme_phy_up(struct jme_softc *);
159 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
160 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
161 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
162 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
163 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
164 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
167 static device_method_t jme_methods[] = {
168 /* Device interface. */
169 DEVMETHOD(device_probe, jme_probe),
170 DEVMETHOD(device_attach, jme_attach),
171 DEVMETHOD(device_detach, jme_detach),
172 DEVMETHOD(device_shutdown, jme_shutdown),
173 DEVMETHOD(device_suspend, jme_suspend),
174 DEVMETHOD(device_resume, jme_resume),
177 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
178 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
179 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
184 static driver_t jme_driver = {
187 sizeof(struct jme_softc)
190 static devclass_t jme_devclass;
192 DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
193 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
195 static struct resource_spec jme_res_spec_mem[] = {
196 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
200 static struct resource_spec jme_irq_spec_legacy[] = {
201 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
205 static struct resource_spec jme_irq_spec_msi[] = {
206 { SYS_RES_IRQ, 1, RF_ACTIVE },
211 * Read a PHY register on the MII of the JMC250.
214 jme_miibus_readreg(device_t dev, int phy, int reg)
216 struct jme_softc *sc;
220 sc = device_get_softc(dev);
222 /* For FPGA version, PHY address 0 should be ignored. */
223 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
226 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
227 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
228 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
230 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
235 device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
239 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
243 * Write a PHY register on the MII of the JMC250.
246 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
248 struct jme_softc *sc;
251 sc = device_get_softc(dev);
253 /* For FPGA version, PHY address 0 should be ignored. */
254 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
257 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
258 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
259 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
260 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
262 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
267 device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
273 * Callback from MII layer when media changes.
276 jme_miibus_statchg(device_t dev)
278 struct jme_softc *sc;
280 sc = device_get_softc(dev);
281 taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
285 * Get the current interface media status.
288 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
290 struct jme_softc *sc;
291 struct mii_data *mii;
295 if ((ifp->if_flags & IFF_UP) == 0) {
299 mii = device_get_softc(sc->jme_miibus);
302 ifmr->ifm_status = mii->mii_media_status;
303 ifmr->ifm_active = mii->mii_media_active;
308 * Set hardware to newly-selected media.
311 jme_mediachange(struct ifnet *ifp)
313 struct jme_softc *sc;
314 struct mii_data *mii;
315 struct mii_softc *miisc;
320 mii = device_get_softc(sc->jme_miibus);
321 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
322 mii_phy_reset(miisc);
323 error = mii_mediachg(mii);
330 jme_probe(device_t dev)
334 uint16_t vendor, devid;
336 vendor = pci_get_vendor(dev);
337 devid = pci_get_device(dev);
339 for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
341 if (vendor == sp->jme_vendorid &&
342 devid == sp->jme_deviceid) {
343 device_set_desc(dev, sp->jme_name);
344 return (BUS_PROBE_DEFAULT);
352 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
358 for (i = JME_TIMEOUT; i > 0; i--) {
359 reg = CSR_READ_4(sc, JME_SMBCSR);
360 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
366 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
370 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
371 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
372 for (i = JME_TIMEOUT; i > 0; i--) {
374 reg = CSR_READ_4(sc, JME_SMBINTF);
375 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
380 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
384 reg = CSR_READ_4(sc, JME_SMBINTF);
385 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
391 jme_eeprom_macaddr(struct jme_softc *sc)
393 uint8_t eaddr[ETHER_ADDR_LEN];
394 uint8_t fup, reg, val;
399 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
400 fup != JME_EEPROM_SIG0)
402 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
403 fup != JME_EEPROM_SIG1)
407 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
409 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
410 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
411 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
413 if (reg >= JME_PAR0 &&
414 reg < JME_PAR0 + ETHER_ADDR_LEN) {
415 if (jme_eeprom_read_byte(sc, offset + 2,
418 eaddr[reg - JME_PAR0] = val;
422 /* Check for the end of EEPROM descriptor. */
423 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
425 /* Try next eeprom descriptor. */
426 offset += JME_EEPROM_DESC_BYTES;
427 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
429 if (match == ETHER_ADDR_LEN) {
430 bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
438 jme_efuse_macaddr(struct jme_softc *sc)
443 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
444 if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
445 EFUSE_CTL1_AUTOLAOD_DONE)
447 /* Reset eFuse controller. */
448 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
449 reg |= EFUSE_CTL2_RESET;
450 pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
451 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
452 reg &= ~EFUSE_CTL2_RESET;
453 pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
455 /* Have eFuse reload station address to MAC controller. */
456 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
457 reg &= ~EFUSE_CTL1_CMD_MASK;
458 reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
459 pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);
462 * Verify completion of the eFuse autoload command. It should be
463 * completed within 108us.
466 for (i = 10; i > 0; i--) {
467 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
468 if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
469 EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
473 if ((reg & EFUSE_CTL1_EXECUTE) == 0)
475 /* Station address loading is still in progress. */
479 device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
487 jme_reg_macaddr(struct jme_softc *sc)
491 /* Read station address. */
492 par0 = CSR_READ_4(sc, JME_PAR0);
493 par1 = CSR_READ_4(sc, JME_PAR1);
495 if ((par0 == 0 && par1 == 0) ||
496 (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
497 device_printf(sc->jme_dev,
498 "generating fake ethernet address.\n");
500 /* Set OUI to JMicron. */
501 sc->jme_eaddr[0] = 0x02; /* U/L bit set. */
502 sc->jme_eaddr[1] = 0x1B;
503 sc->jme_eaddr[2] = 0x8C;
504 sc->jme_eaddr[3] = (par0 >> 16) & 0xff;
505 sc->jme_eaddr[4] = (par0 >> 8) & 0xff;
506 sc->jme_eaddr[5] = par0 & 0xff;
509 * For controllers that use eFuse, the station address
510 * could also be extracted from JME_PCI_PAR0 and
511 * JME_PCI_PAR1 registers in PCI configuration space.
512 * Each register holds exactly half of the station address (24 bits),
513 * so use the JME_PAR0 and JME_PAR1 registers instead.
515 sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
516 sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
517 sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
518 sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
519 sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
520 sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
525 jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
530 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
532 * Avoid reprogramming the station address if it is the same
533 * as the previous one. Note that a reprogrammed station
534 * address is permanent, as if it had been written to EEPROM,
535 * so if the station address was changed by the administrator
536 * it is possible to lose the factory-configured address when
537 * the driver later fails to restore it
538 * (e.g. after a reboot or system crash).
540 if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
541 for (i = 0; i < ETHER_ADDR_LEN; i++) {
542 val = JME_EFUSE_EEPROM_FUNC0 <<
543 JME_EFUSE_EEPROM_FUNC_SHIFT;
544 val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
545 JME_EFUSE_EEPROM_PAGE_SHIFT;
546 val |= (JME_PAR0 + i) <<
547 JME_EFUSE_EEPROM_ADDR_SHIFT;
548 val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
549 pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
550 val | JME_EFUSE_EEPROM_WRITE, 4);
554 CSR_WRITE_4(sc, JME_PAR0,
555 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
556 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
561 jme_map_intr_vector(struct jme_softc *sc)
563 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
565 bzero(map, sizeof(map));
567 /* Map Tx interrupt sources to MSI/MSIX vector 2. */
568 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] =
569 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
570 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
571 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
572 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
573 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
574 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
575 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
576 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
577 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
578 map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
579 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
580 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
581 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
582 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
583 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
584 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
585 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
586 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
587 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
589 /* Map Rx interrupt sources to MSI/MSIX vector 1. */
590 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] =
591 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
592 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] =
593 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
594 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] =
595 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
596 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] =
597 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
598 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] =
599 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
600 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] =
601 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
602 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] =
603 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
604 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] =
605 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
606 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] =
607 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
608 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] =
609 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
610 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] =
611 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
612 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] =
613 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
614 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] =
615 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
616 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] =
617 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
618 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] =
619 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
620 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] =
621 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
623 /* All other interrupt sources default to MSI/MSIX vector 0; program the map registers. */
624 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
625 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
626 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
627 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
631 jme_attach(device_t dev)
633 struct jme_softc *sc;
635 struct mii_softc *miisc;
636 struct mii_data *mii;
639 int error, i, mii_flags, msic, msixc, pmc;
642 sc = device_get_softc(dev);
645 mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
647 callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
648 TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
649 TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
652 * Map the device. The JMC250 supports both memory-mapped and I/O
653 * register space access. Because I/O register access requires
654 * juggling different BARs to reach all registers, it is not worth
655 * using; the JMC250 maps its entire register space into a single
656 * 16KB memory window.
658 pci_enable_busmaster(dev);
659 sc->jme_res_spec = jme_res_spec_mem;
660 sc->jme_irq_spec = jme_irq_spec_legacy;
661 error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
663 device_printf(dev, "cannot allocate memory resources.\n");
667 /* Allocate IRQ resources. */
668 msixc = pci_msix_count(dev);
669 msic = pci_msi_count(dev);
671 device_printf(dev, "MSIX count : %d\n", msixc);
672 device_printf(dev, "MSI count : %d\n", msic);
675 /* Use 1 MSI/MSI-X. */
680 /* Prefer MSIX over MSI. */
681 if (msix_disable == 0 || msi_disable == 0) {
682 if (msix_disable == 0 && msixc > 0 &&
683 pci_alloc_msix(dev, &msixc) == 0) {
685 device_printf(dev, "Using %d MSIX messages.\n",
687 sc->jme_flags |= JME_FLAG_MSIX;
688 sc->jme_irq_spec = jme_irq_spec_msi;
690 pci_release_msi(dev);
692 if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
693 msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
695 device_printf(dev, "Using %d MSI messages.\n",
697 sc->jme_flags |= JME_FLAG_MSI;
698 sc->jme_irq_spec = jme_irq_spec_msi;
700 pci_release_msi(dev);
702 /* Map interrupt vector 0, 1 and 2. */
703 if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
704 (sc->jme_flags & JME_FLAG_MSIX) != 0)
705 jme_map_intr_vector(sc);
708 error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
710 device_printf(dev, "cannot allocate IRQ resources.\n");
714 sc->jme_rev = pci_get_device(dev);
715 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
716 sc->jme_flags |= JME_FLAG_FASTETH;
717 sc->jme_flags |= JME_FLAG_NOJUMBO;
719 reg = CSR_READ_4(sc, JME_CHIPMODE);
720 sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
721 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
723 sc->jme_flags |= JME_FLAG_FPGA;
725 device_printf(dev, "PCI device revision : 0x%04x\n",
727 device_printf(dev, "Chip revision : 0x%02x\n",
729 if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
730 device_printf(dev, "FPGA revision : 0x%04x\n",
731 (reg & CHIPMODE_FPGA_REV_MASK) >>
732 CHIPMODE_FPGA_REV_SHIFT);
734 if (sc->jme_chip_rev == 0xFF) {
735 device_printf(dev, "Unknown chip revision : 0x%02x\n",
741 /* Identify controller features and bugs. */
742 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
743 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
744 CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
745 sc->jme_flags |= JME_FLAG_DMA32BIT;
746 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
747 sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
748 sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
749 sc->jme_flags |= JME_FLAG_HWMIB;
752 /* Reset the ethernet controller. */
755 /* Get station address. */
756 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
757 error = jme_efuse_macaddr(sc);
762 reg = CSR_READ_4(sc, JME_SMBCSR);
763 if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
764 error = jme_eeprom_macaddr(sc);
765 if (error != 0 && bootverbose)
766 device_printf(sc->jme_dev,
767 "ethernet hardware address not found in EEPROM.\n");
774 * The integrated JR0211 has a fixed PHY address, whereas the FPGA
775 * version requires PHY probing to find the correct PHY address.
777 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
778 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
779 GPREG0_PHY_ADDR_MASK;
781 device_printf(dev, "PHY is at address %d.\n",
786 /* Set max allowable DMA size. */
787 if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
788 sc->jme_flags |= JME_FLAG_PCIE;
789 burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
791 device_printf(dev, "Read request size : %d bytes.\n",
792 128 << ((burst >> 12) & 0x07));
793 device_printf(dev, "TLP payload size : %d bytes.\n",
794 128 << ((burst >> 5) & 0x07));
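/*
 * The maximum read request size field (bits 14:12 of the PCIe device
 * control register) determines the Tx DMA burst size selected below.
 */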
796 switch ((burst >> 12) & 0x07) {
798 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
801 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
804 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
807 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
809 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
810 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
812 /* Create coalescing sysctl node. */
814 if ((error = jme_dma_alloc(sc)) != 0)
817 ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
819 device_printf(dev, "cannot allocate ifnet structure.\n");
825 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
826 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
827 ifp->if_ioctl = jme_ioctl;
828 ifp->if_start = jme_start;
829 ifp->if_init = jme_init;
830 ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
831 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
832 IFQ_SET_READY(&ifp->if_snd);
833 /* JMC250 supports Tx/Rx checksum offload as well as TSO. */
834 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
835 ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
836 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
837 sc->jme_flags |= JME_FLAG_PMCAP;
838 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
840 ifp->if_capenable = ifp->if_capabilities;
844 mii_flags = MIIF_DOPAUSE;
845 /* Ask the PHY driver to perform PHY calibration. */
846 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
847 mii_flags |= MIIF_MACPRIV0;
848 /* Set up MII bus. */
849 error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
850 jme_mediastatus, BMSR_DEFCAPMASK,
851 sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
852 MII_OFFSET_ANY, mii_flags);
854 device_printf(dev, "attaching PHYs failed\n");
859 * Force PHY to FPGA mode.
861 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
862 mii = device_get_softc(sc->jme_miibus);
863 if (mii->mii_instance != 0) {
864 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
865 if (miisc->mii_phy != 0) {
866 sc->jme_phyaddr = miisc->mii_phy;
870 if (sc->jme_phyaddr != 0) {
871 device_printf(sc->jme_dev,
872 "FPGA PHY is at %d\n", sc->jme_phyaddr);
874 jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
880 ether_ifattach(ifp, sc->jme_eaddr);
882 /* VLAN capability setup */
883 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
884 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
885 ifp->if_capenable = ifp->if_capabilities;
887 /* Tell the upper layer(s) we support long frames. */
888 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
890 /* Create local taskq. */
891 sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
892 taskqueue_thread_enqueue, &sc->jme_tq);
893 if (sc->jme_tq == NULL) {
894 device_printf(dev, "could not create taskqueue.\n");
899 taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
900 device_get_nameunit(sc->jme_dev));
902 for (i = 0; i < 1; i++) {
903 error = bus_setup_intr(dev, sc->jme_irq[i],
904 INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
905 &sc->jme_intrhand[i]);
911 device_printf(dev, "could not set up interrupt handler.\n");
912 taskqueue_free(sc->jme_tq);
926 jme_detach(device_t dev)
928 struct jme_softc *sc;
932 sc = device_get_softc(dev);
935 if (device_is_attached(dev)) {
937 sc->jme_flags |= JME_FLAG_DETACH;
940 callout_drain(&sc->jme_tick_ch);
941 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
942 taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
943 /* Restore possibly modified station address. */
944 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
945 jme_set_macaddr(sc, sc->jme_eaddr);
949 if (sc->jme_tq != NULL) {
950 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
951 taskqueue_free(sc->jme_tq);
955 if (sc->jme_miibus != NULL) {
956 device_delete_child(dev, sc->jme_miibus);
957 sc->jme_miibus = NULL;
959 bus_generic_detach(dev);
967 for (i = 0; i < 1; i++) {
968 if (sc->jme_intrhand[i] != NULL) {
969 bus_teardown_intr(dev, sc->jme_irq[i],
970 sc->jme_intrhand[i]);
971 sc->jme_intrhand[i] = NULL;
975 if (sc->jme_irq[0] != NULL)
976 bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
977 if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
978 pci_release_msi(dev);
979 if (sc->jme_res[0] != NULL)
980 bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
981 mtx_destroy(&sc->jme_mtx);
986 #define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \
987 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
990 jme_sysctl_node(struct jme_softc *sc)
992 struct sysctl_ctx_list *ctx;
993 struct sysctl_oid_list *child, *parent;
994 struct sysctl_oid *tree;
995 struct jme_hw_stats *stats;
998 stats = &sc->jme_stats;
999 ctx = device_get_sysctl_ctx(sc->jme_dev);
1000 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
1002 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
1003 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0,
1004 sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
1006 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
1007 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt, 0,
1008 sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
1010 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
1011 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to, 0,
1012 sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
1014 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
1015 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt, 0,
1016 sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
1018 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
1019 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit, 0,
1020 sysctl_hw_jme_proc_limit, "I",
1021 "max number of Rx events to process");
1023 /* Pull in device tunables. */
1024 sc->jme_process_limit = JME_PROC_DEFAULT;
1025 error = resource_int_value(device_get_name(sc->jme_dev),
1026 device_get_unit(sc->jme_dev), "process_limit",
1027 &sc->jme_process_limit);
1029 if (sc->jme_process_limit < JME_PROC_MIN ||
1030 sc->jme_process_limit > JME_PROC_MAX) {
1031 device_printf(sc->jme_dev,
1032 "process_limit value out of range; "
1033 "using default: %d\n", JME_PROC_DEFAULT);
1034 sc->jme_process_limit = JME_PROC_DEFAULT;
1038 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1039 error = resource_int_value(device_get_name(sc->jme_dev),
1040 device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
1042 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
1043 sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
1044 device_printf(sc->jme_dev,
1045 "tx_coal_to value out of range; "
1046 "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
1047 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1051 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1052 error = resource_int_value(device_get_name(sc->jme_dev),
1053 device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
1055 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
1056 sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
1057 device_printf(sc->jme_dev,
1058 "tx_coal_pkt value out of range; "
1059 "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
1060 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1064 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1065 error = resource_int_value(device_get_name(sc->jme_dev),
1066 device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
1068 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
1069 sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
1070 device_printf(sc->jme_dev,
1071 "rx_coal_to value out of range; "
1072 "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
1073 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1077 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1078 error = resource_int_value(device_get_name(sc->jme_dev),
1079 device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
1081 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
1082 sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
1083 device_printf(sc->jme_dev,
1084 "tx_coal_pkt value out of range; "
1085 "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
1086 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1090 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
1093 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
1094 NULL, "JME statistics");
1095 parent = SYSCTL_CHILDREN(tree);
1097 /* Rx statistics. */
1098 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
1099 NULL, "Rx MAC statistics");
1100 child = SYSCTL_CHILDREN(tree);
1101 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1102 &stats->rx_good_frames, "Good frames");
1103 JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1104 &stats->rx_crc_errs, "CRC errors");
1105 JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
1106 &stats->rx_mii_errs, "MII errors");
1107 JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1108 &stats->rx_fifo_oflows, "FIFO overflows");
1109 JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
1110 &stats->rx_desc_empty, "Descriptor empty");
1111 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1112 &stats->rx_bad_frames, "Bad frames");
1114 /* Tx statistics. */
1115 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
1116 NULL, "Tx MAC statistics");
1117 child = SYSCTL_CHILDREN(tree);
1118 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1119 &stats->tx_good_frames, "Good frames");
1120 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1121 &stats->tx_bad_frames, "Bad frames");
1124 #undef JME_SYSCTL_STAT_ADD32
1126 struct jme_dmamap_arg {
1127 bus_addr_t jme_busaddr;
1131 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1133 struct jme_dmamap_arg *ctx;
1138 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1140 ctx = (struct jme_dmamap_arg *)arg;
1141 ctx->jme_busaddr = segs[0].ds_addr;
1145 jme_dma_alloc(struct jme_softc *sc)
1147 struct jme_dmamap_arg ctx;
1148 struct jme_txdesc *txd;
1149 struct jme_rxdesc *rxd;
1150 bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
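/* Use the full 64-bit DMA address range unless the controller is limited to 32-bit DMA. */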
1153 lowaddr = BUS_SPACE_MAXADDR;
1154 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1155 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1158 /* Create parent ring tag. */
1159 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1160 1, 0, /* algnmnt, boundary */
1161 lowaddr, /* lowaddr */
1162 BUS_SPACE_MAXADDR, /* highaddr */
1163 NULL, NULL, /* filter, filterarg */
1164 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1166 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1168 NULL, NULL, /* lockfunc, lockarg */
1169 &sc->jme_cdata.jme_ring_tag);
1171 device_printf(sc->jme_dev,
1172 "could not create parent ring DMA tag.\n");
1175 /* Create tag for Tx ring. */
1176 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1177 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */
1178 BUS_SPACE_MAXADDR, /* lowaddr */
1179 BUS_SPACE_MAXADDR, /* highaddr */
1180 NULL, NULL, /* filter, filterarg */
1181 JME_TX_RING_SIZE, /* maxsize */
1183 JME_TX_RING_SIZE, /* maxsegsize */
1185 NULL, NULL, /* lockfunc, lockarg */
1186 &sc->jme_cdata.jme_tx_ring_tag);
1188 device_printf(sc->jme_dev,
1189 "could not allocate Tx ring DMA tag.\n");
1193 /* Create tag for Rx ring. */
1194 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1195 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */
1196 lowaddr, /* lowaddr */
1197 BUS_SPACE_MAXADDR, /* highaddr */
1198 NULL, NULL, /* filter, filterarg */
1199 JME_RX_RING_SIZE, /* maxsize */
1201 JME_RX_RING_SIZE, /* maxsegsize */
1203 NULL, NULL, /* lockfunc, lockarg */
1204 &sc->jme_cdata.jme_rx_ring_tag);
1206 device_printf(sc->jme_dev,
1207 "could not allocate Rx ring DMA tag.\n");
1211 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1212 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1213 (void **)&sc->jme_rdata.jme_tx_ring,
1214 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1215 &sc->jme_cdata.jme_tx_ring_map);
1217 device_printf(sc->jme_dev,
1218 "could not allocate DMA'able memory for Tx ring.\n");
1222 ctx.jme_busaddr = 0;
1223 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1224 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1225 JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1226 if (error != 0 || ctx.jme_busaddr == 0) {
1227 device_printf(sc->jme_dev,
1228 "could not load DMA'able memory for Tx ring.\n");
1231 sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
1233 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1234 error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1235 (void **)&sc->jme_rdata.jme_rx_ring,
1236 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1237 &sc->jme_cdata.jme_rx_ring_map);
1239 device_printf(sc->jme_dev,
1240 "could not allocate DMA'able memory for Rx ring.\n");
1244 ctx.jme_busaddr = 0;
1245 error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1246 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1247 JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1248 if (error != 0 || ctx.jme_busaddr == 0) {
1249 device_printf(sc->jme_dev,
1250 "could not load DMA'able memory for Rx ring.\n");
1253 sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
1255 if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1256 /* Tx/Rx descriptor queue should reside within 4GB boundary. */
1257 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
1259 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
1261 if ((JME_ADDR_HI(tx_ring_end) !=
1262 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1263 (JME_ADDR_HI(rx_ring_end) !=
1264 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1265 device_printf(sc->jme_dev, "4GB boundary crossed, "
1266 "switching to 32bit DMA address mode.\n");
1268 /* Limit DMA address space to 32bit and try again. */
1269 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1274 lowaddr = BUS_SPACE_MAXADDR;
1275 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1276 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1277 /* Create parent buffer tag. */
1278 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1279 1, 0, /* algnmnt, boundary */
1280 lowaddr, /* lowaddr */
1281 BUS_SPACE_MAXADDR, /* highaddr */
1282 NULL, NULL, /* filter, filterarg */
1283 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1285 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1287 NULL, NULL, /* lockfunc, lockarg */
1288 &sc->jme_cdata.jme_buffer_tag);
1290 device_printf(sc->jme_dev,
1291 "could not create parent buffer DMA tag.\n");
1295 /* Create shadow status block tag. */
1296 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1297 JME_SSB_ALIGN, 0, /* algnmnt, boundary */
1298 BUS_SPACE_MAXADDR, /* lowaddr */
1299 BUS_SPACE_MAXADDR, /* highaddr */
1300 NULL, NULL, /* filter, filterarg */
1301 JME_SSB_SIZE, /* maxsize */
1303 JME_SSB_SIZE, /* maxsegsize */
1305 NULL, NULL, /* lockfunc, lockarg */
1306 &sc->jme_cdata.jme_ssb_tag);
1308 device_printf(sc->jme_dev,
1309 "could not create shared status block DMA tag.\n");
1313 /* Create tag for Tx buffers. */
1314 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1315 1, 0, /* algnmnt, boundary */
1316 BUS_SPACE_MAXADDR, /* lowaddr */
1317 BUS_SPACE_MAXADDR, /* highaddr */
1318 NULL, NULL, /* filter, filterarg */
1319 JME_TSO_MAXSIZE, /* maxsize */
1320 JME_MAXTXSEGS, /* nsegments */
1321 JME_TSO_MAXSEGSIZE, /* maxsegsize */
1323 NULL, NULL, /* lockfunc, lockarg */
1324 &sc->jme_cdata.jme_tx_tag);
1326 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1330 /* Create tag for Rx buffers. */
1331 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1332 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
1333 BUS_SPACE_MAXADDR, /* lowaddr */
1334 BUS_SPACE_MAXADDR, /* highaddr */
1335 NULL, NULL, /* filter, filterarg */
1336 MCLBYTES, /* maxsize */
1338 MCLBYTES, /* maxsegsize */
1340 NULL, NULL, /* lockfunc, lockarg */
1341 &sc->jme_cdata.jme_rx_tag);
1343 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1348 * Allocate DMA'able memory and load the DMA map for shared
1351 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1352 (void **)&sc->jme_rdata.jme_ssb_block,
1353 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1354 &sc->jme_cdata.jme_ssb_map);
1356 device_printf(sc->jme_dev, "could not allocate DMA'able "
1357 "memory for shared status block.\n");
1361 ctx.jme_busaddr = 0;
1362 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1363 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1364 JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1365 if (error != 0 || ctx.jme_busaddr == 0) {
1366 device_printf(sc->jme_dev, "could not load DMA'able memory "
1367 "for shared status block.\n");
1370 sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
1372 /* Create DMA maps for Tx buffers. */
1373 for (i = 0; i < JME_TX_RING_CNT; i++) {
1374 txd = &sc->jme_cdata.jme_txdesc[i];
1376 txd->tx_dmamap = NULL;
1377 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1380 device_printf(sc->jme_dev,
1381 "could not create Tx dmamap.\n");
1385 /* Create DMA maps for Rx buffers. */
1386 if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1387 &sc->jme_cdata.jme_rx_sparemap)) != 0) {
1388 device_printf(sc->jme_dev,
1389 "could not create spare Rx dmamap.\n");
1392 for (i = 0; i < JME_RX_RING_CNT; i++) {
1393 rxd = &sc->jme_cdata.jme_rxdesc[i];
1395 rxd->rx_dmamap = NULL;
1396 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1399 device_printf(sc->jme_dev,
1400 "could not create Rx dmamap.\n");
1410 jme_dma_free(struct jme_softc *sc)
1412 struct jme_txdesc *txd;
1413 struct jme_rxdesc *rxd;
1417 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1418 if (sc->jme_cdata.jme_tx_ring_map)
1419 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1420 sc->jme_cdata.jme_tx_ring_map);
1421 if (sc->jme_cdata.jme_tx_ring_map &&
1422 sc->jme_rdata.jme_tx_ring)
1423 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1424 sc->jme_rdata.jme_tx_ring,
1425 sc->jme_cdata.jme_tx_ring_map);
1426 sc->jme_rdata.jme_tx_ring = NULL;
1427 sc->jme_cdata.jme_tx_ring_map = NULL;
1428 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1429 sc->jme_cdata.jme_tx_ring_tag = NULL;
1432 if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1433 if (sc->jme_cdata.jme_rx_ring_map)
1434 bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1435 sc->jme_cdata.jme_rx_ring_map);
1436 if (sc->jme_cdata.jme_rx_ring_map &&
1437 sc->jme_rdata.jme_rx_ring)
1438 bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1439 sc->jme_rdata.jme_rx_ring,
1440 sc->jme_cdata.jme_rx_ring_map);
1441 sc->jme_rdata.jme_rx_ring = NULL;
1442 sc->jme_cdata.jme_rx_ring_map = NULL;
1443 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1444 sc->jme_cdata.jme_rx_ring_tag = NULL;
1447 if (sc->jme_cdata.jme_tx_tag != NULL) {
1448 for (i = 0; i < JME_TX_RING_CNT; i++) {
1449 txd = &sc->jme_cdata.jme_txdesc[i];
1450 if (txd->tx_dmamap != NULL) {
1451 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1453 txd->tx_dmamap = NULL;
1456 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1457 sc->jme_cdata.jme_tx_tag = NULL;
1460 if (sc->jme_cdata.jme_rx_tag != NULL) {
1461 for (i = 0; i < JME_RX_RING_CNT; i++) {
1462 rxd = &sc->jme_cdata.jme_rxdesc[i];
1463 if (rxd->rx_dmamap != NULL) {
1464 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1466 rxd->rx_dmamap = NULL;
1469 if (sc->jme_cdata.jme_rx_sparemap != NULL) {
1470 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1471 sc->jme_cdata.jme_rx_sparemap);
1472 sc->jme_cdata.jme_rx_sparemap = NULL;
1474 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1475 sc->jme_cdata.jme_rx_tag = NULL;
1478 /* Shared status block. */
1479 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1480 if (sc->jme_cdata.jme_ssb_map)
1481 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1482 sc->jme_cdata.jme_ssb_map);
1483 if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
1484 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1485 sc->jme_rdata.jme_ssb_block,
1486 sc->jme_cdata.jme_ssb_map);
1487 sc->jme_rdata.jme_ssb_block = NULL;
1488 sc->jme_cdata.jme_ssb_map = NULL;
1489 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1490 sc->jme_cdata.jme_ssb_tag = NULL;
1493 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1494 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1495 sc->jme_cdata.jme_buffer_tag = NULL;
1497 if (sc->jme_cdata.jme_ring_tag != NULL) {
1498 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1499 sc->jme_cdata.jme_ring_tag = NULL;
1504 * Make sure the interface is stopped at reboot time.
1507 jme_shutdown(device_t dev)
1510 return (jme_suspend(dev));
1514 * Unlike other ethernet controllers, the JMC250 requires the
1515 * link speed to be explicitly reset to 10/100Mbps, since a
1516 * gigabit link consumes more than 375mA.
1517 * Note, we reset the link speed to 10/100Mbps with
1518 * auto-negotiation, but we have no way of knowing whether that
1519 * operation will succeed once power is removed. If the
1520 * renegotiation fails, WOL may not work. Running at 1Gbps
1521 * draws more power than the 375mA at 3.3V allowed by the PCI
1522 * specification, which would result in power to the ethernet
1523 * controller being shut down completely.
1526 * Save the currently negotiated media speed/duplex/flow-control
1527 * in the softc and restore the same link after resuming.
1528 * PHY handling such as powering down or resetting to 100Mbps
1529 * may be better handled in the PHY driver's suspend method.
1532 jme_setlinkspeed(struct jme_softc *sc)
1534 struct mii_data *mii;
1537 JME_LOCK_ASSERT(sc);
1539 mii = device_get_softc(sc->jme_miibus);
1542 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1543 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1553 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1554 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1555 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1556 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1557 BMCR_AUTOEN | BMCR_STARTNEG);
1560 /* Poll link state until jme(4) gets a 10/100 link. */
1561 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1563 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1564 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1574 pause("jmelnk", hz);
1577 if (i == MII_ANEGTICKS_GIGE)
1578 device_printf(sc->jme_dev, "establishing link failed, "
1579 "WOL may not work!");
1582 * No link; force the MAC to a 100Mbps, full-duplex link.
1583 * This is a last resort and may or may not work.
1585 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1586 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1591 jme_setwol(struct jme_softc *sc)
1598 JME_LOCK_ASSERT(sc);
1600 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1601 /* Remove Tx MAC/offload clock to save more power. */
1602 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1603 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1604 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1605 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1606 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
1607 CSR_WRITE_4(sc, JME_GPREG1,
1608 CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
1609 /* No PME capability, PHY power down. */
1615 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1616 pmcs = CSR_READ_4(sc, JME_PMCS);
1617 pmcs &= ~PMCS_WOL_ENB_MASK;
1618 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1619 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1620 /* Enable PME message. */
1621 gpr |= GPREG0_PME_ENB;
1622 /* For gigabit controllers, reset link speed to 10/100. */
1623 if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
1624 jme_setlinkspeed(sc);
1627 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1628 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1629 /* Remove Tx MAC/offload clock to save more power. */
1630 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1631 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1632 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1633 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1635 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1636 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1637 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1638 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1639 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1640 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1641 /* No WOL, PHY power down. */
1647 jme_suspend(device_t dev)
1649 struct jme_softc *sc;
1651 sc = device_get_softc(dev);
1662 jme_resume(device_t dev)
1664 struct jme_softc *sc;
1669 sc = device_get_softc(dev);
1672 if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
1673 pmstat = pci_read_config(sc->jme_dev,
1674 pmc + PCIR_POWER_STATUS, 2);
1675 /* Disable PME and clear PME status. */
1676 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1677 pci_write_config(sc->jme_dev,
1678 pmc + PCIR_POWER_STATUS, pmstat, 2);
1683 if ((ifp->if_flags & IFF_UP) != 0) {
1684 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1685 jme_init_locked(sc);
1694 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1696 struct jme_txdesc *txd;
1697 struct jme_desc *desc;
1699 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1700 int error, i, nsegs, prod;
1701 uint32_t cflags, tso_segsz;
1703 JME_LOCK_ASSERT(sc);
1705 M_ASSERTPKTHDR((*m_head));
1707 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1709 * Due to its adherence to the NDIS specification, the JMC250
1710 * assumes the upper stack computed the TCP pseudo checksum
1711 * without including the payload length. This breaks checksum
1712 * offload for the TSO case, so recompute the TCP pseudo
1713 * checksum for the JMC250. Hopefully this is not much of a
1714 * burden on modern CPUs.
1716 struct ether_header *eh;
1719 uint32_t ip_off, poff;
1721 if (M_WRITABLE(*m_head) == 0) {
1722 /* Get a writable copy. */
1723 m = m_dup(*m_head, M_DONTWAIT);
1731 ip_off = sizeof(struct ether_header);
1732 m = m_pullup(*m_head, ip_off);
1737 eh = mtod(m, struct ether_header *);
1738 /* Check the existence of VLAN tag. */
1739 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1740 ip_off = sizeof(struct ether_vlan_header);
1741 m = m_pullup(m, ip_off);
1747 m = m_pullup(m, ip_off + sizeof(struct ip));
1752 ip = (struct ip *)(mtod(m, char *) + ip_off);
1753 poff = ip_off + (ip->ip_hl << 2);
1754 m = m_pullup(m, poff + sizeof(struct tcphdr));
1760 * Reset IP checksum and recompute TCP pseudo
1761 * checksum that NDIS specification requires.
1763 ip = (struct ip *)(mtod(m, char *) + ip_off);
1764 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1766 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
1767 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1769 htons((tcp->th_off << 2) + IPPROTO_TCP));
1770 /* No need for TSO; force IP checksum offload. */
1771 (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
1772 (*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
1774 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1775 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1779 prod = sc->jme_cdata.jme_tx_prod;
1780 txd = &sc->jme_cdata.jme_txdesc[prod];
1782 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1783 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1784 if (error == EFBIG) {
1785 m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
1792 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1793 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1799 } else if (error != 0)
1808 * Check for descriptor overrun. Leave one free descriptor.
1809 * Since we always use 64bit address mode for transmitting,
1810 * each Tx request requires one more dummy descriptor.
1812 if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
1813 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1820 /* Configure checksum offload and TSO. */
1821 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1822 tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1824 cflags |= JME_TD_TSO;
1826 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1827 cflags |= JME_TD_IPCSUM;
1828 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1829 cflags |= JME_TD_TCPCSUM;
1830 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1831 cflags |= JME_TD_UDPCSUM;
1833 /* Configure VLAN. */
1834 if ((m->m_flags & M_VLANTAG) != 0) {
1835 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1836 cflags |= JME_TD_VLAN_TAG;
1839 desc = &sc->jme_rdata.jme_tx_ring[prod];
1840 desc->flags = htole32(cflags);
1841 desc->buflen = htole32(tso_segsz);
1842 desc->addr_hi = htole32(m->m_pkthdr.len);
1844 sc->jme_cdata.jme_tx_cnt++;
1845 JME_DESC_INC(prod, JME_TX_RING_CNT);
1846 for (i = 0; i < nsegs; i++) {
1847 desc = &sc->jme_rdata.jme_tx_ring[prod];
1848 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1849 desc->buflen = htole32(txsegs[i].ds_len);
1850 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1851 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1852 sc->jme_cdata.jme_tx_cnt++;
1853 JME_DESC_INC(prod, JME_TX_RING_CNT);
1856 /* Update producer index. */
1857 sc->jme_cdata.jme_tx_prod = prod;
1859 * Finally, request an interrupt and give the hardware
1860 * ownership of the first descriptor.
1862 desc = txd->tx_desc;
1863 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
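/* Account for the leading flags/length descriptor in addition to the data segments. */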
1866 txd->tx_ndesc = nsegs + 1;
1868 /* Sync descriptors. */
1869 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1870 BUS_DMASYNC_PREWRITE);
1871 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1872 sc->jme_cdata.jme_tx_ring_map,
1873 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1879 jme_start(struct ifnet *ifp)
1881 struct jme_softc *sc;
1885 jme_start_locked(ifp);
1890 jme_start_locked(struct ifnet *ifp)
1892 struct jme_softc *sc;
1893 struct mbuf *m_head;
1898 JME_LOCK_ASSERT(sc);
1900 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1903 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1904 IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0)
1907 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1908 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1912 * Pack the data into the transmit ring. If we
1913 * don't have room, set the OACTIVE flag and wait
1914 * for the NIC to drain the ring.
1916 if (jme_encap(sc, &m_head)) {
1919 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1920 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1926 * If there's a BPF listener, bounce a copy of this frame
1929 ETHER_BPF_MTAP(ifp, m_head);
1934 * Reading TXCSR takes a very long time under heavy load,
1935 * so cache the TXCSR value and write the cached value ORed
1936 * with the kick command to the TXCSR. This saves one register
1939 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1940 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1941 /* Set a timeout in case the chip goes out to lunch. */
1942 sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1947 jme_watchdog(struct jme_softc *sc)
1951 JME_LOCK_ASSERT(sc);
1953 if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1957 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1958 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1960 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1961 jme_init_locked(sc);
1965 if (sc->jme_cdata.jme_tx_cnt == 0) {
1966 if_printf(sc->jme_ifp,
1967 "watchdog timeout (missed Tx interrupts) -- recovering\n");
1968 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1969 jme_start_locked(ifp);
1973 if_printf(sc->jme_ifp, "watchdog timeout\n");
1975 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1976 jme_init_locked(sc);
1977 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1978 jme_start_locked(ifp);
1982 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1984 struct jme_softc *sc;
1986 struct mii_data *mii;
1991 ifr = (struct ifreq *)data;
1995 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1996 ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1997 ifr->ifr_mtu > JME_MAX_MTU)) {
2002 if (ifp->if_mtu != ifr->ifr_mtu) {
2004 * No special configuration is required when the interface
2005 * MTU is changed, but the availability of TSO/Tx checksum
2006 * offload should be checked against the new MTU size, as
2007 * the FIFO size is just 2KB.
2010 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
2011 ifp->if_capenable &=
2012 ~(IFCAP_TXCSUM | IFCAP_TSO4);
2014 ~(JME_CSUM_FEATURES | CSUM_TSO);
2015 VLAN_CAPABILITIES(ifp);
2017 ifp->if_mtu = ifr->ifr_mtu;
2018 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2019 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2020 jme_init_locked(sc);
2027 if ((ifp->if_flags & IFF_UP) != 0) {
2028 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2029 if (((ifp->if_flags ^ sc->jme_if_flags)
2030 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2033 if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
2034 jme_init_locked(sc);
2037 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2040 sc->jme_if_flags = ifp->if_flags;
2046 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2052 mii = device_get_softc(sc->jme_miibus);
2053 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2057 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
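/* 'mask' now holds the capability bits this request toggles relative to the current settings. */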
2058 if ((mask & IFCAP_TXCSUM) != 0 &&
2059 ifp->if_mtu < JME_TX_FIFO_SIZE) {
2060 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
2061 ifp->if_capenable ^= IFCAP_TXCSUM;
2062 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
2063 ifp->if_hwassist |= JME_CSUM_FEATURES;
2065 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
2068 if ((mask & IFCAP_RXCSUM) != 0 &&
2069 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
2070 ifp->if_capenable ^= IFCAP_RXCSUM;
2071 reg = CSR_READ_4(sc, JME_RXMAC);
2072 reg &= ~RXMAC_CSUM_ENB;
2073 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2074 reg |= RXMAC_CSUM_ENB;
2075 CSR_WRITE_4(sc, JME_RXMAC, reg);
2077 if ((mask & IFCAP_TSO4) != 0 &&
2078 ifp->if_mtu < JME_TX_FIFO_SIZE) {
2079 if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
2080 ifp->if_capenable ^= IFCAP_TSO4;
2081 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
2082 ifp->if_hwassist |= CSUM_TSO;
2084 ifp->if_hwassist &= ~CSUM_TSO;
2087 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2088 (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
2089 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2090 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2091 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2092 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2093 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2094 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2095 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2096 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2097 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
2098 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2102 VLAN_CAPABILITIES(ifp);
2105 error = ether_ioctl(ifp, cmd, data);
2113 jme_mac_config(struct jme_softc *sc)
2115 struct mii_data *mii;
2116 uint32_t ghc, gpreg, rxmac, txmac, txpause;
2119 JME_LOCK_ASSERT(sc);
2121 mii = device_get_softc(sc->jme_miibus);
2123 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2125 CSR_WRITE_4(sc, JME_GHC, 0);
2128 rxmac = CSR_READ_4(sc, JME_RXMAC);
2129 rxmac &= ~RXMAC_FC_ENB;
2130 txmac = CSR_READ_4(sc, JME_TXMAC);
2131 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2132 txpause = CSR_READ_4(sc, JME_TXPFC);
2133 txpause &= ~TXPFC_PAUSE_ENB;
2134 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2135 ghc |= GHC_FULL_DUPLEX;
2136 rxmac &= ~RXMAC_COLL_DET_ENB;
2137 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2138 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2140 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2141 txpause |= TXPFC_PAUSE_ENB;
2142 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2143 rxmac |= RXMAC_FC_ENB;
2144 /* Disable retry transmit timer/retry limit. */
2145 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2146 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2148 rxmac |= RXMAC_COLL_DET_ENB;
2149 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2150 /* Enable retry transmit timer/retry limit. */
2151 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2152 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2154 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2155 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2157 ghc |= GHC_SPEED_10;
2158 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2161 ghc |= GHC_SPEED_100;
2162 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2165 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2167 ghc |= GHC_SPEED_1000;
2168 txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2169 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2170 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2175 if (sc->jme_rev == DEVICEID_JMC250 &&
2176 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2178 * Workaround occasional packet loss issue of JMC250 A2
2179 * when it runs on half-duplex media.
2181 gpreg = CSR_READ_4(sc, JME_GPREG1);
2182 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2183 gpreg &= ~GPREG1_HDPX_FIX;
2185 gpreg |= GPREG1_HDPX_FIX;
2186 CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2187 /* Workaround CRC errors at 100Mbps on JMC250 A2. */
2188 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2189 /* Extend interface FIFO depth. */
2190 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2193 /* Select default interface FIFO depth. */
2194 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2198 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2200 CSR_WRITE_4(sc, JME_GHC, ghc);
2201 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2202 CSR_WRITE_4(sc, JME_TXMAC, txmac);
2203 CSR_WRITE_4(sc, JME_TXPFC, txpause);
2207 jme_link_task(void *arg, int pending)
2209 struct jme_softc *sc;
2210 struct mii_data *mii;
2212 struct jme_txdesc *txd;
2216 sc = (struct jme_softc *)arg;
2219 mii = device_get_softc(sc->jme_miibus);
2221 if (mii == NULL || ifp == NULL ||
2222 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2227 sc->jme_flags &= ~JME_FLAG_LINK;
2228 if ((mii->mii_media_status & IFM_AVALID) != 0) {
2229 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2232 sc->jme_flags |= JME_FLAG_LINK;
2235 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2237 sc->jme_flags |= JME_FLAG_LINK;
2245 * Disabling the Rx/Tx MACs has a side-effect of resetting
2246 * the JME_TXNDA/JME_RXNDA registers to the first address of
2247 * the Tx/Rx descriptor rings. So the driver should reset its
2248 * internal producer/consumer pointers and reclaim any
2249 * allocated resources. Note, just saving the value of the
2250 * JME_TXNDA and JME_RXNDA registers before stopping the MAC
2251 * and restoring the JME_TXNDA/JME_RXNDA registers is not
2252 * sufficient to guarantee correct MAC state because
2253 * stopping MAC operation can take a while and the hardware
2254 * might have updated the JME_TXNDA/JME_RXNDA registers
2255 * during the stop operation.
2257 /* Block execution of task. */
2258 taskqueue_block(sc->jme_tq);
2259 /* Disable interrupts and stop driver. */
2260 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2261 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2262 callout_stop(&sc->jme_tick_ch);
2263 sc->jme_watchdog_timer = 0;
2265 /* Stop receiver/transmitter. */
2269 /* XXX Drain all queued tasks. */
2271 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2274 if (sc->jme_cdata.jme_rxhead != NULL)
2275 m_freem(sc->jme_cdata.jme_rxhead);
2276 JME_RXCHAIN_RESET(sc);
2278 if (sc->jme_cdata.jme_tx_cnt != 0) {
2279 /* Remove queued packets for transmit. */
2280 for (i = 0; i < JME_TX_RING_CNT; i++) {
2281 txd = &sc->jme_cdata.jme_txdesc[i];
2282 if (txd->tx_m != NULL) {
2284 sc->jme_cdata.jme_tx_tag,
2286 BUS_DMASYNC_POSTWRITE);
2288 sc->jme_cdata.jme_tx_tag,
2299 * Reuse configured Rx descriptors and reset
2300 * producer/consumer index.
2302 sc->jme_cdata.jme_rx_cons = 0;
2303 sc->jme_morework = 0;
2304 jme_init_tx_ring(sc);
2305 /* Initialize shadow status block. */
2308 /* Program MAC with resolved speed/duplex/flow-control. */
2309 if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2311 jme_stats_clear(sc);
2313 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2314 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2316 /* Set Tx ring address to the hardware. */
2317 paddr = JME_TX_RING_ADDR(sc, 0);
2318 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2319 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2321 /* Set Rx ring address to the hardware. */
2322 paddr = JME_RX_RING_ADDR(sc, 0);
2323 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2324 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2326 /* Restart receiver/transmitter. */
2327 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2329 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2330 /* Lastly enable TX/RX clock. */
2331 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2332 CSR_WRITE_4(sc, JME_GHC,
2333 CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
2334 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
2335 CSR_WRITE_4(sc, JME_GPREG1,
2336 CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
2339 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2340 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2341 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2342 /* Unblock execution of task. */
2343 taskqueue_unblock(sc->jme_tq);
2344 /* Reenable interrupts. */
2345 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2353 struct jme_softc *sc;
2356 sc = (struct jme_softc *)arg;
2358 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2359 if (status == 0 || status == 0xFFFFFFFF)
2360 return (FILTER_STRAY);
2361 /* Disable interrupts. */
2362 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2363 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2365 return (FILTER_HANDLED);
2369 jme_int_task(void *arg, int pending)
2371 struct jme_softc *sc;
2376 sc = (struct jme_softc *)arg;
2380 status = CSR_READ_4(sc, JME_INTR_STATUS);
2381 if (sc->jme_morework != 0) {
2382 sc->jme_morework = 0;
2383 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2385 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2387 /* Reset PCC counter/timer and Ack interrupts. */
2388 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2389 if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2390 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2391 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2392 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2393 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2395 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2396 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2397 more = jme_rxintr(sc, sc->jme_process_limit);
2399 sc->jme_morework = 1;
2401 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2403 * Notify the hardware of the availability of new Rx buffers.
2405 * Reading RXCSR takes a very long time under heavy load, so
2406 * cache the RXCSR value and write the ORed value with the
2407 * kick command to the RXCSR. This saves one register
2408 * access.
2411 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2412 RXCSR_RX_ENB | RXCSR_RXQ_START);
2414 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2415 jme_start_locked(ifp);
2418 if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2419 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2426 /* Reenable interrupts. */
2427 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2431 jme_txeof(struct jme_softc *sc)
2434 struct jme_txdesc *txd;
2438 JME_LOCK_ASSERT(sc);
2442 cons = sc->jme_cdata.jme_tx_cons;
2443 if (cons == sc->jme_cdata.jme_tx_prod)
2446 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2447 sc->jme_cdata.jme_tx_ring_map,
2448 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2451 * Go through our Tx list and free mbufs for those
2452 * frames which have been transmitted.
2454 for (; cons != sc->jme_cdata.jme_tx_prod;) {
2455 txd = &sc->jme_cdata.jme_txdesc[cons];
2456 status = le32toh(txd->tx_desc->flags);
2457 if ((status & JME_TD_OWN) == JME_TD_OWN)
2460 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2464 if ((status & JME_TD_COLLISION) != 0)
2465 ifp->if_collisions +=
2466 le32toh(txd->tx_desc->buflen) &
2467 JME_TD_BUF_LEN_MASK;
2470 * Only the first descriptor of a multi-descriptor
2471 * transmission is updated, so the driver has to skip the
2472 * entire chain of buffers for the transmitted frame. In
2473 * other words, the JME_TD_OWN bit is valid only in the
2474 * first descriptor of a multi-descriptor transmission.
2476 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2477 sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2478 JME_DESC_INC(cons, JME_TX_RING_CNT);
2481 /* Reclaim transferred mbufs. */
2482 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2483 BUS_DMASYNC_POSTWRITE);
2484 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2486 KASSERT(txd->tx_m != NULL,
2487 ("%s: freeing NULL mbuf!\n", __func__));
2490 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2491 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2492 ("%s: Active Tx desc counter was garbled\n", __func__));
2494 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2496 sc->jme_cdata.jme_tx_cons = cons;
2497 /* Unarm the watchdog timer when there are no pending descriptors in the queue. */
2498 if (sc->jme_cdata.jme_tx_cnt == 0)
2499 sc->jme_watchdog_timer = 0;
2501 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2502 sc->jme_cdata.jme_tx_ring_map,
2503 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2506 static __inline void
2507 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2509 struct jme_desc *desc;
2511 desc = &sc->jme_rdata.jme_rx_ring[cons];
2512 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2513 desc->buflen = htole32(MCLBYTES);
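/*
 * Worth noting: jme_discard_rxbuf() does not free or replace the mbuf
 * cluster attached to the descriptor.  It simply hands the descriptor
 * back to the hardware by setting JME_RD_OWN again with the full
 * cluster length, so the existing buffer can be reused on the next
 * receive attempt without any new allocation.
 */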
2516 /* Receive a frame. */
2518 jme_rxeof(struct jme_softc *sc)
2521 struct jme_desc *desc;
2522 struct jme_rxdesc *rxd;
2523 struct mbuf *mp, *m;
2524 uint32_t flags, status;
2525 int cons, count, nsegs;
2527 JME_LOCK_ASSERT(sc);
2531 cons = sc->jme_cdata.jme_rx_cons;
2532 desc = &sc->jme_rdata.jme_rx_ring[cons];
2533 flags = le32toh(desc->flags);
2534 status = le32toh(desc->buflen);
2535 nsegs = JME_RX_NSEGS(status);
2536 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2537 if ((status & JME_RX_ERR_STAT) != 0) {
2539 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2540 #ifdef JME_SHOW_ERRORS
2541 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2542 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2544 sc->jme_cdata.jme_rx_cons += nsegs;
2545 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2549 for (count = 0; count < nsegs; count++,
2550 JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2551 rxd = &sc->jme_cdata.jme_rxdesc[cons];
2553 /* Add a new receive buffer to the ring. */
2554 if (jme_newbuf(sc, rxd) != 0) {
2557 for (; count < nsegs; count++) {
2558 jme_discard_rxbuf(sc, cons);
2559 JME_DESC_INC(cons, JME_RX_RING_CNT);
2561 if (sc->jme_cdata.jme_rxhead != NULL) {
2562 m_freem(sc->jme_cdata.jme_rxhead);
2563 JME_RXCHAIN_RESET(sc);
2569 * Assume we've received a full sized frame.
2570 * The actual size is fixed up when we encounter the end
2571 * of a multi-segment frame.
2573 mp->m_len = MCLBYTES;
2575 /* Chain received mbufs. */
2576 if (sc->jme_cdata.jme_rxhead == NULL) {
2577 sc->jme_cdata.jme_rxhead = mp;
2578 sc->jme_cdata.jme_rxtail = mp;
2581 * The receive processor can receive a maximum frame
2582 * size of 65535 bytes.
2584 mp->m_flags &= ~M_PKTHDR;
2585 sc->jme_cdata.jme_rxtail->m_next = mp;
2586 sc->jme_cdata.jme_rxtail = mp;
2589 if (count == nsegs - 1) {
2590 /* Last desc. for this frame. */
2591 m = sc->jme_cdata.jme_rxhead;
2592 m->m_flags |= M_PKTHDR;
2593 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
2595 /* Set first mbuf size. */
2596 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2597 /* Set last mbuf size. */
2598 mp->m_len = sc->jme_cdata.jme_rxlen -
2599 ((MCLBYTES - JME_RX_PAD_BYTES) +
2600 (MCLBYTES * (nsegs - 2)));
2602 m->m_len = sc->jme_cdata.jme_rxlen;
2603 m->m_pkthdr.rcvif = ifp;
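/*
 * The chain lengths above work out as follows: the first mbuf holds
 * MCLBYTES minus the hardware pad (JME_RX_PAD_BYTES, presumably 10
 * bytes), any middle mbufs hold a full MCLBYTES, and the last mbuf
 * gets whatever remains of jme_rxlen.  For example, with nsegs == 3
 * the lengths are (MCLBYTES - 10), MCLBYTES, and
 * jme_rxlen - (2 * MCLBYTES - 10).
 */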
2606 * Account for the 10 bytes of auto padding which is used
2607 * to align the IP header on a 32bit boundary. Also note,
2608 * CRC bytes are automatically removed by the hardware.
2611 m->m_data += JME_RX_PAD_BYTES;
2613 /* Set checksum information. */
2614 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2615 (flags & JME_RD_IPV4) != 0) {
2616 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2617 if ((flags & JME_RD_IPCSUM) != 0)
2618 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2619 if (((flags & JME_RD_MORE_FRAG) == 0) &&
2620 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2621 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2622 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2623 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2624 m->m_pkthdr.csum_flags |=
2625 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2626 m->m_pkthdr.csum_data = 0xffff;
2630 /* Check for VLAN tagged packets. */
2631 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2632 (flags & JME_RD_VLAN_TAG) != 0) {
2633 m->m_pkthdr.ether_vtag =
2634 flags & JME_RD_VLAN_MASK;
2635 m->m_flags |= M_VLANTAG;
2641 (*ifp->if_input)(ifp, m);
2644 /* Reset mbuf chains. */
2645 JME_RXCHAIN_RESET(sc);
2649 sc->jme_cdata.jme_rx_cons += nsegs;
2650 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2654 jme_rxintr(struct jme_softc *sc, int count)
2656 struct jme_desc *desc;
2657 int nsegs, prog, pktlen;
2659 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2660 sc->jme_cdata.jme_rx_ring_map,
2661 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2663 for (prog = 0; count > 0; prog++) {
2664 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2665 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2667 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2669 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2671 * Check the number of segments against the received byte count.
2672 * A non-matching value would indicate that the hardware
2673 * is still trying to update the Rx descriptors. I'm not
2674 * sure whether this check is needed.
2676 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2677 if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
2680 /* Received a frame. */
2686 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2687 sc->jme_cdata.jme_rx_ring_map,
2688 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2690 return (count > 0 ? 0 : EAGAIN);
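/*
 * The nsegs sanity check above is a ceiling division: a frame of
 * pktlen bytes split into MCLBYTES-sized clusters should occupy
 * (pktlen + MCLBYTES - 1) / MCLBYTES descriptors.  For instance,
 * assuming the usual 2048-byte clusters, a 3000-byte frame would be
 * expected to span two descriptors.
 */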
2696 struct jme_softc *sc;
2697 struct mii_data *mii;
2699 sc = (struct jme_softc *)arg;
2701 JME_LOCK_ASSERT(sc);
2703 mii = device_get_softc(sc->jme_miibus);
2706 * Reclaim Tx buffers that have been completed. It's not
2707 * strictly needed here, but it releases allocated mbuf chains
2708 * sooner and limits the maximum reclaim delay to one hz tick.
2711 jme_stats_update(sc);
2713 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2717 jme_reset(struct jme_softc *sc)
2719 uint32_t ghc, gpreg;
2721 /* Stop receiver, transmitter. */
2725 /* Reset controller. */
2726 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2727 CSR_READ_4(sc, JME_GHC);
2730 * Workaround Rx FIFO overruns seen under certain conditions.
2731 * Explicitly synchronize the TX/RX clocks. The TX/RX clocks
2732 * should be enabled only after enabling the TX/RX MACs.
2734 if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
2735 /* Disable TX clock. */
2736 CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
2737 /* Disable RX clock. */
2738 gpreg = CSR_READ_4(sc, JME_GPREG1);
2739 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2740 gpreg = CSR_READ_4(sc, JME_GPREG1);
2741 /* De-assert RESET but still disable TX clock. */
2742 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2743 ghc = CSR_READ_4(sc, JME_GHC);
2745 /* Enable TX clock. */
2746 CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
2747 /* Enable RX clock. */
2748 CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
2749 CSR_READ_4(sc, JME_GPREG1);
2751 /* Disable TX/RX clock again. */
2752 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2753 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2755 CSR_WRITE_4(sc, JME_GHC, 0);
2756 CSR_READ_4(sc, JME_GHC);
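/*
 * The sequence above appears to work as follows: while GHC_RESET is
 * asserted the TX/RX MAC clocks are explicitly gated off, then they
 * are briefly enabled so the reset can propagate, and finally gated
 * off again.  On chips that need the workaround the clocks are left
 * disabled here and are only re-enabled in jme_link_task() once the
 * MACs themselves are running, per the comment above.
 */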
2763 struct jme_softc *sc;
2765 sc = (struct jme_softc *)xsc;
2767 jme_init_locked(sc);
2772 jme_init_locked(struct jme_softc *sc)
2775 struct mii_data *mii;
2780 JME_LOCK_ASSERT(sc);
2783 mii = device_get_softc(sc->jme_miibus);
2785 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2788 * Cancel any pending I/O.
2793 * Reset the chip to a known state.
2797 /* Init descriptors. */
2798 error = jme_init_rx_ring(sc);
2800 device_printf(sc->jme_dev,
2801 "%s: initialization failed: no memory for Rx buffers.\n",
2806 jme_init_tx_ring(sc);
2807 /* Initialize shadow status block. */
2810 /* Reprogram the station address. */
2811 jme_set_macaddr(sc, IF_LLADDR(sc->jme_ifp));
2814 * Configure Tx queue.
2815 * Tx priority queue weight value : 0
2816 * Tx FIFO threshold for processing next packet : 16QW
2817 * Maximum Tx DMA length : 512
2818 * Allow Tx DMA burst.
2820 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2821 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2822 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2823 sc->jme_txcsr |= sc->jme_tx_dma_size;
2824 sc->jme_txcsr |= TXCSR_DMA_BURST;
2825 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2827 /* Set Tx descriptor counter. */
2828 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2830 /* Set Tx ring address to the hardware. */
2831 paddr = JME_TX_RING_ADDR(sc, 0);
2832 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2833 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2835 /* Configure TxMAC parameters. */
2836 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2837 reg |= TXMAC_THRESH_1_PKT;
2838 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2839 CSR_WRITE_4(sc, JME_TXMAC, reg);
2842 * Configure Rx queue.
2843 * FIFO full threshold for transmitting Tx pause packet : 128T
2844 * FIFO threshold for processing next packet : 128QW
2846 * Max Rx DMA length : 128
2847 * Rx descriptor retry : 32
2848 * Rx descriptor retry time gap : 256ns
2849 * Don't receive runt/bad frame.
2851 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2853 * Since Rx FIFO size is 4K bytes, receiving frames larger
2854 * than 4K bytes will suffer from Rx FIFO overruns. So
2855 * decrease FIFO threshold to reduce the FIFO overruns for
2856 * frames larger than 4000 bytes.
2857 * For best performance with standard MTU sized frames use
2858 * the maximum allowable FIFO threshold, 128QW. Note these do
2859 * not hold on chips with full mask version >= 2. For these
2860 * controllers 64QW and 128QW are not valid values.
2862 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2863 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2865 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2866 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2867 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2869 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2871 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2872 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2873 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2874 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2876 /* Set Rx descriptor counter. */
2877 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2879 /* Set Rx ring address to the hardware. */
2880 paddr = JME_RX_RING_ADDR(sc, 0);
2881 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2882 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2884 /* Clear receive filter. */
2885 CSR_WRITE_4(sc, JME_RXMAC, 0);
2886 /* Set up the receive filter. */
2891 * Disable all WOL bits as WOL can interfere with normal Rx
2892 * operation. Also clear the WOL detection status bits.
2894 reg = CSR_READ_4(sc, JME_PMCS);
2895 reg &= ~PMCS_WOL_ENB_MASK;
2896 CSR_WRITE_4(sc, JME_PMCS, reg);
2898 reg = CSR_READ_4(sc, JME_RXMAC);
2900 * Pad 10 bytes right before the received frame. This greatly
2901 * helps Rx performance on strict-alignment architectures as
2902 * the driver does not need to copy the frame to align the payload.
2904 reg |= RXMAC_PAD_10BYTES;
2905 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2906 reg |= RXMAC_CSUM_ENB;
2907 CSR_WRITE_4(sc, JME_RXMAC, reg);
2909 /* Configure general purpose reg0 */
2910 reg = CSR_READ_4(sc, JME_GPREG0);
2911 reg &= ~GPREG0_PCC_UNIT_MASK;
2912 /* Set PCC timer resolution to micro-seconds unit. */
2913 reg |= GPREG0_PCC_UNIT_US;
2915 * Disable all shadow register posting as we have to read the
2916 * JME_INTR_STATUS register in jme_int_task. Also it seems
2917 * that it's hard to synchronize interrupt status between
2918 * hardware and software with shadow posting due to the
2919 * requirements of bus_dmamap_sync(9).
2921 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2922 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2923 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2924 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2925 /* Disable posting of DW0. */
2926 reg &= ~GPREG0_POST_DW0_ENB;
2927 /* Clear PME message. */
2928 reg &= ~GPREG0_PME_ENB;
2929 /* Set PHY address. */
2930 reg &= ~GPREG0_PHY_ADDR_MASK;
2931 reg |= sc->jme_phyaddr;
2932 CSR_WRITE_4(sc, JME_GPREG0, reg);
2934 /* Configure Tx queue 0 packet completion coalescing. */
2935 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2937 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2938 PCCTX_COAL_PKT_MASK;
2939 reg |= PCCTX_COAL_TXQ0;
2940 CSR_WRITE_4(sc, JME_PCCTX, reg);
2942 /* Configure Rx queue 0 packet completion coalescing. */
2943 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2945 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2946 PCCRX_COAL_PKT_MASK;
2947 CSR_WRITE_4(sc, JME_PCCRX0, reg);
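/*
 * The PCC registers pack an interrupt coalescing timeout and a packet
 * count into a single word; presumably the controller raises the queue
 * completion interrupt once either threshold is reached, so the
 * jme_*_coal_to/jme_*_coal_pkt tunables trade interrupt rate against
 * latency.
 */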
2950 * Configure PCD (Packet Completion Deferring). It seems PCD
2951 * generates an interrupt when the time interval between two
2952 * back-to-back incoming/outgoing packets is long enough for
2953 * its timer to count down to 0. The arrival of new packets
2954 * after the timer has started causes the PCD timer to restart.
2955 * Unfortunately, it's not clear how PCD is useful at this
2956 * moment, so just use the same values as the PCC parameters.
2958 if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
2959 sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
2960 if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
2961 sc->jme_rx_pcd_to = PCDRX_TO_MAX;
2962 sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
2963 if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
2964 sc->jme_tx_pcd_to = PCDTX_TO_MAX;
2965 reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
2966 reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
2967 CSR_WRITE_4(sc, PCDRX_REG(0), reg);
2968 reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
2969 reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
2970 CSR_WRITE_4(sc, JME_PCDTX, reg);
2973 /* Configure shadow status block but don't enable posting. */
2974 paddr = sc->jme_rdata.jme_ssb_block_paddr;
2975 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2976 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
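/*
 * Only the shadow status block address is programmed here; posting to
 * it stays disabled via the GPREG0_SH_POST_*_DIS bits set above, so
 * interrupt status is always read directly from JME_INTR_STATUS in
 * jme_int_task().
 */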
2978 /* Disable Timer 1 and Timer 2. */
2979 CSR_WRITE_4(sc, JME_TIMER1, 0);
2980 CSR_WRITE_4(sc, JME_TIMER2, 0);
2982 /* Configure retry transmit period, retry limit value. */
2983 CSR_WRITE_4(sc, JME_TXTRHD,
2984 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2985 TXTRHD_RT_PERIOD_MASK) |
2986 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2987 TXTRHD_RT_LIMIT_SHIFT));
2990 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2992 /* Initialize the interrupt mask. */
2993 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2994 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2997 * Enabling Tx/Rx DMA engines and Rx queue processing is
2998 * done after detection of valid link in jme_link_task.
3001 sc->jme_flags &= ~JME_FLAG_LINK;
3002 /* Set the current media. */
3005 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
3007 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3008 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3012 jme_stop(struct jme_softc *sc)
3015 struct jme_txdesc *txd;
3016 struct jme_rxdesc *rxd;
3019 JME_LOCK_ASSERT(sc);
3021 * Mark the interface down and cancel the watchdog timer.
3024 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3025 sc->jme_flags &= ~JME_FLAG_LINK;
3026 callout_stop(&sc->jme_tick_ch);
3027 sc->jme_watchdog_timer = 0;
3030 * Disable interrupts.
3032 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3033 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
3035 /* Disable updating shadow status block. */
3036 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
3037 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
3039 /* Stop receiver, transmitter. */
3043 /* Reclaim Rx/Tx buffers that have been completed. */
3044 jme_rxintr(sc, JME_RX_RING_CNT);
3045 if (sc->jme_cdata.jme_rxhead != NULL)
3046 m_freem(sc->jme_cdata.jme_rxhead);
3047 JME_RXCHAIN_RESET(sc);
3050 * Free RX and TX mbufs still in the queues.
3052 for (i = 0; i < JME_RX_RING_CNT; i++) {
3053 rxd = &sc->jme_cdata.jme_rxdesc[i];
3054 if (rxd->rx_m != NULL) {
3055 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
3056 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3057 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
3063 for (i = 0; i < JME_TX_RING_CNT; i++) {
3064 txd = &sc->jme_cdata.jme_txdesc[i];
3065 if (txd->tx_m != NULL) {
3066 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
3067 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3068 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
3075 jme_stats_update(sc);
3080 jme_stop_tx(struct jme_softc *sc)
3085 reg = CSR_READ_4(sc, JME_TXCSR);
3086 if ((reg & TXCSR_TX_ENB) == 0)
3088 reg &= ~TXCSR_TX_ENB;
3089 CSR_WRITE_4(sc, JME_TXCSR, reg);
3090 for (i = JME_TIMEOUT; i > 0; i--) {
3092 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3096 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3100 jme_stop_rx(struct jme_softc *sc)
3105 reg = CSR_READ_4(sc, JME_RXCSR);
3106 if ((reg & RXCSR_RX_ENB) == 0)
3108 reg &= ~RXCSR_RX_ENB;
3109 CSR_WRITE_4(sc, JME_RXCSR, reg);
3110 for (i = JME_TIMEOUT; i > 0; i--) {
3112 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3116 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3120 jme_init_tx_ring(struct jme_softc *sc)
3122 struct jme_ring_data *rd;
3123 struct jme_txdesc *txd;
3126 sc->jme_cdata.jme_tx_prod = 0;
3127 sc->jme_cdata.jme_tx_cons = 0;
3128 sc->jme_cdata.jme_tx_cnt = 0;
3130 rd = &sc->jme_rdata;
3131 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
3132 for (i = 0; i < JME_TX_RING_CNT; i++) {
3133 txd = &sc->jme_cdata.jme_txdesc[i];
3135 txd->tx_desc = &rd->jme_tx_ring[i];
3139 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
3140 sc->jme_cdata.jme_tx_ring_map,
3141 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3145 jme_init_ssb(struct jme_softc *sc)
3147 struct jme_ring_data *rd;
3149 rd = &sc->jme_rdata;
3150 bzero(rd->jme_ssb_block, JME_SSB_SIZE);
3151 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
3152 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3156 jme_init_rx_ring(struct jme_softc *sc)
3158 struct jme_ring_data *rd;
3159 struct jme_rxdesc *rxd;
3162 sc->jme_cdata.jme_rx_cons = 0;
3163 JME_RXCHAIN_RESET(sc);
3164 sc->jme_morework = 0;
3166 rd = &sc->jme_rdata;
3167 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3168 for (i = 0; i < JME_RX_RING_CNT; i++) {
3169 rxd = &sc->jme_cdata.jme_rxdesc[i];
3171 rxd->rx_desc = &rd->jme_rx_ring[i];
3172 if (jme_newbuf(sc, rxd) != 0)
3176 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3177 sc->jme_cdata.jme_rx_ring_map,
3178 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3184 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3186 struct jme_desc *desc;
3188 bus_dma_segment_t segs[1];
3192 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
3196 * The JMC250 has a 64bit boundary alignment limitation, so jme(4)
3197 * takes advantage of the hardware's 10 byte padding feature in order
3198 * not to copy the entire frame to align the IP header on a 32bit boundary.
3201 m->m_len = m->m_pkthdr.len = MCLBYTES;
3203 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3204 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3208 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3210 if (rxd->rx_m != NULL) {
3211 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3212 BUS_DMASYNC_POSTREAD);
3213 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
3215 map = rxd->rx_dmamap;
3216 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3217 sc->jme_cdata.jme_rx_sparemap = map;
3218 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3219 BUS_DMASYNC_PREREAD);
3222 desc = rxd->rx_desc;
3223 desc->buflen = htole32(segs[0].ds_len);
3224 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3225 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3226 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
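/*
 * Note the spare DMA map handling above: the new cluster is loaded into
 * jme_rx_sparemap first, so if bus_dmamap_load_mbuf_sg() fails the ring
 * slot keeps its previously loaded buffer.  Only after a successful
 * load is the old map unloaded and swapped with the spare, which is
 * presumably why a jme_newbuf() failure never leaves a hole in the Rx
 * ring.
 */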
3232 jme_set_vlan(struct jme_softc *sc)
3237 JME_LOCK_ASSERT(sc);
3240 reg = CSR_READ_4(sc, JME_RXMAC);
3241 reg &= ~RXMAC_VLAN_ENB;
3242 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3243 reg |= RXMAC_VLAN_ENB;
3244 CSR_WRITE_4(sc, JME_RXMAC, reg);
3248 jme_set_filter(struct jme_softc *sc)
3251 struct ifmultiaddr *ifma;
3256 JME_LOCK_ASSERT(sc);
3260 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3261 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3263 /* Always accept frames destined to our station address. */
3264 rxcfg |= RXMAC_UNICAST;
3265 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3266 rxcfg |= RXMAC_BROADCAST;
3267 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3268 if ((ifp->if_flags & IFF_PROMISC) != 0)
3269 rxcfg |= RXMAC_PROMISC;
3270 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3271 rxcfg |= RXMAC_ALLMULTI;
3272 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3273 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3274 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3279 * Set up the multicast address filter by passing all multicast
3280 * addresses through a CRC generator, and then using the low-order
3281 * 6 bits as an index into the 64 bit multicast hash table. The
3282 * high order bits select the register, while the rest of the bits
3283 * select the bit within the register.
3285 rxcfg |= RXMAC_MULTICAST;
3286 bzero(mchash, sizeof(mchash));
3288 if_maddr_rlock(ifp);
3289 TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
3290 if (ifma->ifma_addr->sa_family != AF_LINK)
3292 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3293 ifma->ifma_addr), ETHER_ADDR_LEN);
3295 /* Just want the 6 least significant bits. */
3298 /* Set the corresponding bit in the hash table. */
3299 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3301 if_maddr_runlock(ifp);
3303 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3304 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3305 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
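/*
 * A small worked example of the hash indexing above: after the CRC is
 * reduced to 6 bits, the top bit picks the 32-bit hash word (JME_MAR0
 * or JME_MAR1) via mchash[crc >> 5] and the low 5 bits pick the bit
 * within it.  A 6-bit value of 0x2a (binary 101010), for instance,
 * would set bit 10 of the word written to JME_MAR1.
 */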
3309 jme_stats_clear(struct jme_softc *sc)
3312 JME_LOCK_ASSERT(sc);
3314 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3317 /* Disable and clear counters. */
3318 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3319 /* Activate hw counters. */
3320 CSR_WRITE_4(sc, JME_STATCSR, 0);
3321 CSR_READ_4(sc, JME_STATCSR);
3322 bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3326 jme_stats_save(struct jme_softc *sc)
3329 JME_LOCK_ASSERT(sc);
3331 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3333 /* Save current counters. */
3334 bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3335 /* Disable and clear counters. */
3336 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3340 jme_stats_update(struct jme_softc *sc)
3342 struct jme_hw_stats *stat, *ostat;
3345 JME_LOCK_ASSERT(sc);
3347 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3349 stat = &sc->jme_stats;
3350 ostat = &sc->jme_ostats;
3351 stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3352 stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3353 reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3354 stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3355 STAT_RX_CRC_ERR_SHIFT;
3356 stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3357 STAT_RX_MII_ERR_SHIFT;
3358 reg = CSR_READ_4(sc, JME_STAT_RXERR);
3359 stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3360 STAT_RXERR_OFLOW_SHIFT;
3361 stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3362 STAT_RXERR_MPTY_SHIFT;
3363 reg = CSR_READ_4(sc, JME_STAT_FAIL);
3364 stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3365 stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3367 /* Account for previous counters. */
3368 stat->rx_good_frames += ostat->rx_good_frames;
3369 stat->rx_crc_errs += ostat->rx_crc_errs;
3370 stat->rx_mii_errs += ostat->rx_mii_errs;
3371 stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3372 stat->rx_desc_empty += ostat->rx_desc_empty;
3373 stat->rx_bad_frames += ostat->rx_bad_frames;
3374 stat->tx_good_frames += ostat->tx_good_frames;
3375 stat->tx_bad_frames += ostat->tx_bad_frames;
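/*
 * jme_stats_save() snapshots the running totals into jme_ostats and
 * clears the hardware MIB counters, so the additions above fold the
 * saved snapshot back in.  This keeps the software totals monotonic
 * across the counter resets done in jme_stats_clear()/jme_stats_save().
 */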
3379 jme_phy_down(struct jme_softc *sc)
3383 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
3384 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3385 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3387 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3388 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3389 reg &= ~PE1_GIGA_PDOWN_MASK;
3390 reg |= PE1_GIGA_PDOWN_D3;
3391 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3396 jme_phy_up(struct jme_softc *sc)
3401 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
3402 bmcr &= ~BMCR_PDOWN;
3403 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
3404 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3405 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3407 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3408 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3409 reg &= ~PE1_GIGA_PDOWN_MASK;
3410 reg |= PE1_GIGA_PDOWN_DIS;
3411 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3416 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3422 value = *(int *)arg1;
3423 error = sysctl_handle_int(oidp, &value, 0, req);
3424 if (error || req->newptr == NULL)
3426 if (value < low || value > high)
3428 *(int *)arg1 = value;
3434 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3436 return (sysctl_int_range(oidp, arg1, arg2, req,
3437 PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
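/*
 * The remaining sysctl handlers below follow the same pattern: each one
 * simply delegates to sysctl_int_range() with the bounds defined for
 * that tunable, so out-of-range writes are rejected while in-range
 * values are stored back through arg1.
 */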
3441 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3443 return (sysctl_int_range(oidp, arg1, arg2, req,
3444 PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3448 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3450 return (sysctl_int_range(oidp, arg1, arg2, req,
3451 PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3455 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3457 return (sysctl_int_range(oidp, arg1, arg2, req,
3458 PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3462 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3464 return (sysctl_int_range(oidp, arg1, arg2, req,
3465 JME_PROC_MIN, JME_PROC_MAX));