2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
36 #include <sys/endian.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
41 #include <sys/module.h>
43 #include <sys/queue.h>
44 #include <sys/socket.h>
45 #include <sys/sockio.h>
46 #include <sys/sysctl.h>
47 #include <sys/taskqueue.h>
51 #include <net/if_var.h>
52 #include <net/if_arp.h>
53 #include <net/ethernet.h>
54 #include <net/if_dl.h>
55 #include <net/if_media.h>
56 #include <net/if_types.h>
57 #include <net/if_vlan_var.h>
59 #include <netinet/in.h>
60 #include <netinet/in_systm.h>
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
64 #include <dev/mii/mii.h>
65 #include <dev/mii/miivar.h>
67 #include <dev/pci/pcireg.h>
68 #include <dev/pci/pcivar.h>
70 #include <machine/bus.h>
71 #include <machine/in_cksum.h>
73 #include <dev/jme/if_jmereg.h>
74 #include <dev/jme/if_jmevar.h>
76 /* "device miibus" required. See GENERIC if you get errors here. */
77 #include "miibus_if.h"
79 /* Define the following to disable printing Rx errors. */
80 #undef JME_SHOW_ERRORS
82 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
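/*
 * This mask is what jme_attach() later loads into if_hwassist: the Tx
 * checksums (IPv4 header, TCP, UDP) the chip is asked to compute.
 */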
84 MODULE_DEPEND(jme, pci, 1, 1, 1);
85 MODULE_DEPEND(jme, ether, 1, 1, 1);
86 MODULE_DEPEND(jme, miibus, 1, 1, 1);
89 static int msi_disable = 0;
90 static int msix_disable = 0;
91 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
92 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
95 * Devices supported by this driver.
97 static struct jme_dev {
98 uint16_t jme_vendorid;
99 uint16_t jme_deviceid;
100 const char *jme_name;
102 { VENDORID_JMICRON, DEVICEID_JMC250,
103 "JMicron Inc, JMC25x Gigabit Ethernet" },
104 { VENDORID_JMICRON, DEVICEID_JMC260,
105 "JMicron Inc, JMC26x Fast Ethernet" },
108 static int jme_miibus_readreg(device_t, int, int);
109 static int jme_miibus_writereg(device_t, int, int, int);
110 static void jme_miibus_statchg(device_t);
111 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
112 static int jme_mediachange(struct ifnet *);
113 static int jme_probe(device_t);
114 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
115 static int jme_eeprom_macaddr(struct jme_softc *);
116 static int jme_efuse_macaddr(struct jme_softc *);
117 static void jme_reg_macaddr(struct jme_softc *);
118 static void jme_set_macaddr(struct jme_softc *, uint8_t *);
119 static void jme_map_intr_vector(struct jme_softc *);
120 static int jme_attach(device_t);
121 static int jme_detach(device_t);
122 static void jme_sysctl_node(struct jme_softc *);
123 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
124 static int jme_dma_alloc(struct jme_softc *);
125 static void jme_dma_free(struct jme_softc *);
126 static int jme_shutdown(device_t);
127 static void jme_setlinkspeed(struct jme_softc *);
128 static void jme_setwol(struct jme_softc *);
129 static int jme_suspend(device_t);
130 static int jme_resume(device_t);
131 static int jme_encap(struct jme_softc *, struct mbuf **);
132 static void jme_start(struct ifnet *);
133 static void jme_start_locked(struct ifnet *);
134 static void jme_watchdog(struct jme_softc *);
135 static int jme_ioctl(struct ifnet *, u_long, caddr_t);
136 static void jme_mac_config(struct jme_softc *);
137 static void jme_link_task(void *, int);
138 static int jme_intr(void *);
139 static void jme_int_task(void *, int);
140 static void jme_txeof(struct jme_softc *);
141 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
142 static void jme_rxeof(struct jme_softc *);
143 static int jme_rxintr(struct jme_softc *, int);
144 static void jme_tick(void *);
145 static void jme_reset(struct jme_softc *);
146 static void jme_init(void *);
147 static void jme_init_locked(struct jme_softc *);
148 static void jme_stop(struct jme_softc *);
149 static void jme_stop_tx(struct jme_softc *);
150 static void jme_stop_rx(struct jme_softc *);
151 static int jme_init_rx_ring(struct jme_softc *);
152 static void jme_init_tx_ring(struct jme_softc *);
153 static void jme_init_ssb(struct jme_softc *);
154 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
155 static void jme_set_vlan(struct jme_softc *);
156 static void jme_set_filter(struct jme_softc *);
157 static void jme_stats_clear(struct jme_softc *);
158 static void jme_stats_save(struct jme_softc *);
159 static void jme_stats_update(struct jme_softc *);
160 static void jme_phy_down(struct jme_softc *);
161 static void jme_phy_up(struct jme_softc *);
162 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
163 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
164 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
165 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
166 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
167 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
170 static device_method_t jme_methods[] = {
171 /* Device interface. */
172 DEVMETHOD(device_probe, jme_probe),
173 DEVMETHOD(device_attach, jme_attach),
174 DEVMETHOD(device_detach, jme_detach),
175 DEVMETHOD(device_shutdown, jme_shutdown),
176 DEVMETHOD(device_suspend, jme_suspend),
177 DEVMETHOD(device_resume, jme_resume),
180 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
181 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
182 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
187 static driver_t jme_driver = {
190 sizeof(struct jme_softc)
193 static devclass_t jme_devclass;
195 DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
196 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
198 static struct resource_spec jme_res_spec_mem[] = {
199 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
203 static struct resource_spec jme_irq_spec_legacy[] = {
204 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
208 static struct resource_spec jme_irq_spec_msi[] = {
209 { SYS_RES_IRQ, 1, RF_ACTIVE },
214 * Read a PHY register on the MII of the JMC250.
217 jme_miibus_readreg(device_t dev, int phy, int reg)
219 struct jme_softc *sc;
223 sc = device_get_softc(dev);
225 /* For FPGA version, PHY address 0 should be ignored. */
226 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
229 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
230 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
231 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
233 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
238 device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
242 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
246 * Write a PHY register on the MII of the JMC250.
249 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
251 struct jme_softc *sc;
254 sc = device_get_softc(dev);
256 /* For FPGA version, PHY address 0 should be ignored. */
257 if ((sc->jme_flags & JME_FLAG_FPGA) != 0 && phy == 0)
260 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
261 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
262 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
263 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
265 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
270 device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
276 * Callback from MII layer when media changes.
279 jme_miibus_statchg(device_t dev)
281 struct jme_softc *sc;
283 sc = device_get_softc(dev);
284 taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
288 * Get the current interface media status.
291 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
293 struct jme_softc *sc;
294 struct mii_data *mii;
298 if ((ifp->if_flags & IFF_UP) == 0) {
302 mii = device_get_softc(sc->jme_miibus);
305 ifmr->ifm_status = mii->mii_media_status;
306 ifmr->ifm_active = mii->mii_media_active;
311 * Set hardware to newly-selected media.
314 jme_mediachange(struct ifnet *ifp)
316 struct jme_softc *sc;
317 struct mii_data *mii;
318 struct mii_softc *miisc;
323 mii = device_get_softc(sc->jme_miibus);
324 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
326 error = mii_mediachg(mii);
333 jme_probe(device_t dev)
337 uint16_t vendor, devid;
339 vendor = pci_get_vendor(dev);
340 devid = pci_get_device(dev);
342 for (i = 0; i < nitems(jme_devs); i++, sp++) {
343 if (vendor == sp->jme_vendorid &&
344 devid == sp->jme_deviceid) {
345 device_set_desc(dev, sp->jme_name);
346 return (BUS_PROBE_DEFAULT);
354 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
360 for (i = JME_TIMEOUT; i > 0; i--) {
361 reg = CSR_READ_4(sc, JME_SMBCSR);
362 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
368 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
372 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
373 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
374 for (i = JME_TIMEOUT; i > 0; i--) {
376 reg = CSR_READ_4(sc, JME_SMBINTF);
377 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
382 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
386 reg = CSR_READ_4(sc, JME_SMBINTF);
387 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
393 jme_eeprom_macaddr(struct jme_softc *sc)
395 uint8_t eaddr[ETHER_ADDR_LEN];
396 uint8_t fup, reg, val;
401 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
402 fup != JME_EEPROM_SIG0)
404 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
405 fup != JME_EEPROM_SIG1)
409 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
411 if (JME_EEPROM_MKDESC(JME_EEPROM_FUNC0, JME_EEPROM_PAGE_BAR1) ==
412 (fup & (JME_EEPROM_FUNC_MASK | JME_EEPROM_PAGE_MASK))) {
413 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
415 if (reg >= JME_PAR0 &&
416 reg < JME_PAR0 + ETHER_ADDR_LEN) {
417 if (jme_eeprom_read_byte(sc, offset + 2,
420 eaddr[reg - JME_PAR0] = val;
424 /* Check for the end of EEPROM descriptor. */
425 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
427 /* Try the next EEPROM descriptor. */
428 offset += JME_EEPROM_DESC_BYTES;
429 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
431 if (match == ETHER_ADDR_LEN) {
432 bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
440 jme_efuse_macaddr(struct jme_softc *sc)
445 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
446 if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR | EFUSE_CTL1_AUTOLAOD_DONE)) !=
447 EFUSE_CTL1_AUTOLAOD_DONE)
449 /* Reset eFuse controller. */
450 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
451 reg |= EFUSE_CTL2_RESET;
452 pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
453 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL2, 4);
454 reg &= ~EFUSE_CTL2_RESET;
455 pci_write_config(sc->jme_dev, JME_EFUSE_CTL2, reg, 4);
457 /* Have eFuse reload station address to MAC controller. */
458 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
459 reg &= ~EFUSE_CTL1_CMD_MASK;
460 reg |= EFUSE_CTL1_CMD_AUTOLOAD | EFUSE_CTL1_EXECUTE;
461 pci_write_config(sc->jme_dev, JME_EFUSE_CTL1, reg, 4);
464 * Verify completion of the eFuse autoload command. It should be
465 * completed within 108us.
468 for (i = 10; i > 0; i--) {
469 reg = pci_read_config(sc->jme_dev, JME_EFUSE_CTL1, 4);
470 if ((reg & (EFUSE_CTL1_AUTOLOAD_ERR |
471 EFUSE_CTL1_AUTOLAOD_DONE)) != EFUSE_CTL1_AUTOLAOD_DONE) {
475 if ((reg & EFUSE_CTL1_EXECUTE) == 0)
477 /* Station address loading is still in progress. */
481 device_printf(sc->jme_dev, "eFuse autoload timed out.\n");
489 jme_reg_macaddr(struct jme_softc *sc)
493 /* Read station address. */
494 par0 = CSR_READ_4(sc, JME_PAR0);
495 par1 = CSR_READ_4(sc, JME_PAR1);
497 if ((par0 == 0 && par1 == 0) ||
498 (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
499 device_printf(sc->jme_dev,
500 "Failed to retrieve Ethernet address.\n");
503 * For controllers that use eFuse, the station address
504 * could also be extracted from JME_PCI_PAR0 and
505 * JME_PCI_PAR1 registers in PCI configuration space.
506 * Each register holds exactly half of the station address
507 * (24 bits), so use the JME_PAR0/JME_PAR1 registers instead.
509 sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
510 sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
511 sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
512 sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
513 sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
514 sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
519 jme_set_macaddr(struct jme_softc *sc, uint8_t *eaddr)
524 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
526 * Avoid reprogramming the station address if it is the
527 * same as the previous one. Note, a reprogrammed station
528 * address is permanent, as if it were written to EEPROM.
529 * So if the station address was changed by the
530 * administrator, it's possible to lose the factory-
531 * configured address when the driver fails to restore it
532 * (e.g. on reboot or system crash).
534 if (bcmp(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN) != 0) {
535 for (i = 0; i < ETHER_ADDR_LEN; i++) {
536 val = JME_EFUSE_EEPROM_FUNC0 <<
537 JME_EFUSE_EEPROM_FUNC_SHIFT;
538 val |= JME_EFUSE_EEPROM_PAGE_BAR1 <<
539 JME_EFUSE_EEPROM_PAGE_SHIFT;
540 val |= (JME_PAR0 + i) <<
541 JME_EFUSE_EEPROM_ADDR_SHIFT;
542 val |= eaddr[i] << JME_EFUSE_EEPROM_DATA_SHIFT;
543 pci_write_config(sc->jme_dev, JME_EFUSE_EEPROM,
544 val | JME_EFUSE_EEPROM_WRITE, 4);
548 CSR_WRITE_4(sc, JME_PAR0,
549 eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
550 CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
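/*
 * The two CSR writes above lay the 6-byte station address out
 * little-endian: JME_PAR0 holds eaddr[0..3] and the low 16 bits of
 * JME_PAR1 hold eaddr[4..5], mirroring how jme_reg_macaddr() reads
 * the address back.
 */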
555 jme_map_intr_vector(struct jme_softc *sc)
557 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
559 bzero(map, sizeof(map));
561 /* Map Tx interrupts source to MSI/MSIX vector 2. */
562 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] |=
563 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
564 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
565 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
566 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
567 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
568 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
569 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
570 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
571 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
572 map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
573 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
574 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
575 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
576 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
577 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
578 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
579 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
580 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
581 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
583 /* Map Rx interrupts source to MSI/MSIX vector 1. */
584 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] |=
585 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
586 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] |=
587 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
588 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] |=
589 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
590 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] |=
591 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
592 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] |=
593 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
594 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] |=
595 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
596 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] |=
597 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
598 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] |=
599 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
600 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] |=
601 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
602 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] |=
603 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
604 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] |=
605 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
606 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] |=
607 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
608 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] |=
609 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
610 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] |=
611 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
612 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] |=
613 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
614 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] |=
615 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
617 /* All other interrupt sources stay mapped to MSI/MSI-X vector 0. */
618 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
619 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
620 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
621 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
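/*
 * As the MSINUM_REG_INDEX()/MSINUM_INTR_SOURCE() usage above suggests,
 * each 32-bit JME_MSINUM register packs the vector numbers for a group
 * of interrupt sources; any source never OR'ed into map[] keeps the
 * value 0 and is therefore serviced by vector 0.
 */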
625 jme_attach(device_t dev)
627 struct jme_softc *sc;
629 struct mii_softc *miisc;
630 struct mii_data *mii;
633 int error, i, mii_flags, msic, msixc, pmc;
636 sc = device_get_softc(dev);
639 mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
641 callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
642 TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
643 TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
646 * Map the device. The JMC250 supports both memory-mapped and I/O
647 * register space access. Because I/O register access would have
648 * to use different BARs to reach the registers, it's a waste of
649 * time to use I/O register space access. The JMC250 maps its
650 * entire register space into 16K of memory space.
652 pci_enable_busmaster(dev);
653 sc->jme_res_spec = jme_res_spec_mem;
654 sc->jme_irq_spec = jme_irq_spec_legacy;
655 error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
657 device_printf(dev, "cannot allocate memory resources.\n");
661 /* Allocate IRQ resources. */
662 msixc = pci_msix_count(dev);
663 msic = pci_msi_count(dev);
665 device_printf(dev, "MSIX count : %d\n", msixc);
666 device_printf(dev, "MSI count : %d\n", msic);
669 /* Use 1 MSI/MSI-X. */
674 /* Prefer MSIX over MSI. */
675 if (msix_disable == 0 || msi_disable == 0) {
676 if (msix_disable == 0 && msixc > 0 &&
677 pci_alloc_msix(dev, &msixc) == 0) {
679 device_printf(dev, "Using %d MSIX messages.\n",
681 sc->jme_flags |= JME_FLAG_MSIX;
682 sc->jme_irq_spec = jme_irq_spec_msi;
684 pci_release_msi(dev);
686 if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
687 msic > 0 && pci_alloc_msi(dev, &msic) == 0) {
689 device_printf(dev, "Using %d MSI messages.\n",
691 sc->jme_flags |= JME_FLAG_MSI;
692 sc->jme_irq_spec = jme_irq_spec_msi;
694 pci_release_msi(dev);
696 /* Map interrupt vector 0, 1 and 2. */
697 if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
698 (sc->jme_flags & JME_FLAG_MSIX) != 0)
699 jme_map_intr_vector(sc);
702 error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
704 device_printf(dev, "cannot allocate IRQ resources.\n");
708 sc->jme_rev = pci_get_device(dev);
709 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260) {
710 sc->jme_flags |= JME_FLAG_FASTETH;
711 sc->jme_flags |= JME_FLAG_NOJUMBO;
713 reg = CSR_READ_4(sc, JME_CHIPMODE);
714 sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
715 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
717 sc->jme_flags |= JME_FLAG_FPGA;
719 device_printf(dev, "PCI device revision : 0x%04x\n",
721 device_printf(dev, "Chip revision : 0x%02x\n",
723 if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
724 device_printf(dev, "FPGA revision : 0x%04x\n",
725 (reg & CHIPMODE_FPGA_REV_MASK) >>
726 CHIPMODE_FPGA_REV_SHIFT);
728 if (sc->jme_chip_rev == 0xFF) {
729 device_printf(dev, "Unknown chip revision : 0x%02x\n",
735 /* Identify controller features and bugs. */
736 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2) {
737 if ((sc->jme_rev & DEVICEID_JMC2XX_MASK) == DEVICEID_JMC260 &&
738 CHIPMODE_REVFM(sc->jme_chip_rev) == 2)
739 sc->jme_flags |= JME_FLAG_DMA32BIT;
740 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
741 sc->jme_flags |= JME_FLAG_EFUSE | JME_FLAG_PCCPCD;
742 sc->jme_flags |= JME_FLAG_TXCLK | JME_FLAG_RXCLK;
743 sc->jme_flags |= JME_FLAG_HWMIB;
746 /* Reset the ethernet controller. */
749 /* Get station address. */
750 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0) {
751 error = jme_efuse_macaddr(sc);
756 reg = CSR_READ_4(sc, JME_SMBCSR);
757 if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
758 error = jme_eeprom_macaddr(sc);
759 if (error != 0 && bootverbose)
760 device_printf(sc->jme_dev,
761 "ethernet hardware address not found in EEPROM.\n");
768 * The integrated JR0211 has a fixed PHY address whereas the FPGA
769 * version requires PHY probing to get the correct PHY address.
771 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
772 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
773 GPREG0_PHY_ADDR_MASK;
775 device_printf(dev, "PHY is at address %d.\n",
780 /* Set max allowable DMA size. */
781 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
782 sc->jme_flags |= JME_FLAG_PCIE;
783 burst = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
785 device_printf(dev, "Read request size : %d bytes.\n",
786 128 << ((burst >> 12) & 0x07));
787 device_printf(dev, "TLP payload size : %d bytes.\n",
788 128 << ((burst >> 5) & 0x07));
790 switch ((burst >> 12) & 0x07) {
792 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
795 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
798 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
801 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
803 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
804 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
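/*
 * The switch above decodes the PCIe Device Control MRRS field
 * (bits 14:12), which encodes a power of two: a field value of n
 * means a read request size of 128 << n bytes (0 -> 128, 2 -> 512),
 * the same formula used by the bootverbose printf above.
 */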
806 /* Create coalescing sysctl node. */
808 if ((error = jme_dma_alloc(sc)) != 0)
811 ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
813 device_printf(dev, "cannot allocate ifnet structure.\n");
819 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
820 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
821 ifp->if_ioctl = jme_ioctl;
822 ifp->if_start = jme_start;
823 ifp->if_init = jme_init;
824 ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
825 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
826 IFQ_SET_READY(&ifp->if_snd);
827 /* JMC250 supports Tx/Rx checksum offload as well as TSO. */
828 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
829 ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
830 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
831 sc->jme_flags |= JME_FLAG_PMCAP;
832 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
834 ifp->if_capenable = ifp->if_capabilities;
838 mii_flags = MIIF_DOPAUSE;
839 /* Ask the PHY driver to perform PHY calibration. */
840 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5)
841 mii_flags |= MIIF_MACPRIV0;
842 /* Set up MII bus. */
843 error = mii_attach(dev, &sc->jme_miibus, ifp, jme_mediachange,
844 jme_mediastatus, BMSR_DEFCAPMASK,
845 sc->jme_flags & JME_FLAG_FPGA ? MII_PHY_ANY : sc->jme_phyaddr,
846 MII_OFFSET_ANY, mii_flags);
848 device_printf(dev, "attaching PHYs failed\n");
853 * Force PHY to FPGA mode.
855 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
856 mii = device_get_softc(sc->jme_miibus);
857 if (mii->mii_instance != 0) {
858 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
859 if (miisc->mii_phy != 0) {
860 sc->jme_phyaddr = miisc->mii_phy;
864 if (sc->jme_phyaddr != 0) {
865 device_printf(sc->jme_dev,
866 "FPGA PHY is at %d\n", sc->jme_phyaddr);
868 jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
874 ether_ifattach(ifp, sc->jme_eaddr);
876 /* VLAN capability setup */
877 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
878 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
879 ifp->if_capenable = ifp->if_capabilities;
881 /* Tell the upper layer(s) we support long frames. */
882 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
884 /* Create local taskq. */
885 sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
886 taskqueue_thread_enqueue, &sc->jme_tq);
887 if (sc->jme_tq == NULL) {
888 device_printf(dev, "could not create taskqueue.\n");
893 taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
894 device_get_nameunit(sc->jme_dev));
896 for (i = 0; i < 1; i++) {
897 error = bus_setup_intr(dev, sc->jme_irq[i],
898 INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
899 &sc->jme_intrhand[i]);
905 device_printf(dev, "could not set up interrupt handler.\n");
906 taskqueue_free(sc->jme_tq);
920 jme_detach(device_t dev)
922 struct jme_softc *sc;
926 sc = device_get_softc(dev);
929 if (device_is_attached(dev)) {
931 sc->jme_flags |= JME_FLAG_DETACH;
934 callout_drain(&sc->jme_tick_ch);
935 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
936 taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
937 /* Restore possibly modified station address. */
938 if ((sc->jme_flags & JME_FLAG_EFUSE) != 0)
939 jme_set_macaddr(sc, sc->jme_eaddr);
943 if (sc->jme_tq != NULL) {
944 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
945 taskqueue_free(sc->jme_tq);
949 if (sc->jme_miibus != NULL) {
950 device_delete_child(dev, sc->jme_miibus);
951 sc->jme_miibus = NULL;
953 bus_generic_detach(dev);
961 for (i = 0; i < 1; i++) {
962 if (sc->jme_intrhand[i] != NULL) {
963 bus_teardown_intr(dev, sc->jme_irq[i],
964 sc->jme_intrhand[i]);
965 sc->jme_intrhand[i] = NULL;
969 if (sc->jme_irq[0] != NULL)
970 bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
971 if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
972 pci_release_msi(dev);
973 if (sc->jme_res[0] != NULL)
974 bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
975 mtx_destroy(&sc->jme_mtx);
980 #define JME_SYSCTL_STAT_ADD32(c, h, n, p, d) \
981 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
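/*
 * Convenience wrapper: each hardware MIB counter below is exported as
 * a read-only unsigned 32-bit sysctl leaf.
 */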
984 jme_sysctl_node(struct jme_softc *sc)
986 struct sysctl_ctx_list *ctx;
987 struct sysctl_oid_list *child, *parent;
988 struct sysctl_oid *tree;
989 struct jme_hw_stats *stats;
992 stats = &sc->jme_stats;
993 ctx = device_get_sysctl_ctx(sc->jme_dev);
994 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev));
996 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_to",
997 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0,
998 sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
1000 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_coal_pkt",
1001 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt, 0,
1002 sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
1004 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_to",
1005 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to, 0,
1006 sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
1008 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_coal_pkt",
1009 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt, 0,
1010 sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
1012 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit",
1013 CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit, 0,
1014 sysctl_hw_jme_proc_limit, "I",
1015 "max number of Rx events to process");
1017 /* Pull in device tunables. */
1018 sc->jme_process_limit = JME_PROC_DEFAULT;
1019 error = resource_int_value(device_get_name(sc->jme_dev),
1020 device_get_unit(sc->jme_dev), "process_limit",
1021 &sc->jme_process_limit);
1023 if (sc->jme_process_limit < JME_PROC_MIN ||
1024 sc->jme_process_limit > JME_PROC_MAX) {
1025 device_printf(sc->jme_dev,
1026 "process_limit value out of range; "
1027 "using default: %d\n", JME_PROC_DEFAULT);
1028 sc->jme_process_limit = JME_PROC_DEFAULT;
1032 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1033 error = resource_int_value(device_get_name(sc->jme_dev),
1034 device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
1036 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
1037 sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
1038 device_printf(sc->jme_dev,
1039 "tx_coal_to value out of range; "
1040 "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
1041 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
1045 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1046 error = resource_int_value(device_get_name(sc->jme_dev),
1047 device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_to);
1049 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
1050 sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
1051 device_printf(sc->jme_dev,
1052 "tx_coal_pkt value out of range; "
1053 "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
1054 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
1058 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1059 error = resource_int_value(device_get_name(sc->jme_dev),
1060 device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
1062 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
1063 sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
1064 device_printf(sc->jme_dev,
1065 "rx_coal_to value out of range; "
1066 "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
1067 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
1071 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1072 error = resource_int_value(device_get_name(sc->jme_dev),
1073 device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_to);
1075 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
1076 sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
1077 device_printf(sc->jme_dev,
1078 "tx_coal_pkt value out of range; "
1079 "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
1080 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
1084 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
1087 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
1088 NULL, "JME statistics");
1089 parent = SYSCTL_CHILDREN(tree);
1091 /* Rx statistics. */
1092 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
1093 NULL, "Rx MAC statistics");
1094 child = SYSCTL_CHILDREN(tree);
1095 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1096 &stats->rx_good_frames, "Good frames");
1097 JME_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
1098 &stats->rx_crc_errs, "CRC errors");
1099 JME_SYSCTL_STAT_ADD32(ctx, child, "mii_errs",
1100 &stats->rx_mii_errs, "MII errors");
1101 JME_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
1102 &stats->rx_fifo_oflows, "FIFO overflows");
1103 JME_SYSCTL_STAT_ADD32(ctx, child, "desc_empty",
1104 &stats->rx_desc_empty, "Descriptor empty");
1105 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1106 &stats->rx_bad_frames, "Bad frames");
1108 /* Tx statistics. */
1109 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
1110 NULL, "Tx MAC statistics");
1111 child = SYSCTL_CHILDREN(tree);
1112 JME_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
1113 &stats->tx_good_frames, "Good frames");
1114 JME_SYSCTL_STAT_ADD32(ctx, child, "bad_frames",
1115 &stats->tx_bad_frames, "Bad frames");
1118 #undef JME_SYSCTL_STAT_ADD32
1120 struct jme_dmamap_arg {
1121 bus_addr_t jme_busaddr;
1125 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1127 struct jme_dmamap_arg *ctx;
1132 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1134 ctx = (struct jme_dmamap_arg *)arg;
1135 ctx->jme_busaddr = segs[0].ds_addr;
1139 jme_dma_alloc(struct jme_softc *sc)
1141 struct jme_dmamap_arg ctx;
1142 struct jme_txdesc *txd;
1143 struct jme_rxdesc *rxd;
1144 bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
1147 lowaddr = BUS_SPACE_MAXADDR;
1148 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1149 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1152 /* Create parent ring tag. */
1153 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1154 1, 0, /* algnmnt, boundary */
1155 lowaddr, /* lowaddr */
1156 BUS_SPACE_MAXADDR, /* highaddr */
1157 NULL, NULL, /* filter, filterarg */
1158 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1160 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1162 NULL, NULL, /* lockfunc, lockarg */
1163 &sc->jme_cdata.jme_ring_tag);
1165 device_printf(sc->jme_dev,
1166 "could not create parent ring DMA tag.\n");
1169 /* Create tag for Tx ring. */
1170 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1171 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */
1172 BUS_SPACE_MAXADDR, /* lowaddr */
1173 BUS_SPACE_MAXADDR, /* highaddr */
1174 NULL, NULL, /* filter, filterarg */
1175 JME_TX_RING_SIZE, /* maxsize */
1177 JME_TX_RING_SIZE, /* maxsegsize */
1179 NULL, NULL, /* lockfunc, lockarg */
1180 &sc->jme_cdata.jme_tx_ring_tag);
1182 device_printf(sc->jme_dev,
1183 "could not allocate Tx ring DMA tag.\n");
1187 /* Create tag for Rx ring. */
1188 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1189 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */
1190 lowaddr, /* lowaddr */
1191 BUS_SPACE_MAXADDR, /* highaddr */
1192 NULL, NULL, /* filter, filterarg */
1193 JME_RX_RING_SIZE, /* maxsize */
1195 JME_RX_RING_SIZE, /* maxsegsize */
1197 NULL, NULL, /* lockfunc, lockarg */
1198 &sc->jme_cdata.jme_rx_ring_tag);
1200 device_printf(sc->jme_dev,
1201 "could not allocate Rx ring DMA tag.\n");
1205 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1206 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1207 (void **)&sc->jme_rdata.jme_tx_ring,
1208 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1209 &sc->jme_cdata.jme_tx_ring_map);
1211 device_printf(sc->jme_dev,
1212 "could not allocate DMA'able memory for Tx ring.\n");
1216 ctx.jme_busaddr = 0;
1217 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1218 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1219 JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1220 if (error != 0 || ctx.jme_busaddr == 0) {
1221 device_printf(sc->jme_dev,
1222 "could not load DMA'able memory for Tx ring.\n");
1225 sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
1227 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1228 error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1229 (void **)&sc->jme_rdata.jme_rx_ring,
1230 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1231 &sc->jme_cdata.jme_rx_ring_map);
1233 device_printf(sc->jme_dev,
1234 "could not allocate DMA'able memory for Rx ring.\n");
1238 ctx.jme_busaddr = 0;
1239 error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1240 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1241 JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1242 if (error != 0 || ctx.jme_busaddr == 0) {
1243 device_printf(sc->jme_dev,
1244 "could not load DMA'able memory for Rx ring.\n");
1247 sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
1249 if (lowaddr != BUS_SPACE_MAXADDR_32BIT) {
1250 /* Tx/Rx descriptor queue should reside within 4GB boundary. */
1251 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr +
1253 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr +
1255 if ((JME_ADDR_HI(tx_ring_end) !=
1256 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1257 (JME_ADDR_HI(rx_ring_end) !=
1258 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1259 device_printf(sc->jme_dev, "4GB boundary crossed, "
1260 "switching to 32bit DMA address mode.\n");
1262 /* Limit DMA address space to 32bit and try again. */
1263 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1268 lowaddr = BUS_SPACE_MAXADDR;
1269 if ((sc->jme_flags & JME_FLAG_DMA32BIT) != 0)
1270 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1271 /* Create parent buffer tag. */
1272 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1273 1, 0, /* algnmnt, boundary */
1274 lowaddr, /* lowaddr */
1275 BUS_SPACE_MAXADDR, /* highaddr */
1276 NULL, NULL, /* filter, filterarg */
1277 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1279 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1281 NULL, NULL, /* lockfunc, lockarg */
1282 &sc->jme_cdata.jme_buffer_tag);
1284 device_printf(sc->jme_dev,
1285 "could not create parent buffer DMA tag.\n");
1289 /* Create shadow status block tag. */
1290 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1291 JME_SSB_ALIGN, 0, /* algnmnt, boundary */
1292 BUS_SPACE_MAXADDR, /* lowaddr */
1293 BUS_SPACE_MAXADDR, /* highaddr */
1294 NULL, NULL, /* filter, filterarg */
1295 JME_SSB_SIZE, /* maxsize */
1297 JME_SSB_SIZE, /* maxsegsize */
1299 NULL, NULL, /* lockfunc, lockarg */
1300 &sc->jme_cdata.jme_ssb_tag);
1302 device_printf(sc->jme_dev,
1303 "could not create shared status block DMA tag.\n");
1307 /* Create tag for Tx buffers. */
1308 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1309 1, 0, /* algnmnt, boundary */
1310 BUS_SPACE_MAXADDR, /* lowaddr */
1311 BUS_SPACE_MAXADDR, /* highaddr */
1312 NULL, NULL, /* filter, filterarg */
1313 JME_TSO_MAXSIZE, /* maxsize */
1314 JME_MAXTXSEGS, /* nsegments */
1315 JME_TSO_MAXSEGSIZE, /* maxsegsize */
1317 NULL, NULL, /* lockfunc, lockarg */
1318 &sc->jme_cdata.jme_tx_tag);
1320 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1324 /* Create tag for Rx buffers. */
1325 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1326 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
1327 BUS_SPACE_MAXADDR, /* lowaddr */
1328 BUS_SPACE_MAXADDR, /* highaddr */
1329 NULL, NULL, /* filter, filterarg */
1330 MCLBYTES, /* maxsize */
1332 MCLBYTES, /* maxsegsize */
1334 NULL, NULL, /* lockfunc, lockarg */
1335 &sc->jme_cdata.jme_rx_tag);
1337 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1342 * Allocate DMA'able memory and load the DMA map for shared
1345 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1346 (void **)&sc->jme_rdata.jme_ssb_block,
1347 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1348 &sc->jme_cdata.jme_ssb_map);
1350 device_printf(sc->jme_dev, "could not allocate DMA'able "
1351 "memory for shared status block.\n");
1355 ctx.jme_busaddr = 0;
1356 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1357 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1358 JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1359 if (error != 0 || ctx.jme_busaddr == 0) {
1360 device_printf(sc->jme_dev, "could not load DMA'able memory "
1361 "for shared status block.\n");
1364 sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
1366 /* Create DMA maps for Tx buffers. */
1367 for (i = 0; i < JME_TX_RING_CNT; i++) {
1368 txd = &sc->jme_cdata.jme_txdesc[i];
1370 txd->tx_dmamap = NULL;
1371 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1374 device_printf(sc->jme_dev,
1375 "could not create Tx dmamap.\n");
1379 /* Create DMA maps for Rx buffers. */
1380 if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1381 &sc->jme_cdata.jme_rx_sparemap)) != 0) {
1382 device_printf(sc->jme_dev,
1383 "could not create spare Rx dmamap.\n");
1386 for (i = 0; i < JME_RX_RING_CNT; i++) {
1387 rxd = &sc->jme_cdata.jme_rxdesc[i];
1389 rxd->rx_dmamap = NULL;
1390 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1393 device_printf(sc->jme_dev,
1394 "could not create Rx dmamap.\n");
1404 jme_dma_free(struct jme_softc *sc)
1406 struct jme_txdesc *txd;
1407 struct jme_rxdesc *rxd;
1411 if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
1412 if (sc->jme_rdata.jme_tx_ring_paddr)
1413 bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
1414 sc->jme_cdata.jme_tx_ring_map);
1415 if (sc->jme_rdata.jme_tx_ring)
1416 bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
1417 sc->jme_rdata.jme_tx_ring,
1418 sc->jme_cdata.jme_tx_ring_map);
1419 sc->jme_rdata.jme_tx_ring = NULL;
1420 sc->jme_rdata.jme_tx_ring_paddr = 0;
1421 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
1422 sc->jme_cdata.jme_tx_ring_tag = NULL;
1425 if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
1426 if (sc->jme_rdata.jme_rx_ring_paddr)
1427 bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
1428 sc->jme_cdata.jme_rx_ring_map);
1429 if (sc->jme_rdata.jme_rx_ring)
1430 bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
1431 sc->jme_rdata.jme_rx_ring,
1432 sc->jme_cdata.jme_rx_ring_map);
1433 sc->jme_rdata.jme_rx_ring = NULL;
1434 sc->jme_rdata.jme_rx_ring_paddr = 0;
1435 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
1436 sc->jme_cdata.jme_rx_ring_tag = NULL;
1439 if (sc->jme_cdata.jme_tx_tag != NULL) {
1440 for (i = 0; i < JME_TX_RING_CNT; i++) {
1441 txd = &sc->jme_cdata.jme_txdesc[i];
1442 if (txd->tx_dmamap != NULL) {
1443 bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
1445 txd->tx_dmamap = NULL;
1448 bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
1449 sc->jme_cdata.jme_tx_tag = NULL;
1452 if (sc->jme_cdata.jme_rx_tag != NULL) {
1453 for (i = 0; i < JME_RX_RING_CNT; i++) {
1454 rxd = &sc->jme_cdata.jme_rxdesc[i];
1455 if (rxd->rx_dmamap != NULL) {
1456 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1458 rxd->rx_dmamap = NULL;
1461 if (sc->jme_cdata.jme_rx_sparemap != NULL) {
1462 bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
1463 sc->jme_cdata.jme_rx_sparemap);
1464 sc->jme_cdata.jme_rx_sparemap = NULL;
1466 bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
1467 sc->jme_cdata.jme_rx_tag = NULL;
1470 /* Shared status block. */
1471 if (sc->jme_cdata.jme_ssb_tag != NULL) {
1472 if (sc->jme_rdata.jme_ssb_block_paddr)
1473 bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
1474 sc->jme_cdata.jme_ssb_map);
1475 if (sc->jme_rdata.jme_ssb_block)
1476 bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
1477 sc->jme_rdata.jme_ssb_block,
1478 sc->jme_cdata.jme_ssb_map);
1479 sc->jme_rdata.jme_ssb_block = NULL;
1480 sc->jme_rdata.jme_ssb_block_paddr = 0;
1481 bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
1482 sc->jme_cdata.jme_ssb_tag = NULL;
1485 if (sc->jme_cdata.jme_buffer_tag != NULL) {
1486 bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
1487 sc->jme_cdata.jme_buffer_tag = NULL;
1489 if (sc->jme_cdata.jme_ring_tag != NULL) {
1490 bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
1491 sc->jme_cdata.jme_ring_tag = NULL;
1496 * Make sure the interface is stopped at reboot time.
1499 jme_shutdown(device_t dev)
1502 return (jme_suspend(dev));
1506 * Unlike other ethernet controllers, the JMC250 requires
1507 * explicitly resetting the link speed to 10/100Mbps, as a
1508 * gigabit link will consume more power than 375mA.
1509 * Note, we reset the link speed to 10/100Mbps with
1510 * auto-negotiation, but we don't know whether that operation
1511 * will succeed or not, as we have no control after powering
1512 * off. If the renegotiation fails, WOL may not work. Running
1513 * at 1Gbps draws more power than the 375mA at 3.3V that the
1514 * PCI specification allows, and that could result in power
1515 * to the ethernet controller being shut down completely.
1518 * Save the currently negotiated media speed/duplex/flow-control
1519 * to the softc and restore the same link again after resuming.
1520 * PHY handling such as powering down/resetting to 100Mbps
1521 * may be better handled in the PHY driver's suspend method.
1524 jme_setlinkspeed(struct jme_softc *sc)
1526 struct mii_data *mii;
1529 JME_LOCK_ASSERT(sc);
1531 mii = device_get_softc(sc->jme_miibus);
1534 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1535 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1545 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
1546 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
1547 ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
1548 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
1549 BMCR_AUTOEN | BMCR_STARTNEG);
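/*
 * The three PHY writes above clear the 1000BASE-T advertisement
 * (MII_100T2CR), advertise only 10/100 modes in ANAR, and restart
 * auto-negotiation, so a renegotiated link cannot come up at
 * gigabit speed.
 */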
1552 /* Poll link state until jme(4) gets a 10/100 link. */
1553 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
1555 if ((mii->mii_media_status & IFM_AVALID) != 0) {
1556 switch (IFM_SUBTYPE(mii->mii_media_active)) {
1566 pause("jmelnk", hz);
1569 if (i == MII_ANEGTICKS_GIGE)
1570 device_printf(sc->jme_dev, "establishing link failed, "
1571 "WOL may not work!");
1574 * No link, force MAC to have 100Mbps, full-duplex link.
1575 * This is the last resort and may/may not work.
1577 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
1578 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
1583 jme_setwol(struct jme_softc *sc)
1590 JME_LOCK_ASSERT(sc);
1592 if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
1593 /* Remove Tx MAC/offload clock to save more power. */
1594 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1595 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1596 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1597 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1598 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
1599 CSR_WRITE_4(sc, JME_GPREG1,
1600 CSR_READ_4(sc, JME_GPREG1) | GPREG1_RX_MAC_CLK_DIS);
1601 /* No PME capability, PHY power down. */
1607 gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
1608 pmcs = CSR_READ_4(sc, JME_PMCS);
1609 pmcs &= ~PMCS_WOL_ENB_MASK;
1610 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
1611 pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
1612 /* Enable PME message. */
1613 gpr |= GPREG0_PME_ENB;
1614 /* For gigabit controllers, reset link speed to 10/100. */
1615 if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
1616 jme_setlinkspeed(sc);
1619 CSR_WRITE_4(sc, JME_PMCS, pmcs);
1620 CSR_WRITE_4(sc, JME_GPREG0, gpr);
1621 /* Remove Tx MAC/offload clock to save more power. */
1622 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
1623 CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
1624 ~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
1625 GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
1627 pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
1628 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
1629 if ((ifp->if_capenable & IFCAP_WOL) != 0)
1630 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
1631 pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
1632 if ((ifp->if_capenable & IFCAP_WOL) == 0) {
1633 /* No WOL, PHY power down. */
1639 jme_suspend(device_t dev)
1641 struct jme_softc *sc;
1643 sc = device_get_softc(dev);
1654 jme_resume(device_t dev)
1656 struct jme_softc *sc;
1661 sc = device_get_softc(dev);
1664 if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
1665 pmstat = pci_read_config(sc->jme_dev,
1666 pmc + PCIR_POWER_STATUS, 2);
1667 /* Disable PME and clear PME status. */
1668 pmstat &= ~PCIM_PSTAT_PMEENABLE;
1669 pci_write_config(sc->jme_dev,
1670 pmc + PCIR_POWER_STATUS, pmstat, 2);
1675 if ((ifp->if_flags & IFF_UP) != 0) {
1676 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1677 jme_init_locked(sc);
1686 jme_encap(struct jme_softc *sc, struct mbuf **m_head)
1688 struct jme_txdesc *txd;
1689 struct jme_desc *desc;
1691 bus_dma_segment_t txsegs[JME_MAXTXSEGS];
1692 int error, i, nsegs, prod;
1693 uint32_t cflags, tsosegsz;
1695 JME_LOCK_ASSERT(sc);
1697 M_ASSERTPKTHDR((*m_head));
1699 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1701 * Due to its adherence to the NDIS specification, the JMC250
1702 * assumes the upper stack computed the TCP pseudo checksum
1703 * without including the payload length. This breaks
1704 * checksum offload for the TSO case, so recompute the TCP
1705 * pseudo checksum for the JMC250. Hopefully this isn't much
1706 * of a burden on modern CPUs.
1708 struct ether_header *eh;
1711 uint32_t ip_off, poff;
1713 if (M_WRITABLE(*m_head) == 0) {
1714 /* Get a writable copy. */
1715 m = m_dup(*m_head, M_NOWAIT);
1723 ip_off = sizeof(struct ether_header);
1724 m = m_pullup(*m_head, ip_off);
1729 eh = mtod(m, struct ether_header *);
1730 /* Check the existence of VLAN tag. */
1731 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1732 ip_off = sizeof(struct ether_vlan_header);
1733 m = m_pullup(m, ip_off);
1739 m = m_pullup(m, ip_off + sizeof(struct ip));
1744 ip = (struct ip *)(mtod(m, char *) + ip_off);
1745 poff = ip_off + (ip->ip_hl << 2);
1746 m = m_pullup(m, poff + sizeof(struct tcphdr));
1752 * Reset the IP checksum and recompute the TCP pseudo
1753 * checksum that the NDIS specification requires.
1755 ip = (struct ip *)(mtod(m, char *) + ip_off);
1756 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1758 if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
1759 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1761 htons((tcp->th_off << 2) + IPPROTO_TCP));
1762 /* No need to TSO, force IP checksum offload. */
1763 (*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
1764 (*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
1766 tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
1767 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
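/*
 * A sketch of the two cases above: when the packet carries no
 * payload, TSO is pointless, so the full pseudo checksum including
 * the TCP header length is stored and the request is downgraded to
 * plain IP checksum offload; for real TSO the pseudo checksum
 * deliberately omits the length, presumably because NDIS-style
 * hardware accounts for each segment's length itself.
 */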
1771 prod = sc->jme_cdata.jme_tx_prod;
1772 txd = &sc->jme_cdata.jme_txdesc[prod];
1774 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1775 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1776 if (error == EFBIG) {
1777 m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS);
1784 error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
1785 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1791 } else if (error != 0)
1800 * Check descriptor overrun. Leave one free descriptor.
1801 * Since we always use 64bit address mode for transmitting,
1802 * each Tx request requires one more dummy descriptor.
1804 if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
1805 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
1812 /* Configure checksum offload and TSO. */
1813 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
1814 tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz <<
1816 cflags |= JME_TD_TSO;
1818 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
1819 cflags |= JME_TD_IPCSUM;
1820 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
1821 cflags |= JME_TD_TCPCSUM;
1822 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
1823 cflags |= JME_TD_UDPCSUM;
1825 /* Configure VLAN. */
1826 if ((m->m_flags & M_VLANTAG) != 0) {
1827 cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
1828 cflags |= JME_TD_VLAN_TAG;
1831 desc = &sc->jme_rdata.jme_tx_ring[prod];
1832 desc->flags = htole32(cflags);
1833 desc->buflen = htole32(tsosegsz);
1834 desc->addr_hi = htole32(m->m_pkthdr.len);
1836 sc->jme_cdata.jme_tx_cnt++;
1837 JME_DESC_INC(prod, JME_TX_RING_CNT);
1838 for (i = 0; i < nsegs; i++) {
1839 desc = &sc->jme_rdata.jme_tx_ring[prod];
1840 desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
1841 desc->buflen = htole32(txsegs[i].ds_len);
1842 desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
1843 desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
1844 sc->jme_cdata.jme_tx_cnt++;
1845 JME_DESC_INC(prod, JME_TX_RING_CNT);
1848 /* Update producer index. */
1849 sc->jme_cdata.jme_tx_prod = prod;
1851 * Finally, request an interrupt and give ownership of the
1852 * first descriptor to the hardware.
1854 desc = txd->tx_desc;
1855 desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
1858 txd->tx_ndesc = nsegs + 1;
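/*
 * nsegs + 1 counts the leading flags/option descriptor set up before
 * the per-segment loop in addition to the nsegs data descriptors.
 */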
1860 /* Sync descriptors. */
1861 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
1862 BUS_DMASYNC_PREWRITE);
1863 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
1864 sc->jme_cdata.jme_tx_ring_map,
1865 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
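/*
 * The PREWRITE syncs above publish both the frame data and the
 * descriptors to the device before the TXCSR kick issued by
 * jme_start_locked().
 */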
1871 jme_start(struct ifnet *ifp)
1873 struct jme_softc *sc;
1877 jme_start_locked(ifp);
1882 jme_start_locked(struct ifnet *ifp)
1884 struct jme_softc *sc;
1885 struct mbuf *m_head;
1890 JME_LOCK_ASSERT(sc);
1892 if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)
1895 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1896 IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0)
1899 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
1900 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1904 * Pack the data into the transmit ring. If we
1905 * don't have room, set the OACTIVE flag and wait
1906 * for the NIC to drain the ring.
1908 if (jme_encap(sc, &m_head)) {
1911 IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1912 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1918 * If there's a BPF listener, bounce a copy of this frame
1921 ETHER_BPF_MTAP(ifp, m_head);
1926 * Reading TXCSR takes a very long time under heavy load,
1927 * so cache the TXCSR value and write the ORed value with
1928 * the kick command to TXCSR. This saves one register
1931 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
1932 TXCSR_TXQ_N_START(TXCSR_TXQ0));
1933 /* Set a timeout in case the chip goes out to lunch. */
1934 sc->jme_watchdog_timer = JME_TX_TIMEOUT;
1939 jme_watchdog(struct jme_softc *sc)
1943 JME_LOCK_ASSERT(sc);
1945 if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)
1949 if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
1950 if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
1951 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1952 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1953 jme_init_locked(sc);
1957 if (sc->jme_cdata.jme_tx_cnt == 0) {
1958 if_printf(sc->jme_ifp,
1959 "watchdog timeout (missed Tx interrupts) -- recovering\n");
1960 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1961 jme_start_locked(ifp);
1965 if_printf(sc->jme_ifp, "watchdog timeout\n");
1966 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1967 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1968 jme_init_locked(sc);
1969 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1970 jme_start_locked(ifp);
1974 jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1976 struct jme_softc *sc;
1978 struct mii_data *mii;
1983 ifr = (struct ifreq *)data;
1987 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
1988 ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
1989 ifr->ifr_mtu > JME_MAX_MTU)) {
1994 if (ifp->if_mtu != ifr->ifr_mtu) {
1996 * No special configuration is required when the interface
1997 * MTU is changed, but the availability of TSO/Tx checksum
1998 * offload should be checked against the new MTU size, as
1999 * the FIFO size is just 2K.
2002 if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
2003 ifp->if_capenable &=
2004 ~(IFCAP_TXCSUM | IFCAP_TSO4);
2006 ~(JME_CSUM_FEATURES | CSUM_TSO);
2007 VLAN_CAPABILITIES(ifp);
2009 ifp->if_mtu = ifr->ifr_mtu;
2010 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2011 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2012 jme_init_locked(sc);
2019 if ((ifp->if_flags & IFF_UP) != 0) {
2020 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2021 if (((ifp->if_flags ^ sc->jme_if_flags)
2022 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2025 if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
2026 jme_init_locked(sc);
2029 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2032 sc->jme_if_flags = ifp->if_flags;
2038 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2044 mii = device_get_softc(sc->jme_miibus);
2045 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
2049 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2050 if ((mask & IFCAP_TXCSUM) != 0 &&
2051 ifp->if_mtu < JME_TX_FIFO_SIZE) {
2052 if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
2053 ifp->if_capenable ^= IFCAP_TXCSUM;
2054 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
2055 ifp->if_hwassist |= JME_CSUM_FEATURES;
2057 ifp->if_hwassist &= ~JME_CSUM_FEATURES;
2060 if ((mask & IFCAP_RXCSUM) != 0 &&
2061 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
2062 ifp->if_capenable ^= IFCAP_RXCSUM;
2063 reg = CSR_READ_4(sc, JME_RXMAC);
2064 reg &= ~RXMAC_CSUM_ENB;
2065 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2066 reg |= RXMAC_CSUM_ENB;
2067 CSR_WRITE_4(sc, JME_RXMAC, reg);
2069 if ((mask & IFCAP_TSO4) != 0 &&
2070 ifp->if_mtu < JME_TX_FIFO_SIZE) {
2071 if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
2072 ifp->if_capenable ^= IFCAP_TSO4;
2073 if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
2074 ifp->if_hwassist |= CSUM_TSO;
2076 ifp->if_hwassist &= ~CSUM_TSO;
2079 if ((mask & IFCAP_WOL_MAGIC) != 0 &&
2080 (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
2081 ifp->if_capenable ^= IFCAP_WOL_MAGIC;
2082 if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
2083 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
2084 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
2085 if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
2086 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
2087 ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
2088 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
2089 (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
2090 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
2094 VLAN_CAPABILITIES(ifp);
2097 error = ether_ioctl(ifp, cmd, data);
2105 jme_mac_config(struct jme_softc *sc)
2107 struct mii_data *mii;
2108 uint32_t ghc, gpreg, rxmac, txmac, txpause;
2111 JME_LOCK_ASSERT(sc);
2113 mii = device_get_softc(sc->jme_miibus);
2115 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2117 CSR_WRITE_4(sc, JME_GHC, 0);
2120 rxmac = CSR_READ_4(sc, JME_RXMAC);
2121 rxmac &= ~RXMAC_FC_ENB;
2122 txmac = CSR_READ_4(sc, JME_TXMAC);
2123 txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
2124 txpause = CSR_READ_4(sc, JME_TXPFC);
2125 txpause &= ~TXPFC_PAUSE_ENB;
2126 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
2127 ghc |= GHC_FULL_DUPLEX;
2128 rxmac &= ~RXMAC_COLL_DET_ENB;
2129 txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
2130 TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
2132 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
2133 txpause |= TXPFC_PAUSE_ENB;
2134 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
2135 rxmac |= RXMAC_FC_ENB;
2136 /* Disable retry transmit timer/retry limit. */
2137 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
2138 ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
2140 rxmac |= RXMAC_COLL_DET_ENB;
2141 txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
2142 /* Enable retry transmit timer/retry limit. */
2143 CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
2144 TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
2146 /* Reprogram Tx/Rx MACs with resolved speed/duplex. */
2147 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2149 ghc |= GHC_SPEED_10;
2150 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2153 ghc |= GHC_SPEED_100;
2154 txclk |= GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100;
2157 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2159 ghc |= GHC_SPEED_1000;
2160 txclk |= GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000;
2161 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
2162 txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
2167 if (sc->jme_rev == DEVICEID_JMC250 &&
2168 sc->jme_chip_rev == DEVICEREVID_JMC250_A2) {
2170 * Work around an occasional packet-loss issue of the JMC250 A2
2171 * when it runs on half-duplex media.
2173 gpreg = CSR_READ_4(sc, JME_GPREG1);
2174 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
2175 gpreg &= ~GPREG1_HDPX_FIX;
2177 gpreg |= GPREG1_HDPX_FIX;
2178 CSR_WRITE_4(sc, JME_GPREG1, gpreg);
2179 /* Work around CRC errors at 100Mbps on the JMC250 A2. */
2180 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) {
2181 /* Extend interface FIFO depth. */
2182 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2185 /* Select default interface FIFO depth. */
2186 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
2190 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2191 ghc |= txclk;
2192 CSR_WRITE_4(sc, JME_GHC, ghc);
2193 CSR_WRITE_4(sc, JME_RXMAC, rxmac);
2194 CSR_WRITE_4(sc, JME_TXMAC, txmac);
2195 CSR_WRITE_4(sc, JME_TXPFC, txpause);
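/*
 * Standalone model (compiled out; hypothetical MODE_* bits) of the
 * read-modify-write discipline jme_mac_config() applies to the GHC,
 * RXMAC and TXMAC registers above: clear every link-dependent mode bit
 * first, then set only the bits implied by the resolved media.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define MODE_FDX	0x01	/* full-duplex enable */
#define MODE_COLL	0x02	/* collision handling, half-duplex only */

static uint32_t
rmw(uint32_t reg, uint32_t clr, uint32_t set)
{
	return ((reg & ~clr) | set);
}

int
main(void)
{
	uint32_t reg = MODE_COLL;	/* stale half-duplex setting */

	/* Link resolved to full duplex: drop MODE_COLL, gain MODE_FDX. */
	reg = rmw(reg, MODE_FDX | MODE_COLL, MODE_FDX);
	printf("reg=0x%x\n", (unsigned)reg);	/* 0x1 */
	return (0);
}
#endif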
2199 jme_link_task(void *arg, int pending)
2201 struct jme_softc *sc;
2202 struct mii_data *mii;
2204 struct jme_txdesc *txd;
2208 sc = (struct jme_softc *)arg;
2211 mii = device_get_softc(sc->jme_miibus);
2213 if (mii == NULL || ifp == NULL ||
2214 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2219 sc->jme_flags &= ~JME_FLAG_LINK;
2220 if ((mii->mii_media_status & IFM_AVALID) != 0) {
2221 switch (IFM_SUBTYPE(mii->mii_media_active)) {
2224 sc->jme_flags |= JME_FLAG_LINK;
2227 if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
2229 sc->jme_flags |= JME_FLAG_LINK;
2237 * Disabling the Rx/Tx MACs has the side effect of resetting
2238 * the JME_TXNDA/JME_RXNDA registers to the first address of
2239 * the Tx/Rx descriptor rings, so the driver must reset its
2240 * internal producer/consumer pointers and reclaim any
2241 * allocated resources. Note that merely saving the values of
2242 * the JME_TXNDA and JME_RXNDA registers before stopping the
2243 * MACs and restoring them afterwards is not sufficient to
2244 * guarantee a correct MAC state, because stopping the MACs
2245 * can take a while and the hardware might update the
2246 * JME_TXNDA/JME_RXNDA registers during the stop
2247 * operation.
2249 /* Block execution of task. */
2250 taskqueue_block(sc->jme_tq);
2251 /* Disable interrupts and stop driver. */
2252 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2253 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2254 callout_stop(&sc->jme_tick_ch);
2255 sc->jme_watchdog_timer = 0;
2257 /* Stop receiver/transmitter. */
2261 /* XXX Drain all queued tasks. */
2263 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
2266 if (sc->jme_cdata.jme_rxhead != NULL)
2267 m_freem(sc->jme_cdata.jme_rxhead);
2268 JME_RXCHAIN_RESET(sc);
2270 if (sc->jme_cdata.jme_tx_cnt != 0) {
2271 /* Remove queued packets for transmit. */
2272 for (i = 0; i < JME_TX_RING_CNT; i++) {
2273 txd = &sc->jme_cdata.jme_txdesc[i];
2274 if (txd->tx_m != NULL) {
2276 sc->jme_cdata.jme_tx_tag,
2278 BUS_DMASYNC_POSTWRITE);
2280 sc->jme_cdata.jme_tx_tag,
2285 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2291 * Reuse configured Rx descriptors and reset
2292 * producer/consumer index.
2294 sc->jme_cdata.jme_rx_cons = 0;
2295 sc->jme_morework = 0;
2296 jme_init_tx_ring(sc);
2297 /* Initialize shadow status block. */
2300 /* Program MAC with resolved speed/duplex/flow-control. */
2301 if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
2303 jme_stats_clear(sc);
2305 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2306 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2308 /* Set Tx ring address to the hardware. */
2309 paddr = JME_TX_RING_ADDR(sc, 0);
2310 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2311 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2313 /* Set Rx ring address to the hardware. */
2314 paddr = JME_RX_RING_ADDR(sc, 0);
2315 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2316 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2318 /* Restart receiver/transmitter. */
2319 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
2321 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
2322 /* Lastly enable TX/RX clock. */
2323 if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
2324 CSR_WRITE_4(sc, JME_GHC,
2325 CSR_READ_4(sc, JME_GHC) & ~GHC_TX_MAC_CLK_DIS);
2326 if ((sc->jme_flags & JME_FLAG_RXCLK) != 0)
2327 CSR_WRITE_4(sc, JME_GPREG1,
2328 CSR_READ_4(sc, JME_GPREG1) & ~GPREG1_RX_MAC_CLK_DIS);
2331 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2332 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2333 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2334 /* Unblock execution of task. */
2335 taskqueue_unblock(sc->jme_tq);
2336 /* Reenable interrupts. */
2337 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
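/*
 * The quiesce/restart dance above is the standard taskqueue(9)
 * pattern. A condensed outline of the sequence (driver-specific steps
 * elided; this fragment is illustrative, not compilable on its own):
 */
#if 0
	taskqueue_block(sc->jme_tq);	/* new tasks queue but do not run */
	/* ... disable interrupts, stop MACs, reclaim descriptors ... */
	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);	/* wait out int task */
	/* ... reprogram rings/MAC for the new link state ... */
	taskqueue_unblock(sc->jme_tq);	/* blocked tasks may run again */
#endif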
2345 struct jme_softc *sc;
2348 sc = (struct jme_softc *)arg;
2350 status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
2351 if (status == 0 || status == 0xFFFFFFFF)
2352 return (FILTER_STRAY);
2353 /* Disable interrupts. */
2354 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2355 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2357 return (FILTER_HANDLED);
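/*
 * Standalone model (compiled out; FSTRAY/FHANDLED are hypothetical
 * stand-ins for FILTER_STRAY/FILTER_HANDLED) of the fast interrupt
 * filter contract above: a status of 0 means the interrupt belongs to
 * another device sharing the line, all-ones usually means the device
 * is gone, and anything else is masked and deferred to a task.
 */
#if 0
#include <stdio.h>

#define FSTRAY   1
#define FHANDLED 2

static int
filter(unsigned int status)
{
	if (status == 0 || status == 0xffffffffU)
		return (FSTRAY);
	/* ... mask device interrupts, enqueue the task ... */
	return (FHANDLED);
}

int
main(void)
{
	printf("%d %d %d\n", filter(0), filter(0xffffffffU), filter(0x4));
	return (0);	/* prints: 1 1 2 */
}
#endif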
2361 jme_int_task(void *arg, int pending)
2363 struct jme_softc *sc;
2368 sc = (struct jme_softc *)arg;
2372 status = CSR_READ_4(sc, JME_INTR_STATUS);
2373 if (sc->jme_morework != 0) {
2374 sc->jme_morework = 0;
2375 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
2377 if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
2379 /* Reset PCC counter/timer and Ack interrupts. */
2380 status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
2381 if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
2382 status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
2383 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
2384 status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
2385 CSR_WRITE_4(sc, JME_INTR_STATUS, status);
2387 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
2388 if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
2389 more = jme_rxintr(sc, sc->jme_process_limit);
2391 sc->jme_morework = 1;
2393 if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
2395 * Notify the hardware of the availability of new Rx
2396 * buffers.
2397 * Reading RXCSR takes a very long time under heavy
2398 * load, so cache the RXCSR value and write it ORed
2399 * with the kick command back to RXCSR. This saves one
2400 * register access cycle.
2403 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
2404 RXCSR_RX_ENB | RXCSR_RXQ_START);
2406 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
2407 jme_start_locked(ifp);
2410 if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
2411 taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
2418 /* Reenable interrupts. */
2419 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
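/*
 * Standalone model (compiled out; hypothetical names) of the bounded
 * work loop in jme_int_task(): consume at most 'limit' items per pass
 * and report whether work remains so the task re-enqueues itself, as
 * with jme_process_limit and the morework flag above.
 */
#if 0
#include <stdio.h>

static int pending_items = 23;

static int
process(int limit)
{
	while (limit-- > 0 && pending_items > 0)
		pending_items--;
	return (pending_items > 0);	/* the "more" flag */
}

int
main(void)
{
	int passes = 0;

	while (process(10))		/* models task re-enqueueing */
		passes++;
	printf("drained after %d extra passes\n", passes);	/* 2 */
	return (0);
}
#endif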
2423 jme_txeof(struct jme_softc *sc)
2426 struct jme_txdesc *txd;
2430 JME_LOCK_ASSERT(sc);
2434 cons = sc->jme_cdata.jme_tx_cons;
2435 if (cons == sc->jme_cdata.jme_tx_prod)
2438 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2439 sc->jme_cdata.jme_tx_ring_map,
2440 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2443 * Go through our Tx list and free mbufs for those
2444 * frames which have been transmitted.
2446 for (; cons != sc->jme_cdata.jme_tx_prod;) {
2447 txd = &sc->jme_cdata.jme_txdesc[cons];
2448 status = le32toh(txd->tx_desc->flags);
2449 if ((status & JME_TD_OWN) == JME_TD_OWN)
2452 if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
2453 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2455 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2456 if ((status & JME_TD_COLLISION) != 0)
2457 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
2458 le32toh(txd->tx_desc->buflen) &
2459 JME_TD_BUF_LEN_MASK);
2462 * Only the first descriptor of a multi-descriptor
2463 * transmission is updated, so the driver has to skip the
2464 * entire chain of buffers for the transmitted frame. In
2465 * other words, the JME_TD_OWN bit is valid only in the
2466 * first descriptor of a multi-descriptor transmission.
2468 for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
2469 sc->jme_rdata.jme_tx_ring[cons].flags = 0;
2470 JME_DESC_INC(cons, JME_TX_RING_CNT);
2473 /* Reclaim transferred mbufs. */
2474 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
2475 BUS_DMASYNC_POSTWRITE);
2476 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
2478 KASSERT(txd->tx_m != NULL,
2479 ("%s: freeing NULL mbuf!\n", __func__));
2482 sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
2483 KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
2484 ("%s: Active Tx desc counter was garbled\n", __func__));
2486 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2488 sc->jme_cdata.jme_tx_cons = cons;
2489 /* Disarm the watchdog timer when no descriptors are pending in the queue. */
2490 if (sc->jme_cdata.jme_tx_cnt == 0)
2491 sc->jme_watchdog_timer = 0;
2493 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2494 sc->jme_cdata.jme_tx_ring_map,
2495 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
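/*
 * Model (compiled out) of the consumer-index arithmetic used in
 * jme_txeof() above: indices advance modulo the ring size, which is
 * assumed to be what the driver's JME_DESC_INC() macro does, so a
 * multi-descriptor frame may wrap around the end of the ring.
 */
#if 0
#include <stdio.h>

#define RING_CNT	8
#define DESC_INC(x, y)	((x) = ((x) + 1) % (y))

int
main(void)
{
	int cons = 6, nsegs;

	for (nsegs = 0; nsegs < 3; nsegs++)	/* a 3-descriptor frame */
		DESC_INC(cons, RING_CNT);
	printf("cons wrapped to %d\n", cons);	/* 6 -> 7 -> 0 -> 1 */
	return (0);
}
#endif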
2498 static __inline void
2499 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2501 struct jme_desc *desc;
2503 desc = &sc->jme_rdata.jme_rx_ring[cons];
2504 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2505 desc->buflen = htole32(MCLBYTES);
2508 /* Receive a frame. */
2510 jme_rxeof(struct jme_softc *sc)
2513 struct jme_desc *desc;
2514 struct jme_rxdesc *rxd;
2515 struct mbuf *mp, *m;
2516 uint32_t flags, status;
2517 int cons, count, nsegs;
2519 JME_LOCK_ASSERT(sc);
2523 cons = sc->jme_cdata.jme_rx_cons;
2524 desc = &sc->jme_rdata.jme_rx_ring[cons];
2525 flags = le32toh(desc->flags);
2526 status = le32toh(desc->buflen);
2527 nsegs = JME_RX_NSEGS(status);
2528 sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
2529 if ((status & JME_RX_ERR_STAT) != 0) {
2530 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2531 jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
2532 #ifdef JME_SHOW_ERRORS
2533 device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
2534 __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
2536 sc->jme_cdata.jme_rx_cons += nsegs;
2537 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
2541 for (count = 0; count < nsegs; count++,
2542 JME_DESC_INC(cons, JME_RX_RING_CNT)) {
2543 rxd = &sc->jme_cdata.jme_rxdesc[cons];
2545 /* Add a new receive buffer to the ring. */
2546 if (jme_newbuf(sc, rxd) != 0) {
2547 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2549 for (; count < nsegs; count++) {
2550 jme_discard_rxbuf(sc, cons);
2551 JME_DESC_INC(cons, JME_RX_RING_CNT);
2553 if (sc->jme_cdata.jme_rxhead != NULL) {
2554 m_freem(sc->jme_cdata.jme_rxhead);
2555 JME_RXCHAIN_RESET(sc);
2561 * Assume we've received a full-sized frame.
2562 * The actual size is fixed up when we encounter the end
2563 * of a multi-segmented frame.
2565 mp->m_len = MCLBYTES;
2567 /* Chain received mbufs. */
2568 if (sc->jme_cdata.jme_rxhead == NULL) {
2569 sc->jme_cdata.jme_rxhead = mp;
2570 sc->jme_cdata.jme_rxtail = mp;
2573 * The receive processor can handle a maximum frame
2574 * size of 65535 bytes.
2576 mp->m_flags &= ~M_PKTHDR;
2577 sc->jme_cdata.jme_rxtail->m_next = mp;
2578 sc->jme_cdata.jme_rxtail = mp;
2581 if (count == nsegs - 1) {
2582 /* Last desc. for this frame. */
2583 m = sc->jme_cdata.jme_rxhead;
2584 m->m_flags |= M_PKTHDR;
2585 m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
2587 /* Set first mbuf size. */
2588 m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
2589 /* Set last mbuf size. */
2590 mp->m_len = sc->jme_cdata.jme_rxlen -
2591 ((MCLBYTES - JME_RX_PAD_BYTES) +
2592 (MCLBYTES * (nsegs - 2)));
2594 m->m_len = sc->jme_cdata.jme_rxlen;
2595 m->m_pkthdr.rcvif = ifp;
2598 * Account for the 10 bytes of auto padding used to align
2599 * the IP header on a 32-bit boundary. Note also that the
2600 * CRC bytes are automatically removed by the hardware.
2603 m->m_data += JME_RX_PAD_BYTES;
2605 /* Set checksum information. */
2606 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
2607 (flags & JME_RD_IPV4) != 0) {
2608 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2609 if ((flags & JME_RD_IPCSUM) != 0)
2610 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2611 if (((flags & JME_RD_MORE_FRAG) == 0) &&
2612 ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
2613 (JME_RD_TCP | JME_RD_TCPCSUM) ||
2614 (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
2615 (JME_RD_UDP | JME_RD_UDPCSUM))) {
2616 m->m_pkthdr.csum_flags |=
2617 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2618 m->m_pkthdr.csum_data = 0xffff;
2622 /* Check for VLAN tagged packets. */
2623 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
2624 (flags & JME_RD_VLAN_TAG) != 0) {
2625 m->m_pkthdr.ether_vtag =
2626 flags & JME_RD_VLAN_MASK;
2627 m->m_flags |= M_VLANTAG;
2630 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2633 (*ifp->if_input)(ifp, m);
2636 /* Reset mbuf chains. */
2637 JME_RXCHAIN_RESET(sc);
2641 sc->jme_cdata.jme_rx_cons += nsegs;
2642 sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
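/*
 * Worked example (compiled out; MCLBYTES assumed to be 2048) of the
 * segment-length arithmetic in jme_rxeof() above: the first cluster
 * loses the 10-byte alignment pad, middle clusters are full, and the
 * last cluster carries the remainder of the frame.
 */
#if 0
#include <stdio.h>

#define MCLBYTES	2048
#define RX_PAD_BYTES	10

int
main(void)
{
	int rxlen = 5000, nsegs = 3;	/* example frame */
	int first = MCLBYTES - RX_PAD_BYTES;
	int last = rxlen - (first + MCLBYTES * (nsegs - 2));

	printf("first=%d middle=%d last=%d sum=%d\n",
	    first, MCLBYTES, last, first + MCLBYTES + last);
	/* first=2038 middle=2048 last=914 sum=5000 */
	return (0);
}
#endif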
2646 jme_rxintr(struct jme_softc *sc, int count)
2648 struct jme_desc *desc;
2649 int nsegs, prog, pktlen;
2651 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2652 sc->jme_cdata.jme_rx_ring_map,
2653 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2655 for (prog = 0; count > 0; prog++) {
2656 desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
2657 if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
2659 if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
2661 nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
2663 * Check the number of segments against the received byte
2664 * count. A mismatch indicates that the hardware is still
2665 * updating the Rx descriptors. I'm not sure whether this
2666 * check is needed.
2668 pktlen = JME_RX_BYTES(le32toh(desc->buflen));
2669 if (nsegs != howmany(pktlen, MCLBYTES))
2672 /* Received a frame. */
2678 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2679 sc->jme_cdata.jme_rx_ring_map,
2680 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2682 return (count > 0 ? 0 : EAGAIN);
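/*
 * Model (compiled out) of the nsegs-vs-length sanity check in
 * jme_rxintr() above: a consistent descriptor advertises a byte count
 * that needs exactly 'nsegs' clusters. howmany() is the standard
 * ceiling-division macro from <sys/param.h>, repeated here to keep the
 * sketch self-contained.
 */
#if 0
#include <stdio.h>

#define MCLBYTES	2048
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	int pktlen = 5010, nsegs = 3;

	/* Prints 1: 5010 bytes do need exactly 3 clusters. */
	printf("consistent: %d\n", nsegs == howmany(pktlen, MCLBYTES));
	return (0);
}
#endif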
2688 struct jme_softc *sc;
2689 struct mii_data *mii;
2691 sc = (struct jme_softc *)arg;
2693 JME_LOCK_ASSERT(sc);
2695 mii = device_get_softc(sc->jme_miibus);
2698 * Reclaim Tx buffers that have been completed. It's not
2699 * strictly needed here, but it releases allocated mbuf
2700 * chains sooner and bounds the maximum delay to one hz tick.
2703 jme_stats_update(sc);
2705 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2709 jme_reset(struct jme_softc *sc)
2711 uint32_t ghc, gpreg;
2713 /* Stop receiver, transmitter. */
2717 /* Reset controller. */
2718 CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
2719 CSR_READ_4(sc, JME_GHC);
2722 * Work around Rx FIFO overruns seen under certain conditions.
2723 * Explicitly synchronize the TX/RX clocks. The TX/RX clocks
2724 * should be enabled only after the TX/RX MACs are enabled.
2726 if ((sc->jme_flags & (JME_FLAG_TXCLK | JME_FLAG_RXCLK)) != 0) {
2727 /* Disable TX clock. */
2728 CSR_WRITE_4(sc, JME_GHC, GHC_RESET | GHC_TX_MAC_CLK_DIS);
2729 /* Disable RX clock. */
2730 gpreg = CSR_READ_4(sc, JME_GPREG1);
2731 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2732 gpreg = CSR_READ_4(sc, JME_GPREG1);
2733 /* De-assert RESET but still disable TX clock. */
2734 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2735 ghc = CSR_READ_4(sc, JME_GHC);
2737 /* Enable TX clock. */
2738 CSR_WRITE_4(sc, JME_GHC, ghc & ~GHC_TX_MAC_CLK_DIS);
2739 /* Enable RX clock. */
2740 CSR_WRITE_4(sc, JME_GPREG1, gpreg & ~GPREG1_RX_MAC_CLK_DIS);
2741 CSR_READ_4(sc, JME_GPREG1);
2743 /* Disable TX/RX clock again. */
2744 CSR_WRITE_4(sc, JME_GHC, GHC_TX_MAC_CLK_DIS);
2745 CSR_WRITE_4(sc, JME_GPREG1, gpreg | GPREG1_RX_MAC_CLK_DIS);
2747 CSR_WRITE_4(sc, JME_GHC, 0);
2748 CSR_READ_4(sc, JME_GHC);
2755 struct jme_softc *sc;
2757 sc = (struct jme_softc *)xsc;
2759 jme_init_locked(sc);
2764 jme_init_locked(struct jme_softc *sc)
2767 struct mii_data *mii;
2772 JME_LOCK_ASSERT(sc);
2775 mii = device_get_softc(sc->jme_miibus);
2777 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
2780 * Cancel any pending I/O.
2785 * Reset the chip to a known state.
2789 /* Init descriptors. */
2790 error = jme_init_rx_ring(sc);
2792 device_printf(sc->jme_dev,
2793 "%s: initialization failed: no memory for Rx buffers.\n",
2798 jme_init_tx_ring(sc);
2799 /* Initialize shadow status block. */
2802 /* Reprogram the station address. */
2803 jme_set_macaddr(sc, IF_LLADDR(sc->jme_ifp));
2806 * Configure Tx queue.
2807 * Tx priority queue weight value : 0
2808 * Tx FIFO threshold for processing next packet : 16QW
2809 * Maximum Tx DMA length : 512
2810 * Allow Tx DMA burst.
2812 sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2813 sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2814 sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2815 sc->jme_txcsr |= sc->jme_tx_dma_size;
2816 sc->jme_txcsr |= TXCSR_DMA_BURST;
2817 CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2819 /* Set Tx descriptor counter. */
2820 CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2822 /* Set Tx ring address to the hardware. */
2823 paddr = JME_TX_RING_ADDR(sc, 0);
2824 CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2825 CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2827 /* Configure TxMAC parameters. */
2828 reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2829 reg |= TXMAC_THRESH_1_PKT;
2830 reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2831 CSR_WRITE_4(sc, JME_TXMAC, reg);
2834 * Configure Rx queue.
2835 * FIFO full threshold for transmitting Tx pause packet : 128T
2836 * FIFO threshold for processing next packet : 128QW
2838 * Max Rx DMA length : 128
2839 * Rx descriptor retry : 32
2840 * Rx descriptor retry time gap : 256ns
2841 * Don't receive runt/bad frames.
2843 sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2845 * Since the Rx FIFO size is 4K bytes, frames larger than 4K
2846 * bytes will suffer from Rx FIFO overruns, so decrease the
2847 * FIFO threshold to reduce overruns for frames larger than
2848 * 4000 bytes.
2849 * For best performance with standard-MTU-sized frames, use the
2850 * maximum allowable FIFO threshold, 128QW. Note that this does
2851 * not hold for chips with full mask version >= 2; for those
2852 * controllers, 64QW and 128QW are not valid values.
2854 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 2)
2855 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2857 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2858 ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2859 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2861 sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2863 sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2864 sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2865 sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2866 CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2868 /* Set Rx descriptor counter. */
2869 CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2871 /* Set Rx ring address to the hardware. */
2872 paddr = JME_RX_RING_ADDR(sc, 0);
2873 CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2874 CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2876 /* Clear receive filter. */
2877 CSR_WRITE_4(sc, JME_RXMAC, 0);
2878 /* Set up the receive filter. */
2883 * Disable all WOL bits, as WOL can interfere with normal Rx
2884 * operation. Also clear the WOL detection status bits.
2886 reg = CSR_READ_4(sc, JME_PMCS);
2887 reg &= ~PMCS_WOL_ENB_MASK;
2888 CSR_WRITE_4(sc, JME_PMCS, reg);
2890 reg = CSR_READ_4(sc, JME_RXMAC);
2892 * Pad 10 bytes right before the received frame. This greatly
2893 * helps Rx performance on strict-alignment architectures, as
2894 * the driver need not copy the frame to align the payload.
2896 reg |= RXMAC_PAD_10BYTES;
2897 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2898 reg |= RXMAC_CSUM_ENB;
2899 CSR_WRITE_4(sc, JME_RXMAC, reg);
2901 /* Configure general purpose register 0 (GPREG0). */
2902 reg = CSR_READ_4(sc, JME_GPREG0);
2903 reg &= ~GPREG0_PCC_UNIT_MASK;
2904 /* Set PCC timer resolution to microsecond units. */
2905 reg |= GPREG0_PCC_UNIT_US;
2907 * Disable all shadow register posting, as we have to read the
2908 * JME_INTR_STATUS register in jme_int_task(). It also seems
2909 * hard to synchronize the interrupt status between hardware
2910 * and software with shadow posting, due to the requirements
2911 * of bus_dmamap_sync(9).
2913 reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2914 GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2915 GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2916 GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2917 /* Disable posting of DW0. */
2918 reg &= ~GPREG0_POST_DW0_ENB;
2919 /* Clear PME message. */
2920 reg &= ~GPREG0_PME_ENB;
2921 /* Set PHY address. */
2922 reg &= ~GPREG0_PHY_ADDR_MASK;
2923 reg |= sc->jme_phyaddr;
2924 CSR_WRITE_4(sc, JME_GPREG0, reg);
2926 /* Configure Tx queue 0 packet completion coalescing. */
2927 reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2929 reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2930 PCCTX_COAL_PKT_MASK;
2931 reg |= PCCTX_COAL_TXQ0;
2932 CSR_WRITE_4(sc, JME_PCCTX, reg);
2934 /* Configure Rx queue 0 packet completion coalescing. */
2935 reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2937 reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2938 PCCRX_COAL_PKT_MASK;
2939 CSR_WRITE_4(sc, JME_PCCRX0, reg);
2942 * Configure PCD (Packet Completion Deferring). PCD seems to
2943 * generate an interrupt when the interval between two
2944 * back-to-back incoming/outgoing packets is long enough for
2945 * its timer to count down to 0; the arrival of new packets
2946 * after the timer has started causes it to restart.
2947 * Unfortunately, it's not clear how useful PCD is at the
2948 * moment, so just reuse the PCC parameters.
2950 if ((sc->jme_flags & JME_FLAG_PCCPCD) != 0) {
2951 sc->jme_rx_pcd_to = sc->jme_rx_coal_to;
2952 if (sc->jme_rx_coal_to > PCDRX_TO_MAX)
2953 sc->jme_rx_pcd_to = PCDRX_TO_MAX;
2954 sc->jme_tx_pcd_to = sc->jme_tx_coal_to;
2955 if (sc->jme_tx_coal_to > PCDTX_TO_MAX)
2956 sc->jme_tx_pcd_to = PCDTX_TO_MAX;
2957 reg = sc->jme_rx_pcd_to << PCDRX0_TO_THROTTLE_SHIFT;
2958 reg |= sc->jme_rx_pcd_to << PCDRX0_TO_SHIFT;
2959 CSR_WRITE_4(sc, PCDRX_REG(0), reg);
2960 reg = sc->jme_tx_pcd_to << PCDTX_TO_THROTTLE_SHIFT;
2961 reg |= sc->jme_tx_pcd_to << PCDTX_TO_SHIFT;
2962 CSR_WRITE_4(sc, JME_PCDTX, reg);
2965 /* Configure shadow status block but don't enable posting. */
2966 paddr = sc->jme_rdata.jme_ssb_block_paddr;
2967 CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2968 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2970 /* Disable Timer 1 and Timer 2. */
2971 CSR_WRITE_4(sc, JME_TIMER1, 0);
2972 CSR_WRITE_4(sc, JME_TIMER2, 0);
2974 /* Configure retry transmit period, retry limit value. */
2975 CSR_WRITE_4(sc, JME_TXTRHD,
2976 ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2977 TXTRHD_RT_PERIOD_MASK) |
2978 ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2979 TXTRHD_RT_LIMIT_MASK));
2982 CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2984 /* Initialize the interrupt mask. */
2985 CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2986 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2989 * Enabling the Tx/Rx DMA engines and Rx queue processing is
2990 * done after detection of a valid link in jme_link_task().
2993 sc->jme_flags &= ~JME_FLAG_LINK;
2994 /* Set the current media. */
2997 callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2999 ifp->if_drv_flags |= IFF_DRV_RUNNING;
3000 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
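/*
 * Worked version (compiled out; standard Ethernet constants) of the Rx
 * FIFO threshold decision in jme_init_locked() above: a frame's
 * on-wire size is the MTU plus Ethernet header, VLAN tag and CRC, and
 * only frames that could overflow the 4K FIFO force the low threshold.
 */
#if 0
#include <stdio.h>

#define ETHER_HDR_LEN		14
#define ETHER_VLAN_ENCAP_LEN	4
#define ETHER_CRC_LEN		4
#define RX_FIFO_SIZE		(4 * 1024)

int
main(void)
{
	int mtu = 1500;
	int frame = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;

	/* 1522 -> 128QW: standard MTU frames keep the fast threshold. */
	printf("frame=%d -> %s threshold\n", frame,
	    frame > RX_FIFO_SIZE ? "16QW" : "128QW");
	return (0);
}
#endif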
3004 jme_stop(struct jme_softc *sc)
3007 struct jme_txdesc *txd;
3008 struct jme_rxdesc *rxd;
3011 JME_LOCK_ASSERT(sc);
3013 * Mark the interface down and cancel the watchdog timer.
3016 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
3017 sc->jme_flags &= ~JME_FLAG_LINK;
3018 callout_stop(&sc->jme_tick_ch);
3019 sc->jme_watchdog_timer = 0;
3022 * Disable interrupts.
3024 CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
3025 CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
3027 /* Disable updating shadow status block. */
3028 CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
3029 CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
3031 /* Stop receiver, transmitter. */
3035 /* Reclaim Rx/Tx buffers that have been completed. */
3036 jme_rxintr(sc, JME_RX_RING_CNT);
3037 if (sc->jme_cdata.jme_rxhead != NULL)
3038 m_freem(sc->jme_cdata.jme_rxhead);
3039 JME_RXCHAIN_RESET(sc);
3042 * Free RX and TX mbufs still in the queues.
3044 for (i = 0; i < JME_RX_RING_CNT; i++) {
3045 rxd = &sc->jme_cdata.jme_rxdesc[i];
3046 if (rxd->rx_m != NULL) {
3047 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
3048 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3049 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
3055 for (i = 0; i < JME_TX_RING_CNT; i++) {
3056 txd = &sc->jme_cdata.jme_txdesc[i];
3057 if (txd->tx_m != NULL) {
3058 bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
3059 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3060 bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
3067 jme_stats_update(sc);
3072 jme_stop_tx(struct jme_softc *sc)
3077 reg = CSR_READ_4(sc, JME_TXCSR);
3078 if ((reg & TXCSR_TX_ENB) == 0)
3080 reg &= ~TXCSR_TX_ENB;
3081 CSR_WRITE_4(sc, JME_TXCSR, reg);
3082 for (i = JME_TIMEOUT; i > 0; i--) {
3084 if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
3088 device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
3092 jme_stop_rx(struct jme_softc *sc)
3097 reg = CSR_READ_4(sc, JME_RXCSR);
3098 if ((reg & RXCSR_RX_ENB) == 0)
3100 reg &= ~RXCSR_RX_ENB;
3101 CSR_WRITE_4(sc, JME_RXCSR, reg);
3102 for (i = JME_TIMEOUT; i > 0; i--) {
3104 if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
3108 device_printf(sc->jme_dev, "stopping receiver timeout!\n");
3112 jme_init_tx_ring(struct jme_softc *sc)
3114 struct jme_ring_data *rd;
3115 struct jme_txdesc *txd;
3118 sc->jme_cdata.jme_tx_prod = 0;
3119 sc->jme_cdata.jme_tx_cons = 0;
3120 sc->jme_cdata.jme_tx_cnt = 0;
3122 rd = &sc->jme_rdata;
3123 bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
3124 for (i = 0; i < JME_TX_RING_CNT; i++) {
3125 txd = &sc->jme_cdata.jme_txdesc[i];
3127 txd->tx_desc = &rd->jme_tx_ring[i];
3131 bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
3132 sc->jme_cdata.jme_tx_ring_map,
3133 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3137 jme_init_ssb(struct jme_softc *sc)
3139 struct jme_ring_data *rd;
3141 rd = &sc->jme_rdata;
3142 bzero(rd->jme_ssb_block, JME_SSB_SIZE);
3143 bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
3144 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3148 jme_init_rx_ring(struct jme_softc *sc)
3150 struct jme_ring_data *rd;
3151 struct jme_rxdesc *rxd;
3154 sc->jme_cdata.jme_rx_cons = 0;
3155 JME_RXCHAIN_RESET(sc);
3156 sc->jme_morework = 0;
3158 rd = &sc->jme_rdata;
3159 bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
3160 for (i = 0; i < JME_RX_RING_CNT; i++) {
3161 rxd = &sc->jme_cdata.jme_rxdesc[i];
3163 rxd->rx_desc = &rd->jme_rx_ring[i];
3164 if (jme_newbuf(sc, rxd) != 0)
3168 bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
3169 sc->jme_cdata.jme_rx_ring_map,
3170 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3176 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
3178 struct jme_desc *desc;
3180 bus_dma_segment_t segs[1];
3184 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3188 * The JMC250 has a 64-bit boundary alignment limitation, so
3189 * jme(4) takes advantage of the hardware's 10-byte padding
3190 * feature to avoid copying the entire frame just to align the
3191 * IP header on a 32-bit boundary.
3193 m->m_len = m->m_pkthdr.len = MCLBYTES;
3195 if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
3196 sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
3200 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
3202 if (rxd->rx_m != NULL) {
3203 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3204 BUS_DMASYNC_POSTREAD);
3205 bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
3207 map = rxd->rx_dmamap;
3208 rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
3209 sc->jme_cdata.jme_rx_sparemap = map;
3210 bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
3211 BUS_DMASYNC_PREREAD);
3214 desc = rxd->rx_desc;
3215 desc->buflen = htole32(segs[0].ds_len);
3216 desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
3217 desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
3218 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
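/*
 * Model (compiled out; plain pointers stand in for bus_dmamap_t) of
 * the spare-map swap in jme_newbuf() above: the new mbuf is loaded
 * into the spare map first, and only after a successful load are the
 * descriptor's map and the spare exchanged, so a load failure leaves
 * the old, still-valid buffer in place.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	void *rx_dmamap = (void *)0x1000;	/* map of the old buffer */
	void *sparemap = (void *)0x2000;	/* just loaded with new mbuf */
	void *tmp;

	tmp = rx_dmamap;			/* swap active and spare */
	rx_dmamap = sparemap;
	sparemap = tmp;
	printf("active=%p spare=%p\n", rx_dmamap, sparemap);
	return (0);
}
#endif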
3224 jme_set_vlan(struct jme_softc *sc)
3229 JME_LOCK_ASSERT(sc);
3232 reg = CSR_READ_4(sc, JME_RXMAC);
3233 reg &= ~RXMAC_VLAN_ENB;
3234 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
3235 reg |= RXMAC_VLAN_ENB;
3236 CSR_WRITE_4(sc, JME_RXMAC, reg);
3240 jme_set_filter(struct jme_softc *sc)
3243 struct ifmultiaddr *ifma;
3248 JME_LOCK_ASSERT(sc);
3252 rxcfg = CSR_READ_4(sc, JME_RXMAC);
3253 rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
3255 /* Always accept frames destined to our station address. */
3256 rxcfg |= RXMAC_UNICAST;
3257 if ((ifp->if_flags & IFF_BROADCAST) != 0)
3258 rxcfg |= RXMAC_BROADCAST;
3259 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3260 if ((ifp->if_flags & IFF_PROMISC) != 0)
3261 rxcfg |= RXMAC_PROMISC;
3262 if ((ifp->if_flags & IFF_ALLMULTI) != 0)
3263 rxcfg |= RXMAC_ALLMULTI;
3264 CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
3265 CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
3266 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3271 * Set up the multicast address filter by passing all multicast
3272 * addresses through a CRC generator and then using the low-order
3273 * 6 bits as an index into the 64-bit multicast hash table. The
3274 * high-order bit of the index selects the register, while the
3275 * remaining bits select the bit within the register.
3277 rxcfg |= RXMAC_MULTICAST;
3278 bzero(mchash, sizeof(mchash));
3280 if_maddr_rlock(ifp);
3281 CK_STAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
3282 if (ifma->ifma_addr->sa_family != AF_LINK)
3284 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3285 ifma->ifma_addr), ETHER_ADDR_LEN);
3287 /* Just want the 6 least significant bits. */
3288 crc &= 0x3f;
3290 /* Set the corresponding bit in the hash table. */
3291 mchash[crc >> 5] |= 1 << (crc & 0x1f);
3293 if_maddr_runlock(ifp);
3295 CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3296 CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3297 CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
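/*
 * Worked example (compiled out; the CRC value is an arbitrary sample,
 * not one computed from a real address) of the hash indexing in
 * jme_set_filter() above: the low 6 bits of the big-endian CRC select
 * one of 64 bits spread across two 32-bit registers; bit 5 picks the
 * register and bits 0-4 pick the bit within it.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned int mchash[2] = { 0, 0 };
	unsigned int crc = 0xdeadbeef;

	crc &= 0x3f;				/* 6 LSBs: 0x2f = 47 */
	mchash[crc >> 5] |= 1U << (crc & 0x1f);	/* word 1, bit 15 */
	printf("mchash = { 0x%08x, 0x%08x }\n", mchash[0], mchash[1]);
	/* mchash = { 0x00000000, 0x00008000 } */
	return (0);
}
#endif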
3301 jme_stats_clear(struct jme_softc *sc)
3304 JME_LOCK_ASSERT(sc);
3306 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3309 /* Disable and clear counters. */
3310 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3311 /* Activate hw counters. */
3312 CSR_WRITE_4(sc, JME_STATCSR, 0);
3313 CSR_READ_4(sc, JME_STATCSR);
3314 bzero(&sc->jme_stats, sizeof(struct jme_hw_stats));
3318 jme_stats_save(struct jme_softc *sc)
3321 JME_LOCK_ASSERT(sc);
3323 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3325 /* Save current counters. */
3326 bcopy(&sc->jme_stats, &sc->jme_ostats, sizeof(struct jme_hw_stats));
3327 /* Disable and clear counters. */
3328 CSR_WRITE_4(sc, JME_STATCSR, 0xFFFFFFFF);
3332 jme_stats_update(struct jme_softc *sc)
3334 struct jme_hw_stats *stat, *ostat;
3337 JME_LOCK_ASSERT(sc);
3339 if ((sc->jme_flags & JME_FLAG_HWMIB) == 0)
3341 stat = &sc->jme_stats;
3342 ostat = &sc->jme_ostats;
3343 stat->tx_good_frames = CSR_READ_4(sc, JME_STAT_TXGOOD);
3344 stat->rx_good_frames = CSR_READ_4(sc, JME_STAT_RXGOOD);
3345 reg = CSR_READ_4(sc, JME_STAT_CRCMII);
3346 stat->rx_crc_errs = (reg & STAT_RX_CRC_ERR_MASK) >>
3347 STAT_RX_CRC_ERR_SHIFT;
3348 stat->rx_mii_errs = (reg & STAT_RX_MII_ERR_MASK) >>
3349 STAT_RX_MII_ERR_SHIFT;
3350 reg = CSR_READ_4(sc, JME_STAT_RXERR);
3351 stat->rx_fifo_oflows = (reg & STAT_RXERR_OFLOW_MASK) >>
3352 STAT_RXERR_OFLOW_SHIFT;
3353 stat->rx_desc_empty = (reg & STAT_RXERR_MPTY_MASK) >>
3354 STAT_RXERR_MPTY_SHIFT;
3355 reg = CSR_READ_4(sc, JME_STAT_FAIL);
3356 stat->rx_bad_frames = (reg & STAT_FAIL_RX_MASK) >> STAT_FAIL_RX_SHIFT;
3357 stat->tx_bad_frames = (reg & STAT_FAIL_TX_MASK) >> STAT_FAIL_TX_SHIFT;
3359 /* Account for previous counters. */
3360 stat->rx_good_frames += ostat->rx_good_frames;
3361 stat->rx_crc_errs += ostat->rx_crc_errs;
3362 stat->rx_mii_errs += ostat->rx_mii_errs;
3363 stat->rx_fifo_oflows += ostat->rx_fifo_oflows;
3364 stat->rx_desc_empty += ostat->rx_desc_empty;
3365 stat->rx_bad_frames += ostat->rx_bad_frames;
3366 stat->tx_good_frames += ostat->tx_good_frames;
3367 stat->tx_bad_frames += ostat->tx_bad_frames;
3371 jme_phy_down(struct jme_softc *sc)
3375 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, BMCR_PDOWN);
3376 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3377 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3379 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3380 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3381 reg &= ~PE1_GIGA_PDOWN_MASK;
3382 reg |= PE1_GIGA_PDOWN_D3;
3383 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3388 jme_phy_up(struct jme_softc *sc)
3393 bmcr = jme_miibus_readreg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR);
3394 bmcr &= ~BMCR_PDOWN;
3395 jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR, bmcr);
3396 if (CHIPMODE_REVFM(sc->jme_chip_rev) >= 5) {
3397 reg = CSR_READ_4(sc, JME_PHYPOWDN);
3399 CSR_WRITE_4(sc, JME_PHYPOWDN, reg);
3400 reg = pci_read_config(sc->jme_dev, JME_PCI_PE1, 4);
3401 reg &= ~PE1_GIGA_PDOWN_MASK;
3402 reg |= PE1_GIGA_PDOWN_DIS;
3403 pci_write_config(sc->jme_dev, JME_PCI_PE1, reg, 4);
3408 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3414 value = *(int *)arg1;
3415 error = sysctl_handle_int(oidp, &value, 0, req);
3416 if (error || req->newptr == NULL)
3418 if (value < low || value > high)
3420 *(int *)arg1 = value;
3426 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3428 return (sysctl_int_range(oidp, arg1, arg2, req,
3429 PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
3433 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3435 return (sysctl_int_range(oidp, arg1, arg2, req,
3436 PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
3440 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3442 return (sysctl_int_range(oidp, arg1, arg2, req,
3443 PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
3447 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3449 return (sysctl_int_range(oidp, arg1, arg2, req,
3450 PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3454 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3456 return (sysctl_int_range(oidp, arg1, arg2, req,
3457 JME_PROC_MIN, JME_PROC_MAX));
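/*
 * These handlers are meant to be attached with SYSCTL_ADD_PROC() from
 * the attach path. A hedged sketch of one such registration (compiled
 * out; the OID name and description string are illustrative, not
 * copied from the driver):
 */
#if 0
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
	    "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to, 0,
	    sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
#endif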