2 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/systm.h>
34 #include <sys/endian.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
39 #include <sys/module.h>
41 #include <sys/queue.h>
42 #include <sys/socket.h>
43 #include <sys/sockio.h>
44 #include <sys/sysctl.h>
45 #include <sys/taskqueue.h>
49 #include <net/if_arp.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_vlan_var.h>
56 #include <netinet/in.h>
57 #include <netinet/in_systm.h>
58 #include <netinet/ip.h>
59 #include <netinet/tcp.h>
61 #include <dev/mii/mii.h>
62 #include <dev/mii/miivar.h>
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcivar.h>
67 #include <machine/atomic.h>
68 #include <machine/bus.h>
69 #include <machine/in_cksum.h>
71 #include <dev/jme/if_jmereg.h>
72 #include <dev/jme/if_jmevar.h>
74 /* "device miibus" required. See GENERIC if you get errors here. */
75 #include "miibus_if.h"
77 /* Define the following to disable printing Rx errors. */
78 #undef JME_SHOW_ERRORS
/* Checksum offload features advertised via if_hwassist (see jme_attach). */
80 #define JME_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
82 MODULE_DEPEND(jme, pci, 1, 1, 1);
83 MODULE_DEPEND(jme, ether, 1, 1, 1);
84 MODULE_DEPEND(jme, miibus, 1, 1, 1);
/*
 * Loader tunables: set hw.jme.msi_disable / hw.jme.msix_disable to a
 * nonzero value to skip MSI / MSI-X allocation and fall back to the
 * legacy INTx interrupt (see the probe order in jme_attach).
 */
87 static int msi_disable = 0;
88 static int msix_disable = 0;
89 TUNABLE_INT("hw.jme.msi_disable", &msi_disable);
90 TUNABLE_INT("hw.jme.msix_disable", &msix_disable);
93 * Devices supported by this driver.
/*
 * PCI vendor/device match table scanned by jme_probe(); the description
 * string (jme_name, referenced there) becomes the device description.
 */
95 static struct jme_dev {
96 uint16_t jme_vendorid;
97 uint16_t jme_deviceid;
100 { VENDORID_JMICRON, DEVICEID_JMC250,
101 "JMicron Inc, JMC250 Gigabit Ethernet" },
102 { VENDORID_JMICRON, DEVICEID_JMC260,
103 "JMicron Inc, JMC260 Fast Ethernet" },
/*
 * Forward declarations: newbus device methods, miibus callbacks, and
 * every file-local helper (attach/detach, DMA setup, Tx/Rx paths,
 * and sysctl handlers).
 */
106 static int jme_miibus_readreg(device_t, int, int);
107 static int jme_miibus_writereg(device_t, int, int, int);
108 static void jme_miibus_statchg(device_t);
109 static void jme_mediastatus(struct ifnet *, struct ifmediareq *);
110 static int jme_mediachange(struct ifnet *);
111 static int jme_probe(device_t);
112 static int jme_eeprom_read_byte(struct jme_softc *, uint8_t, uint8_t *);
113 static int jme_eeprom_macaddr(struct jme_softc *);
114 static void jme_reg_macaddr(struct jme_softc *);
115 static void jme_map_intr_vector(struct jme_softc *);
116 static int jme_attach(device_t);
117 static int jme_detach(device_t);
118 static void jme_sysctl_node(struct jme_softc *);
119 static void jme_dmamap_cb(void *, bus_dma_segment_t *, int, int);
120 static int jme_dma_alloc(struct jme_softc *);
121 static void jme_dma_free(struct jme_softc *);
122 static int jme_shutdown(device_t);
123 static void jme_setlinkspeed(struct jme_softc *);
124 static void jme_setwol(struct jme_softc *);
125 static int jme_suspend(device_t);
126 static int jme_resume(device_t);
127 static int jme_encap(struct jme_softc *, struct mbuf **);
128 static void jme_tx_task(void *, int);
129 static void jme_start(struct ifnet *);
130 static void jme_watchdog(struct jme_softc *);
131 static int jme_ioctl(struct ifnet *, u_long, caddr_t);
132 static void jme_mac_config(struct jme_softc *);
133 static void jme_link_task(void *, int);
134 static int jme_intr(void *);
135 static void jme_int_task(void *, int);
136 static void jme_txeof(struct jme_softc *);
137 static __inline void jme_discard_rxbuf(struct jme_softc *, int);
138 static void jme_rxeof(struct jme_softc *);
139 static int jme_rxintr(struct jme_softc *, int);
140 static void jme_tick(void *);
141 static void jme_reset(struct jme_softc *);
142 static void jme_init(void *);
143 static void jme_init_locked(struct jme_softc *);
144 static void jme_stop(struct jme_softc *);
145 static void jme_stop_tx(struct jme_softc *);
146 static void jme_stop_rx(struct jme_softc *);
147 static int jme_init_rx_ring(struct jme_softc *);
148 static void jme_init_tx_ring(struct jme_softc *);
149 static void jme_init_ssb(struct jme_softc *);
150 static int jme_newbuf(struct jme_softc *, struct jme_rxdesc *);
151 static void jme_set_vlan(struct jme_softc *);
152 static void jme_set_filter(struct jme_softc *);
153 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
154 static int sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS);
155 static int sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS);
156 static int sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS);
157 static int sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS);
158 static int sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS);
/* newbus method table wiring the driver entry points and MII callbacks. */
161 static device_method_t jme_methods[] = {
162 /* Device interface. */
163 DEVMETHOD(device_probe, jme_probe),
164 DEVMETHOD(device_attach, jme_attach),
165 DEVMETHOD(device_detach, jme_detach),
166 DEVMETHOD(device_shutdown, jme_shutdown),
167 DEVMETHOD(device_suspend, jme_suspend),
168 DEVMETHOD(device_resume, jme_resume),
/* MII interface, called from the miibus child. */
171 DEVMETHOD(miibus_readreg, jme_miibus_readreg),
172 DEVMETHOD(miibus_writereg, jme_miibus_writereg),
173 DEVMETHOD(miibus_statchg, jme_miibus_statchg),
/* Driver registration: attach jme under pci and miibus under jme. */
178 static driver_t jme_driver = {
181 sizeof(struct jme_softc)
184 static devclass_t jme_devclass;
186 DRIVER_MODULE(jme, pci, jme_driver, jme_devclass, 0, 0);
187 DRIVER_MODULE(miibus, jme, miibus_driver, miibus_devclass, 0, 0);
/* Register window: single memory BAR 0. */
189 static struct resource_spec jme_res_spec_mem[] = {
190 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
/* Legacy INTx: one shareable IRQ. */
194 static struct resource_spec jme_irq_spec_legacy[] = {
195 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
/* MSI/MSI-X: up to eight dedicated message vectors (rid 1..8). */
199 static struct resource_spec jme_irq_spec_msi[] = {
200 { SYS_RES_IRQ, 1, RF_ACTIVE },
201 { SYS_RES_IRQ, 2, RF_ACTIVE },
202 { SYS_RES_IRQ, 3, RF_ACTIVE },
203 { SYS_RES_IRQ, 4, RF_ACTIVE },
204 { SYS_RES_IRQ, 5, RF_ACTIVE },
205 { SYS_RES_IRQ, 6, RF_ACTIVE },
206 { SYS_RES_IRQ, 7, RF_ACTIVE },
207 { SYS_RES_IRQ, 8, RF_ACTIVE },
212 * Read a PHY register on the MII of the JMC250.
/*
 * miibus read method: issue an SMI read and poll until the controller
 * clears SMI_OP_EXECUTE, then return the data field.  Returns the
 * register value, extracted from SMI_DATA_MASK.
 */
215 jme_miibus_readreg(device_t dev, int phy, int reg)
217 struct jme_softc *sc;
221 sc = device_get_softc(dev);
223 /* For FPGA version, PHY address 0 should be ignored. */
224 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
/* Only the probed PHY address is serviced; others are ignored. */
228 if (sc->jme_phyaddr != phy)
232 CSR_WRITE_4(sc, JME_SMI, SMI_OP_READ | SMI_OP_EXECUTE |
233 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
/* Busy-wait up to JME_PHY_TIMEOUT iterations for the op to complete. */
234 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
236 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
241 device_printf(sc->jme_dev, "phy read timeout : %d\n", reg);
245 return ((val & SMI_DATA_MASK) >> SMI_DATA_SHIFT);
249 * Write a PHY register on the MII of the JMC250.
/*
 * miibus write method: issue an SMI write with the data shifted into
 * SMI_DATA_MASK and poll until SMI_OP_EXECUTE clears.
 */
252 jme_miibus_writereg(device_t dev, int phy, int reg, int val)
254 struct jme_softc *sc;
257 sc = device_get_softc(dev);
259 /* For FPGA version, PHY address 0 should be ignored. */
260 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
264 if (sc->jme_phyaddr != phy)
268 CSR_WRITE_4(sc, JME_SMI, SMI_OP_WRITE | SMI_OP_EXECUTE |
269 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
270 SMI_PHY_ADDR(phy) | SMI_REG_ADDR(reg));
/* Busy-wait for completion, same bound as the read path. */
271 for (i = JME_PHY_TIMEOUT; i > 0; i--) {
273 if (((val = CSR_READ_4(sc, JME_SMI)) & SMI_OP_EXECUTE) == 0)
278 device_printf(sc->jme_dev, "phy write timeout : %d\n", reg);
284 * Callback from MII layer when media changes.
/*
 * miibus status-change callback.  Runs in MII context, so defer the
 * actual reconfiguration to jme_link_task on the SWI taskqueue.
 */
287 jme_miibus_statchg(device_t dev)
289 struct jme_softc *sc;
291 sc = device_get_softc(dev);
292 taskqueue_enqueue(taskqueue_swi, &sc->jme_link_task);
296 * Get the current interface media status.
/*
 * ifmedia status callback: report the current MII media status/active
 * words to the caller's ifmediareq.
 */
299 jme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
301 struct jme_softc *sc;
302 struct mii_data *mii;
306 mii = device_get_softc(sc->jme_miibus);
309 ifmr->ifm_status = mii->mii_media_status;
310 ifmr->ifm_active = mii->mii_media_active;
315 * Set hardware to newly-selected media.
/*
 * ifmedia change callback: reset all PHYs when more than one MII
 * instance is present, then program the newly selected media.
 */
318 jme_mediachange(struct ifnet *ifp)
320 struct jme_softc *sc;
321 struct mii_data *mii;
322 struct mii_softc *miisc;
327 mii = device_get_softc(sc->jme_miibus);
328 if (mii->mii_instance != 0) {
329 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
330 mii_phy_reset(miisc);
332 error = mii_mediachg(mii);
/*
 * newbus probe: match the PCI vendor/device pair against jme_devs[]
 * and set the device description on a hit.
 */
339 jme_probe(device_t dev)
343 uint16_t vendor, devid;
345 vendor = pci_get_vendor(dev);
346 devid = pci_get_device(dev);
348 for (i = 0; i < sizeof(jme_devs) / sizeof(jme_devs[0]);
350 if (vendor == sp->jme_vendorid &&
351 devid == sp->jme_deviceid) {
352 device_set_desc(dev, sp->jme_name);
353 return (BUS_PROBE_DEFAULT);
/*
 * Read one byte from the on-board EEPROM via the SMB interface:
 * wait for the controller to go idle, trigger a read at 'addr',
 * poll for completion, then extract the data byte into *val.
 */
361 jme_eeprom_read_byte(struct jme_softc *sc, uint8_t addr, uint8_t *val)
367 for (i = JME_TIMEOUT; i > 0; i--) {
368 reg = CSR_READ_4(sc, JME_SMBCSR);
369 if ((reg & SMBCSR_HW_BUSY_MASK) == SMBCSR_HW_IDLE)
375 device_printf(sc->jme_dev, "EEPROM idle timeout!\n");
/* Kick off the read; hardware clears SMBINTF_CMD_TRIGGER when done. */
379 reg = ((uint32_t)addr << SMBINTF_ADDR_SHIFT) & SMBINTF_ADDR_MASK;
380 CSR_WRITE_4(sc, JME_SMBINTF, reg | SMBINTF_RD | SMBINTF_CMD_TRIGGER);
381 for (i = JME_TIMEOUT; i > 0; i--) {
383 reg = CSR_READ_4(sc, JME_SMBINTF);
384 if ((reg & SMBINTF_CMD_TRIGGER) == 0)
389 device_printf(sc->jme_dev, "EEPROM read timeout!\n");
393 reg = CSR_READ_4(sc, JME_SMBINTF);
394 *val = (reg & SMBINTF_RD_DATA_MASK) >> SMBINTF_RD_DATA_SHIFT;
/*
 * Extract the station address from the EEPROM descriptors.  After
 * verifying the two-byte EEPROM signature, walk the descriptor list;
 * descriptors that write into the JME_PAR0..PAR0+5 register window
 * contribute one MAC-address byte each.  Succeeds only when all
 * ETHER_ADDR_LEN bytes were found, in which case the address is
 * copied into sc->jme_eaddr.
 */
400 jme_eeprom_macaddr(struct jme_softc *sc)
402 uint8_t eaddr[ETHER_ADDR_LEN];
403 uint8_t fup, reg, val;
408 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
409 fup != JME_EEPROM_SIG0)
411 if (jme_eeprom_read_byte(sc, offset++, &fup) != 0 ||
412 fup != JME_EEPROM_SIG1)
416 if (jme_eeprom_read_byte(sc, offset, &fup) != 0)
418 /* Check for the end of EEPROM descriptor. */
419 if ((fup & JME_EEPROM_DESC_END) == JME_EEPROM_DESC_END)
421 if ((uint8_t)JME_EEPROM_MKDESC(JME_EEPROM_FUNC0,
422 JME_EEPROM_PAGE_BAR1) == fup) {
/* BUGFIX: was mojibake "(R)"; the second descriptor byte is the target register. */
423 if (jme_eeprom_read_byte(sc, offset + 1, &reg) != 0)
425 if (reg >= JME_PAR0 &&
426 reg < JME_PAR0 + ETHER_ADDR_LEN) {
427 if (jme_eeprom_read_byte(sc, offset + 2,
430 eaddr[reg - JME_PAR0] = val;
434 /* Try next eeprom descriptor. */
435 offset += JME_EEPROM_DESC_BYTES;
436 } while (match != ETHER_ADDR_LEN && offset < JME_EEPROM_END);
438 if (match == ETHER_ADDR_LEN) {
439 bcopy(eaddr, sc->jme_eaddr, ETHER_ADDR_LEN);
/*
 * Fall back to reading the station address directly from the PAR0/PAR1
 * registers (PAR1 holds only the upper 16 bits).  If the registers read
 * all-zeros or all-ones the address is invalid, so synthesize one with
 * the JMicron OUI (00:1B:8C) and low-order PAR0 bits.
 */
447 jme_reg_macaddr(struct jme_softc *sc)
451 /* Read station address. */
452 par0 = CSR_READ_4(sc, JME_PAR0);
453 par1 = CSR_READ_4(sc, JME_PAR1);
455 if ((par0 == 0 && par1 == 0) ||
456 (par0 == 0xFFFFFFFF && par1 == 0xFFFF)) {
457 device_printf(sc->jme_dev,
458 "generating fake ethernet address.\n");
460 /* Set OUI to JMicron. */
461 sc->jme_eaddr[0] = 0x00;
462 sc->jme_eaddr[1] = 0x1B;
463 sc->jme_eaddr[2] = 0x8C;
464 sc->jme_eaddr[3] = (par0 >> 16) & 0xff;
465 sc->jme_eaddr[4] = (par0 >> 8) & 0xff;
466 sc->jme_eaddr[5] = par0 & 0xff;
/* Registers are little-endian byte order: PAR0 bytes 0-3, PAR1 bytes 4-5. */
468 sc->jme_eaddr[0] = (par0 >> 0) & 0xFF;
469 sc->jme_eaddr[1] = (par0 >> 8) & 0xFF;
470 sc->jme_eaddr[2] = (par0 >> 16) & 0xFF;
471 sc->jme_eaddr[3] = (par0 >> 24) & 0xFF;
472 sc->jme_eaddr[4] = (par1 >> 0) & 0xFF;
473 sc->jme_eaddr[5] = (par1 >> 8) & 0xFF;
/*
 * Program the MSINUM registers that steer each interrupt source to an
 * MSI/MSI-X vector: Tx sources to vector 2, Rx sources to vector 1,
 * everything else (left zero in map[]) to vector 0.
 *
 * map[] is zeroed up front, so every update below uses |= — several
 * sources share the same 32-bit MSINUM word, and a plain assignment
 * would clobber bits OR'd in by an earlier line.
 */
478 jme_map_intr_vector(struct jme_softc *sc)
480 uint32_t map[MSINUM_NUM_INTR_SOURCE / JME_MSI_MESSAGES];
482 bzero(map, sizeof(map));
484 /* Map Tx interrupts source to MSI/MSIX vector 2. */
485 map[MSINUM_REG_INDEX(N_INTR_TXQ0_COMP)] |=
486 MSINUM_INTR_SOURCE(2, N_INTR_TXQ0_COMP);
487 map[MSINUM_REG_INDEX(N_INTR_TXQ1_COMP)] |=
488 MSINUM_INTR_SOURCE(2, N_INTR_TXQ1_COMP);
489 map[MSINUM_REG_INDEX(N_INTR_TXQ2_COMP)] |=
490 MSINUM_INTR_SOURCE(2, N_INTR_TXQ2_COMP);
491 map[MSINUM_REG_INDEX(N_INTR_TXQ3_COMP)] |=
492 MSINUM_INTR_SOURCE(2, N_INTR_TXQ3_COMP);
493 map[MSINUM_REG_INDEX(N_INTR_TXQ4_COMP)] |=
494 MSINUM_INTR_SOURCE(2, N_INTR_TXQ4_COMP);
/* BUGFIX: TXQ5's source was OR'd into TXQ4's register index. */
495 map[MSINUM_REG_INDEX(N_INTR_TXQ5_COMP)] |=
496 MSINUM_INTR_SOURCE(2, N_INTR_TXQ5_COMP);
497 map[MSINUM_REG_INDEX(N_INTR_TXQ6_COMP)] |=
498 MSINUM_INTR_SOURCE(2, N_INTR_TXQ6_COMP);
499 map[MSINUM_REG_INDEX(N_INTR_TXQ7_COMP)] |=
500 MSINUM_INTR_SOURCE(2, N_INTR_TXQ7_COMP);
501 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL)] |=
502 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL);
503 map[MSINUM_REG_INDEX(N_INTR_TXQ_COAL_TO)] |=
504 MSINUM_INTR_SOURCE(2, N_INTR_TXQ_COAL_TO);
506 /* Map Rx interrupts source to MSI/MSIX vector 1. */
507 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COMP)] |=
508 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COMP);
509 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COMP)] |=
510 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COMP);
511 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COMP)] |=
512 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COMP);
513 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COMP)] |=
514 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COMP);
515 map[MSINUM_REG_INDEX(N_INTR_RXQ0_DESC_EMPTY)] |=
516 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_DESC_EMPTY);
517 map[MSINUM_REG_INDEX(N_INTR_RXQ1_DESC_EMPTY)] |=
518 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_DESC_EMPTY);
519 map[MSINUM_REG_INDEX(N_INTR_RXQ2_DESC_EMPTY)] |=
520 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_DESC_EMPTY);
521 map[MSINUM_REG_INDEX(N_INTR_RXQ3_DESC_EMPTY)] |=
522 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_DESC_EMPTY);
523 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL)] |=
524 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL);
525 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL)] |=
526 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL);
527 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL)] |=
528 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL);
529 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL)] |=
530 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL);
531 map[MSINUM_REG_INDEX(N_INTR_RXQ0_COAL_TO)] |=
532 MSINUM_INTR_SOURCE(1, N_INTR_RXQ0_COAL_TO);
533 map[MSINUM_REG_INDEX(N_INTR_RXQ1_COAL_TO)] |=
534 MSINUM_INTR_SOURCE(1, N_INTR_RXQ1_COAL_TO);
535 map[MSINUM_REG_INDEX(N_INTR_RXQ2_COAL_TO)] |=
536 MSINUM_INTR_SOURCE(1, N_INTR_RXQ2_COAL_TO);
537 map[MSINUM_REG_INDEX(N_INTR_RXQ3_COAL_TO)] |=
538 MSINUM_INTR_SOURCE(1, N_INTR_RXQ3_COAL_TO);
540 /* Map all other interrupts source to MSI/MSIX vector 0. */
541 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 0, map[0]);
542 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 1, map[1]);
543 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 2, map[2]);
544 CSR_WRITE_4(sc, JME_MSINUM_BASE + sizeof(uint32_t) * 3, map[3]);
/*
 * newbus attach: map BAR 0, pick an interrupt delivery mode
 * (MSI-X > MSI > INTx, honoring the hw.jme tunables), identify the
 * chip/FPGA revision, recover the station address (EEPROM first,
 * PAR registers as fallback), size DMA bursts from the PCIe control
 * register, allocate DMA rings, create and attach the ifnet, probe
 * the MII bus, and finally hook up the interrupt handler(s).
 */
548 jme_attach(device_t dev)
550 struct jme_softc *sc;
552 struct mii_softc *miisc;
553 struct mii_data *mii;
556 int error, i, msic, msixc, pmc;
559 sc = device_get_softc(dev);
562 mtx_init(&sc->jme_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
564 callout_init_mtx(&sc->jme_tick_ch, &sc->jme_mtx, 0);
565 TASK_INIT(&sc->jme_int_task, 0, jme_int_task, sc);
566 TASK_INIT(&sc->jme_link_task, 0, jme_link_task, sc);
569 * Map the device. JMC250 supports both memory mapped and I/O
570 * register space access. Because I/O register access should
571 * use different BARs to access registers it's waste of time
572 * to use I/O register spce access. JMC250 uses 16K to map
573 * entire memory space.
575 pci_enable_busmaster(dev);
576 sc->jme_res_spec = jme_res_spec_mem;
577 sc->jme_irq_spec = jme_irq_spec_legacy;
578 error = bus_alloc_resources(dev, sc->jme_res_spec, sc->jme_res);
580 device_printf(dev, "cannot allocate memory resources.\n");
584 /* Allocate IRQ resources. */
585 msixc = pci_msix_count(dev);
586 msic = pci_msi_count(dev);
588 device_printf(dev, "MSIX count : %d\n", msixc);
589 device_printf(dev, "MSI count : %d\n", msic);
592 /* Prefer MSIX over MSI. */
593 if (msix_disable == 0 || msi_disable == 0) {
594 if (msix_disable == 0 && msixc == JME_MSIX_MESSAGES &&
595 pci_alloc_msix(dev, &msixc) == 0) {
/* BUGFIX: must re-check msixc (updated by pci_alloc_msix), not msic. */
596 if (msixc == JME_MSIX_MESSAGES) {
597 device_printf(dev, "Using %d MSIX messages.\n",
599 sc->jme_flags |= JME_FLAG_MSIX;
600 sc->jme_irq_spec = jme_irq_spec_msi;
602 pci_release_msi(dev);
604 if (msi_disable == 0 && (sc->jme_flags & JME_FLAG_MSIX) == 0 &&
605 msic == JME_MSI_MESSAGES &&
606 pci_alloc_msi(dev, &msic) == 0) {
607 if (msic == JME_MSI_MESSAGES) {
608 device_printf(dev, "Using %d MSI messages.\n",
610 sc->jme_flags |= JME_FLAG_MSI;
611 sc->jme_irq_spec = jme_irq_spec_msi;
613 pci_release_msi(dev);
615 /* Map interrupt vector 0, 1 and 2. */
616 if ((sc->jme_flags & JME_FLAG_MSI) != 0 ||
617 (sc->jme_flags & JME_FLAG_MSIX) != 0)
618 jme_map_intr_vector(sc);
621 error = bus_alloc_resources(dev, sc->jme_irq_spec, sc->jme_irq);
623 device_printf(dev, "cannot allocate IRQ resources.\n");
627 sc->jme_rev = pci_get_revid(dev);
628 if (sc->jme_rev == DEVICEREVID_JMC260) {
629 sc->jme_flags |= JME_FLAG_FASTETH;
630 sc->jme_flags |= JME_FLAG_NOJUMBO;
632 reg = CSR_READ_4(sc, JME_CHIPMODE);
633 sc->jme_chip_rev = (reg & CHIPMODE_REV_MASK) >> CHIPMODE_REV_SHIFT;
634 if (((reg & CHIPMODE_FPGA_REV_MASK) >> CHIPMODE_FPGA_REV_SHIFT) !=
636 sc->jme_flags |= JME_FLAG_FPGA;
638 device_printf(dev, "PCI device revision : 0x%04x\n",
640 device_printf(dev, "Chip revision : 0x%02x\n",
642 if ((sc->jme_flags & JME_FLAG_FPGA) != 0)
643 device_printf(dev, "FPGA revision : 0x%04x\n",
644 (reg & CHIPMODE_FPGA_REV_MASK) >>
645 CHIPMODE_FPGA_REV_SHIFT);
647 if (sc->jme_chip_rev == 0xFF) {
648 device_printf(dev, "Unknown chip revision : 0x%02x\n",
654 /* Reset the ethernet controller. */
657 /* Get station address. */
658 reg = CSR_READ_4(sc, JME_SMBCSR);
659 if ((reg & SMBCSR_EEPROM_PRESENT) != 0)
660 error = jme_eeprom_macaddr(sc);
661 if (error != 0 || (reg & SMBCSR_EEPROM_PRESENT) == 0) {
662 if (error != 0 && (bootverbose))
663 device_printf(sc->jme_dev,
664 "ethernet hardware address not found in EEPROM.\n");
670 * Integrated JR0211 has fixed PHY address whereas FPGA version
671 * requires PHY probing to get correct PHY address.
673 if ((sc->jme_flags & JME_FLAG_FPGA) == 0) {
674 sc->jme_phyaddr = CSR_READ_4(sc, JME_GPREG0) &
675 GPREG0_PHY_ADDR_MASK;
677 device_printf(dev, "PHY is at address %d.\n",
682 /* Set max allowable DMA size. */
683 if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
684 sc->jme_flags |= JME_FLAG_PCIE;
685 burst = pci_read_config(dev, i + 0x08, 2);
687 device_printf(dev, "Read request size : %d bytes.\n",
688 128 << ((burst >> 12) & 0x07));
689 device_printf(dev, "TLP payload size : %d bytes.\n",
690 128 << ((burst >> 5) & 0x07));
692 switch ((burst >> 12) & 0x07) {
694 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_128;
697 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_256;
700 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
703 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
705 sc->jme_tx_dma_size = TXCSR_DMA_SIZE_512;
706 sc->jme_rx_dma_size = RXCSR_DMA_SIZE_128;
708 /* Create coalescing sysctl node. */
/* BUGFIX: precedence — was (error = jme_dma_alloc(sc) != 0), which stored 0/1 in error instead of the return code. */
710 if ((error = jme_dma_alloc(sc)) != 0)
713 ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
715 device_printf(dev, "cannot allocate ifnet structure.\n");
721 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
722 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
723 ifp->if_ioctl = jme_ioctl;
724 ifp->if_start = jme_start;
725 ifp->if_init = jme_init;
726 ifp->if_snd.ifq_drv_maxlen = JME_TX_RING_CNT - 1;
727 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
728 IFQ_SET_READY(&ifp->if_snd);
729 /* JMC250 supports Tx/Rx checksum offload as well as TSO. */
730 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
731 ifp->if_hwassist = JME_CSUM_FEATURES | CSUM_TSO;
732 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
733 sc->jme_flags |= JME_FLAG_PMCAP;
734 ifp->if_capabilities |= IFCAP_WOL_MAGIC;
736 ifp->if_capenable = ifp->if_capabilities;
738 /* Set up MII bus. */
739 if ((error = mii_phy_probe(dev, &sc->jme_miibus, jme_mediachange,
740 jme_mediastatus)) != 0) {
741 device_printf(dev, "no PHY found!\n");
746 * Force PHY to FPGA mode.
748 if ((sc->jme_flags & JME_FLAG_FPGA) != 0) {
749 mii = device_get_softc(sc->jme_miibus);
750 if (mii->mii_instance != 0) {
751 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
752 if (miisc->mii_phy != 0) {
753 sc->jme_phyaddr = miisc->mii_phy;
757 if (sc->jme_phyaddr != 0) {
758 device_printf(sc->jme_dev,
759 "FPGA PHY is at %d\n", sc->jme_phyaddr);
761 jme_miibus_writereg(dev, sc->jme_phyaddr, 27,
767 ether_ifattach(ifp, sc->jme_eaddr);
769 /* VLAN capability setup */
770 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
772 ifp->if_capenable = ifp->if_capabilities;
774 /* Tell the upper layer(s) we support long frames. */
775 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
777 /* Create local taskq. */
778 TASK_INIT(&sc->jme_tx_task, 1, jme_tx_task, ifp);
779 sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
780 taskqueue_thread_enqueue, &sc->jme_tq);
781 if (sc->jme_tq == NULL) {
782 device_printf(dev, "could not create taskqueue.\n");
787 taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
788 device_get_nameunit(sc->jme_dev));
790 if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
791 msic = JME_MSIX_MESSAGES;
792 else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
793 msic = JME_MSI_MESSAGES;
/* One handler per allocated vector (1 for INTx/MSI fallback). */
796 for (i = 0; i < msic; i++) {
797 error = bus_setup_intr(dev, sc->jme_irq[i],
798 INTR_TYPE_NET | INTR_MPSAFE, jme_intr, NULL, sc,
799 &sc->jme_intrhand[i]);
805 device_printf(dev, "could not set up interrupt handler.\n");
806 taskqueue_free(sc->jme_tq);
/*
 * newbus detach: stop the interface and drain all deferred work, tear
 * down the taskqueue, MII child, interrupt handlers, IRQ/memory
 * resources, and MSI/MSI-X allocations, then destroy the softc mutex.
 * Order matters: tasks are drained before the resources they use are
 * released.
 */
820 jme_detach(device_t dev)
822 struct jme_softc *sc;
826 sc = device_get_softc(dev);
829 if (device_is_attached(dev)) {
831 sc->jme_flags |= JME_FLAG_DETACH;
834 callout_drain(&sc->jme_tick_ch);
835 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
836 taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
837 taskqueue_drain(taskqueue_swi, &sc->jme_link_task);
841 if (sc->jme_tq != NULL) {
842 taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
843 taskqueue_free(sc->jme_tq);
847 if (sc->jme_miibus != NULL) {
848 device_delete_child(dev, sc->jme_miibus);
849 sc->jme_miibus = NULL;
851 bus_generic_detach(dev);
860 if ((sc->jme_flags & JME_FLAG_MSIX) != 0)
861 msic = JME_MSIX_MESSAGES;
862 else if ((sc->jme_flags & JME_FLAG_MSI) != 0)
863 msic = JME_MSI_MESSAGES;
866 for (i = 0; i < msic; i++) {
867 if (sc->jme_intrhand[i] != NULL) {
868 bus_teardown_intr(dev, sc->jme_irq[i],
869 sc->jme_intrhand[i]);
870 sc->jme_intrhand[i] = NULL;
874 bus_release_resources(dev, sc->jme_irq_spec, sc->jme_irq);
875 if ((sc->jme_flags & (JME_FLAG_MSIX | JME_FLAG_MSI)) != 0)
876 pci_release_msi(dev);
877 bus_release_resources(dev, sc->jme_res_spec, sc->jme_res);
878 mtx_destroy(&sc->jme_mtx);
/*
 * Register the per-device sysctl knobs (Tx/Rx coalescing timeout and
 * packet count, Rx process limit), then pull in any device.hints
 * overrides, clamping each value to its documented range and falling
 * back to the default with a warning when out of range.
 */
884 jme_sysctl_node(struct jme_softc *sc)
888 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
889 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
890 "tx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_to,
891 0, sysctl_hw_jme_tx_coal_to, "I", "jme tx coalescing timeout");
893 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
894 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
895 "tx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_tx_coal_pkt,
896 0, sysctl_hw_jme_tx_coal_pkt, "I", "jme tx coalescing packet");
898 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
899 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
900 "rx_coal_to", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_to,
901 0, sysctl_hw_jme_rx_coal_to, "I", "jme rx coalescing timeout");
903 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
904 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
905 "rx_coal_pkt", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_rx_coal_pkt,
906 0, sysctl_hw_jme_rx_coal_pkt, "I", "jme rx coalescing packet");
908 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->jme_dev),
909 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->jme_dev)), OID_AUTO,
910 "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->jme_process_limit,
911 0, sysctl_hw_jme_proc_limit, "I",
912 "max number of Rx events to process");
914 /* Pull in device tunables. */
915 sc->jme_process_limit = JME_PROC_DEFAULT;
916 error = resource_int_value(device_get_name(sc->jme_dev),
917 device_get_unit(sc->jme_dev), "process_limit",
918 &sc->jme_process_limit);
920 if (sc->jme_process_limit < JME_PROC_MIN ||
921 sc->jme_process_limit > JME_PROC_MAX) {
922 device_printf(sc->jme_dev,
923 "process_limit value out of range; "
924 "using default: %d\n", JME_PROC_DEFAULT);
925 sc->jme_process_limit = JME_PROC_DEFAULT;
929 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
930 error = resource_int_value(device_get_name(sc->jme_dev),
931 device_get_unit(sc->jme_dev), "tx_coal_to", &sc->jme_tx_coal_to);
933 if (sc->jme_tx_coal_to < PCCTX_COAL_TO_MIN ||
934 sc->jme_tx_coal_to > PCCTX_COAL_TO_MAX) {
935 device_printf(sc->jme_dev,
936 "tx_coal_to value out of range; "
937 "using default: %d\n", PCCTX_COAL_TO_DEFAULT);
938 sc->jme_tx_coal_to = PCCTX_COAL_TO_DEFAULT;
942 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
943 error = resource_int_value(device_get_name(sc->jme_dev),
/* BUGFIX: copy/paste — tx_coal_pkt hint was being stored in jme_tx_coal_to. */
944 device_get_unit(sc->jme_dev), "tx_coal_pkt", &sc->jme_tx_coal_pkt);
946 if (sc->jme_tx_coal_pkt < PCCTX_COAL_PKT_MIN ||
947 sc->jme_tx_coal_pkt > PCCTX_COAL_PKT_MAX) {
948 device_printf(sc->jme_dev,
949 "tx_coal_pkt value out of range; "
950 "using default: %d\n", PCCTX_COAL_PKT_DEFAULT);
951 sc->jme_tx_coal_pkt = PCCTX_COAL_PKT_DEFAULT;
955 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
956 error = resource_int_value(device_get_name(sc->jme_dev),
957 device_get_unit(sc->jme_dev), "rx_coal_to", &sc->jme_rx_coal_to);
959 if (sc->jme_rx_coal_to < PCCRX_COAL_TO_MIN ||
960 sc->jme_rx_coal_to > PCCRX_COAL_TO_MAX) {
961 device_printf(sc->jme_dev,
962 "rx_coal_to value out of range; "
963 "using default: %d\n", PCCRX_COAL_TO_DEFAULT);
964 sc->jme_rx_coal_to = PCCRX_COAL_TO_DEFAULT;
968 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
969 error = resource_int_value(device_get_name(sc->jme_dev),
/* BUGFIX: copy/paste — rx_coal_pkt hint was being stored in jme_rx_coal_to. */
970 device_get_unit(sc->jme_dev), "rx_coal_pkt", &sc->jme_rx_coal_pkt);
972 if (sc->jme_rx_coal_pkt < PCCRX_COAL_PKT_MIN ||
973 sc->jme_rx_coal_pkt > PCCRX_COAL_PKT_MAX) {
974 device_printf(sc->jme_dev,
/* BUGFIX: message named the wrong knob ("tx_coal_pkt"). */
975 "rx_coal_pkt value out of range; "
976 "using default: %d\n", PCCRX_COAL_PKT_DEFAULT);
977 sc->jme_rx_coal_pkt = PCCRX_COAL_PKT_DEFAULT;
/* Argument for jme_dmamap_cb: receives the single segment's bus address. */
982 struct jme_dmamap_arg {
983 bus_addr_t jme_busaddr;
/*
 * bus_dmamap_load callback: record the (single) segment address into
 * the caller's jme_dmamap_arg.  Asserts exactly one segment was
 * returned, as all loads in this driver use contiguous allocations.
 */
987 jme_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
989 struct jme_dmamap_arg *ctx;
994 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
996 ctx = (struct jme_dmamap_arg *)arg;
997 ctx->jme_busaddr = segs[0].ds_addr;
1001 jme_dma_alloc(struct jme_softc *sc)
1003 struct jme_dmamap_arg ctx;
1004 struct jme_txdesc *txd;
1005 struct jme_rxdesc *rxd;
1006 bus_addr_t lowaddr, rx_ring_end, tx_ring_end;
1009 lowaddr = BUS_SPACE_MAXADDR;
1012 /* Create parent ring tag. */
1013 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1014 1, 0, /* algnmnt, boundary */
1015 lowaddr, /* lowaddr */
1016 BUS_SPACE_MAXADDR, /* highaddr */
1017 NULL, NULL, /* filter, filterarg */
1018 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1020 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1022 NULL, NULL, /* lockfunc, lockarg */
1023 &sc->jme_cdata.jme_ring_tag);
1025 device_printf(sc->jme_dev,
1026 "could not create parent ring DMA tag.\n");
1029 /* Create tag for Tx ring. */
1030 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1031 JME_TX_RING_ALIGN, 0, /* algnmnt, boundary */
1032 BUS_SPACE_MAXADDR, /* lowaddr */
1033 BUS_SPACE_MAXADDR, /* highaddr */
1034 NULL, NULL, /* filter, filterarg */
1035 JME_TX_RING_SIZE, /* maxsize */
1037 JME_TX_RING_SIZE, /* maxsegsize */
1039 NULL, NULL, /* lockfunc, lockarg */
1040 &sc->jme_cdata.jme_tx_ring_tag);
1042 device_printf(sc->jme_dev,
1043 "could not allocate Tx ring DMA tag.\n");
1047 /* Create tag for Rx ring. */
1048 error = bus_dma_tag_create(sc->jme_cdata.jme_ring_tag,/* parent */
1049 JME_RX_RING_ALIGN, 0, /* algnmnt, boundary */
1050 lowaddr, /* lowaddr */
1051 BUS_SPACE_MAXADDR, /* highaddr */
1052 NULL, NULL, /* filter, filterarg */
1053 JME_RX_RING_SIZE, /* maxsize */
1055 JME_RX_RING_SIZE, /* maxsegsize */
1057 NULL, NULL, /* lockfunc, lockarg */
1058 &sc->jme_cdata.jme_rx_ring_tag);
1060 device_printf(sc->jme_dev,
1061 "could not allocate Rx ring DMA tag.\n");
1065 /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1066 error = bus_dmamem_alloc(sc->jme_cdata.jme_tx_ring_tag,
1067 (void **)&sc->jme_rdata.jme_tx_ring,
1068 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1069 &sc->jme_cdata.jme_tx_ring_map);
1071 device_printf(sc->jme_dev,
1072 "could not allocate DMA'able memory for Tx ring.\n");
1076 ctx.jme_busaddr = 0;
1077 error = bus_dmamap_load(sc->jme_cdata.jme_tx_ring_tag,
1078 sc->jme_cdata.jme_tx_ring_map, sc->jme_rdata.jme_tx_ring,
1079 JME_TX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1080 if (error != 0 || ctx.jme_busaddr == 0) {
1081 device_printf(sc->jme_dev,
1082 "could not load DMA'able memory for Tx ring.\n");
1085 sc->jme_rdata.jme_tx_ring_paddr = ctx.jme_busaddr;
1087 /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1088 error = bus_dmamem_alloc(sc->jme_cdata.jme_rx_ring_tag,
1089 (void **)&sc->jme_rdata.jme_rx_ring,
1090 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1091 &sc->jme_cdata.jme_rx_ring_map);
1093 device_printf(sc->jme_dev,
1094 "could not allocate DMA'able memory for Rx ring.\n");
1098 ctx.jme_busaddr = 0;
1099 error = bus_dmamap_load(sc->jme_cdata.jme_rx_ring_tag,
1100 sc->jme_cdata.jme_rx_ring_map, sc->jme_rdata.jme_rx_ring,
1101 JME_RX_RING_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1102 if (error != 0 || ctx.jme_busaddr == 0) {
1103 device_printf(sc->jme_dev,
1104 "could not load DMA'able memory for Rx ring.\n");
1107 sc->jme_rdata.jme_rx_ring_paddr = ctx.jme_busaddr;
1109 /* Tx/Rx descriptor queue should reside within 4GB boundary. */
1110 tx_ring_end = sc->jme_rdata.jme_tx_ring_paddr + JME_TX_RING_SIZE;
1111 rx_ring_end = sc->jme_rdata.jme_rx_ring_paddr + JME_RX_RING_SIZE;
1112 if ((JME_ADDR_HI(tx_ring_end) !=
1113 JME_ADDR_HI(sc->jme_rdata.jme_tx_ring_paddr)) ||
1114 (JME_ADDR_HI(rx_ring_end) !=
1115 JME_ADDR_HI(sc->jme_rdata.jme_rx_ring_paddr))) {
1116 device_printf(sc->jme_dev, "4GB boundary crossed, "
1117 "switching to 32bit DMA address mode.\n");
1119 /* Limit DMA address space to 32bit and try again. */
1120 lowaddr = BUS_SPACE_MAXADDR_32BIT;
1124 /* Create parent buffer tag. */
1125 error = bus_dma_tag_create(bus_get_dma_tag(sc->jme_dev),/* parent */
1126 1, 0, /* algnmnt, boundary */
1127 BUS_SPACE_MAXADDR, /* lowaddr */
1128 BUS_SPACE_MAXADDR, /* highaddr */
1129 NULL, NULL, /* filter, filterarg */
1130 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1132 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1134 NULL, NULL, /* lockfunc, lockarg */
1135 &sc->jme_cdata.jme_buffer_tag);
1137 device_printf(sc->jme_dev,
1138 "could not create parent buffer DMA tag.\n");
1142 /* Create shadow status block tag. */
1143 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1144 JME_SSB_ALIGN, 0, /* algnmnt, boundary */
1145 BUS_SPACE_MAXADDR, /* lowaddr */
1146 BUS_SPACE_MAXADDR, /* highaddr */
1147 NULL, NULL, /* filter, filterarg */
1148 JME_SSB_SIZE, /* maxsize */
1150 JME_SSB_SIZE, /* maxsegsize */
1152 NULL, NULL, /* lockfunc, lockarg */
1153 &sc->jme_cdata.jme_ssb_tag);
1155 device_printf(sc->jme_dev,
1156 "could not create shared status block DMA tag.\n");
1160 /* Create tag for Tx buffers. */
1161 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1162 1, 0, /* algnmnt, boundary */
1163 BUS_SPACE_MAXADDR, /* lowaddr */
1164 BUS_SPACE_MAXADDR, /* highaddr */
1165 NULL, NULL, /* filter, filterarg */
1166 JME_TSO_MAXSIZE, /* maxsize */
1167 JME_MAXTXSEGS, /* nsegments */
1168 JME_TSO_MAXSEGSIZE, /* maxsegsize */
1170 NULL, NULL, /* lockfunc, lockarg */
1171 &sc->jme_cdata.jme_tx_tag);
1173 device_printf(sc->jme_dev, "could not create Tx DMA tag.\n");
1177 /* Create tag for Rx buffers. */
1178 error = bus_dma_tag_create(sc->jme_cdata.jme_buffer_tag,/* parent */
1179 JME_RX_BUF_ALIGN, 0, /* algnmnt, boundary */
1180 BUS_SPACE_MAXADDR, /* lowaddr */
1181 BUS_SPACE_MAXADDR, /* highaddr */
1182 NULL, NULL, /* filter, filterarg */
1183 MCLBYTES, /* maxsize */
1185 MCLBYTES, /* maxsegsize */
1187 NULL, NULL, /* lockfunc, lockarg */
1188 &sc->jme_cdata.jme_rx_tag);
1190 device_printf(sc->jme_dev, "could not create Rx DMA tag.\n");
1195 * Allocate DMA'able memory and load the DMA map for shared
1198 error = bus_dmamem_alloc(sc->jme_cdata.jme_ssb_tag,
1199 (void **)&sc->jme_rdata.jme_ssb_block,
1200 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
1201 &sc->jme_cdata.jme_ssb_map);
1203 device_printf(sc->jme_dev, "could not allocate DMA'able "
1204 "memory for shared status block.\n");
1208 ctx.jme_busaddr = 0;
1209 error = bus_dmamap_load(sc->jme_cdata.jme_ssb_tag,
1210 sc->jme_cdata.jme_ssb_map, sc->jme_rdata.jme_ssb_block,
1211 JME_SSB_SIZE, jme_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
1212 if (error != 0 || ctx.jme_busaddr == 0) {
1213 device_printf(sc->jme_dev, "could not load DMA'able memory "
1214 "for shared status block.\n");
1217 sc->jme_rdata.jme_ssb_block_paddr = ctx.jme_busaddr;
1219 /* Create DMA maps for Tx buffers. */
1220 for (i = 0; i < JME_TX_RING_CNT; i++) {
1221 txd = &sc->jme_cdata.jme_txdesc[i];
1223 txd->tx_dmamap = NULL;
1224 error = bus_dmamap_create(sc->jme_cdata.jme_tx_tag, 0,
1227 device_printf(sc->jme_dev,
1228 "could not create Tx dmamap.\n");
1232 /* Create DMA maps for Rx buffers. */
1233 if ((error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1234 &sc->jme_cdata.jme_rx_sparemap)) != 0) {
1235 device_printf(sc->jme_dev,
1236 "could not create spare Rx dmamap.\n");
1239 for (i = 0; i < JME_RX_RING_CNT; i++) {
1240 rxd = &sc->jme_cdata.jme_rxdesc[i];
1242 rxd->rx_dmamap = NULL;
1243 error = bus_dmamap_create(sc->jme_cdata.jme_rx_tag, 0,
1246 device_printf(sc->jme_dev,
1247 "could not create Rx dmamap.\n");
/*
 * jme_dma_free: release every DMA resource created by jme_dma_alloc().
 * Unloads/frees the Tx and Rx descriptor rings, destroys the per-buffer
 * Tx/Rx dmamaps (including the spare Rx map), releases the shared status
 * block, and finally destroys the buffer and ring parent tags.  Every
 * resource is NULL-checked before use and cleared afterwards, so calling
 * this on a partially initialized softc (attach error path) is safe.
 */
jme_dma_free(struct jme_softc *sc)
	struct jme_txdesc *txd;
	struct jme_rxdesc *rxd;

	/* Tx descriptor ring: unload map, free memory, destroy tag. */
	if (sc->jme_cdata.jme_tx_ring_tag != NULL) {
		if (sc->jme_cdata.jme_tx_ring_map)
			bus_dmamap_unload(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_cdata.jme_tx_ring_map);
		if (sc->jme_cdata.jme_tx_ring_map &&
		    sc->jme_rdata.jme_tx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_tx_ring_tag,
			    sc->jme_rdata.jme_tx_ring,
			    sc->jme_cdata.jme_tx_ring_map);
		sc->jme_rdata.jme_tx_ring = NULL;
		sc->jme_cdata.jme_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_ring_tag);
		sc->jme_cdata.jme_tx_ring_tag = NULL;
	/* Rx descriptor ring: same teardown sequence as Tx. */
	if (sc->jme_cdata.jme_rx_ring_tag != NULL) {
		if (sc->jme_cdata.jme_rx_ring_map)
			bus_dmamap_unload(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_cdata.jme_rx_ring_map);
		if (sc->jme_cdata.jme_rx_ring_map &&
		    sc->jme_rdata.jme_rx_ring)
			bus_dmamem_free(sc->jme_cdata.jme_rx_ring_tag,
			    sc->jme_rdata.jme_rx_ring,
			    sc->jme_cdata.jme_rx_ring_map);
		sc->jme_rdata.jme_rx_ring = NULL;
		sc->jme_cdata.jme_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_ring_tag);
		sc->jme_cdata.jme_rx_ring_tag = NULL;
	/* Per-buffer Tx dmamaps. */
	if (sc->jme_cdata.jme_tx_tag != NULL) {
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_tx_tag,
				txd->tx_dmamap = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_tx_tag);
		sc->jme_cdata.jme_tx_tag = NULL;
	/* Per-buffer Rx dmamaps, plus the spare map used by jme_newbuf(). */
	if (sc->jme_cdata.jme_rx_tag != NULL) {
		for (i = 0; i < JME_RX_RING_CNT; i++) {
			rxd = &sc->jme_cdata.jme_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
				rxd->rx_dmamap = NULL;
		if (sc->jme_cdata.jme_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->jme_cdata.jme_rx_tag,
			    sc->jme_cdata.jme_rx_sparemap);
			sc->jme_cdata.jme_rx_sparemap = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_rx_tag);
		sc->jme_cdata.jme_rx_tag = NULL;
	/* Shared status block. */
	if (sc->jme_cdata.jme_ssb_tag != NULL) {
		if (sc->jme_cdata.jme_ssb_map)
			bus_dmamap_unload(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_cdata.jme_ssb_map);
		if (sc->jme_cdata.jme_ssb_map && sc->jme_rdata.jme_ssb_block)
			bus_dmamem_free(sc->jme_cdata.jme_ssb_tag,
			    sc->jme_rdata.jme_ssb_block,
			    sc->jme_cdata.jme_ssb_map);
		sc->jme_rdata.jme_ssb_block = NULL;
		sc->jme_cdata.jme_ssb_map = NULL;
		bus_dma_tag_destroy(sc->jme_cdata.jme_ssb_tag);
		sc->jme_cdata.jme_ssb_tag = NULL;
	/* Parent tags go last, after all children are destroyed. */
	if (sc->jme_cdata.jme_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_buffer_tag);
		sc->jme_cdata.jme_buffer_tag = NULL;
	if (sc->jme_cdata.jme_ring_tag != NULL) {
		bus_dma_tag_destroy(sc->jme_cdata.jme_ring_tag);
		sc->jme_cdata.jme_ring_tag = NULL;
1351 * Make sure the interface is stopped at reboot time.
/*
 * Device shutdown method: reuse the suspend path, which stops the
 * interface and arms WOL/PHY power-down as configured.
 */
jme_shutdown(device_t dev)
	return (jme_suspend(dev));
 * Unlike other ethernet controllers, JMC250 requires
 * explicit resetting link speed to 10/100Mbps as gigabit
 * link will consume more power than 375mA.
 * Note, we reset the link speed to 10/100Mbps with
 * auto-negotiation but we don't know whether that operation
 * would succeed or not as we have no control after powering
 * off. If the renegotiation fails WOL may not work. Running
 * at 1Gbps draws more power than 375mA at 3.3V which is
 * specified in the PCI specification and that would result in
 * completely shutting down power to the ethernet controller.
 * Save current negotiated media speed/duplex/flow-control
 * to softc and restore the same link again after resuming.
 * PHY handling such as power down/resetting to 100Mbps
 * may be better handled in suspend method in phy driver.
/*
 * jme_setlinkspeed: renegotiate the link down to 10/100Mbps before WOL
 * power-down, since a gigabit link draws more than the 375mA the PCI
 * spec allows.  Caller must hold the softc lock (asserted below).
 */
jme_setlinkspeed(struct jme_softc *sc)
	struct mii_data *mii;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		/*
		 * NOTE(review): controlling expression lacks its own parens;
		 * this compiles only because IFM_SUBTYPE() expands to a
		 * parenthesized expression.  Elsewhere this file writes
		 * "switch (IFM_SUBTYPE(...))".
		 */
		switch IFM_SUBTYPE(mii->mii_media_active) {
	/* Advertise 10/100 only and kick off autonegotiation. */
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_100T2CR, 0);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_ANAR,
	    ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA);
	jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr, MII_BMCR,
	    BMCR_AUTOEN | BMCR_STARTNEG);

	/* Poll link state until jme(4) get a 10/100 link. */
	for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
		if ((mii->mii_media_status & IFM_AVALID) != 0) {
			switch (IFM_SUBTYPE(mii->mii_media_active)) {
		/* Sleep one second between polls of the PHY. */
		pause("jmelnk", hz);
	if (i == MII_ANEGTICKS_GIGE)
		device_printf(sc->jme_dev, "establishing link failed, "
		    "WOL may not work!");
	/*
	 * No link, force MAC to have 100Mbps, full-duplex link.
	 * This is the last resort and may/may not work.
	 */
	mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
	mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
/*
 * jme_setwol: program Wake-On-LAN state for suspend/shutdown.
 * Without PME capability the PHY is simply powered down; otherwise the
 * magic-frame wakeup and PME message are enabled as configured via the
 * interface capabilities, and PCI power-management status is updated.
 * Caller must hold the softc lock.
 */
jme_setwol(struct jme_softc *sc)
	JME_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		/* No PME capability, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	/* Start from a clean PME/WOL configuration. */
	gpr = CSR_READ_4(sc, JME_GPREG0) & ~GPREG0_PME_ENB;
	pmcs = CSR_READ_4(sc, JME_PMCS);
	pmcs &= ~PMCS_WOL_ENB_MASK;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
		pmcs |= PMCS_MAGIC_FRAME | PMCS_MAGIC_FRAME_ENB;
		/* Enable PME message. */
		gpr |= GPREG0_PME_ENB;
		/* For gigabit controllers, reset link speed to 10/100. */
		if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
			jme_setlinkspeed(sc);
	CSR_WRITE_4(sc, JME_PMCS, pmcs);
	CSR_WRITE_4(sc, JME_GPREG0, gpr);
	/* Reflect WOL state in the PCI power-management registers. */
	pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		jme_miibus_writereg(sc->jme_dev, sc->jme_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
/*
 * Device suspend method.  Resolves the softc from the device handle;
 * remainder of the body (stop + WOL arming) follows.
 */
jme_suspend(device_t dev)
	struct jme_softc *sc;

	sc = device_get_softc(dev);
/*
 * Device resume method: clear the PME enable bit left over from
 * suspend and restart the interface if it was up.
 */
jme_resume(device_t dev)
	struct jme_softc *sc;

	sc = device_get_softc(dev);
	if (pci_find_extcap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
		pmstat = pci_read_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->jme_dev,
		    pmc + PCIR_POWER_STATUS, pmstat, 2);
	if ((ifp->if_flags & IFF_UP) != 0)
		jme_init_locked(sc);
/*
 * jme_encap: map an outgoing mbuf chain onto Tx descriptors.
 * Handles the JMC250 TSO pseudo-checksum quirk, retries a failed DMA
 * load after m_collapse(), fills one leading "meta" descriptor plus one
 * descriptor per DMA segment, then hands ownership of the first
 * descriptor to the hardware and syncs the maps.  On failure *m_head is
 * expected to be freed/NULLed in the (elided) error paths.
 * Caller must hold the softc lock.
 */
jme_encap(struct jme_softc *sc, struct mbuf **m_head)
	struct jme_txdesc *txd;
	struct jme_desc *desc;
	bus_dma_segment_t txsegs[JME_MAXTXSEGS];
	int error, i, nsegs, prod;
	uint32_t cflags, tso_segsz;

	JME_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		/*
		 * Due to the adherence to NDIS specification JMC250
		 * assumes upper stack computed TCP pseudo checksum
		 * without including payload length. This breaks
		 * checksum offload for TSO case so recompute TCP
		 * pseudo checksum for JMC250. Hopefully this wouldn't
		 * be much burden on modern CPUs.
		 */
		struct ether_header *eh;
		uint32_t ip_off, poff;

		if (M_WRITABLE(*m_head) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
		/* Pull up Ethernet, IP, and TCP headers in turn. */
		ip_off = sizeof(struct ether_header);
		m = m_pullup(*m_head, ip_off);
		eh = mtod(m, struct ether_header *);
		/* Check the existence of VLAN tag. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
		m = m_pullup(m, ip_off + sizeof(struct ip));
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		m = m_pullup(m, poff + sizeof(struct tcphdr));
		tcp = (struct tcphdr *)(mtod(m, char *) + poff);
		/*
		 * Reset IP checksum and recompute TCP pseudo
		 * checksum that NDIS specification requires.
		 */
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    htons((tcp->th_off << 2) + IPPROTO_TCP));
			/* No need to TSO, force IP checksum offload. */
			(*m_head)->m_pkthdr.csum_flags &= ~CSUM_TSO;
			(*m_head)->m_pkthdr.csum_flags |= CSUM_IP;
			tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));

	prod = sc->jme_cdata.jme_tx_prod;
	txd = &sc->jme_cdata.jme_txdesc[prod];

	/* Load the chain; on EFBIG collapse and retry once. */
	error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
		error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	} else if (error != 0)
	/*
	 * Check descriptor overrun. Leave one free descriptor.
	 * Since we always use 64bit address mode for transmitting,
	 * each Tx request requires one more dummy descriptor.
	 */
	if (sc->jme_cdata.jme_tx_cnt + nsegs + 1 > JME_TX_RING_CNT - 1) {
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);

	/* Configure checksum offload and TSO. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz <<
		cflags |= JME_TD_TSO;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
		cflags |= JME_TD_IPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
		cflags |= JME_TD_TCPCSUM;
	if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
		cflags |= JME_TD_UDPCSUM;
	/* Configure VLAN. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		cflags |= (m->m_pkthdr.ether_vtag & JME_TD_VLAN_MASK);
		cflags |= JME_TD_VLAN_TAG;

	/* Leading descriptor carries flags/MSS/total length, no buffer. */
	desc = &sc->jme_rdata.jme_tx_ring[prod];
	desc->flags = htole32(cflags);
	desc->buflen = htole32(tso_segsz);
	desc->addr_hi = htole32(m->m_pkthdr.len);
	sc->jme_cdata.jme_tx_cnt++;
	JME_DESC_INC(prod, JME_TX_RING_CNT);
	for (i = 0; i < nsegs; i++) {
		desc = &sc->jme_rdata.jme_tx_ring[prod];
		desc->flags = htole32(JME_TD_OWN | JME_TD_64BIT);
		desc->buflen = htole32(txsegs[i].ds_len);
		desc->addr_hi = htole32(JME_ADDR_HI(txsegs[i].ds_addr));
		desc->addr_lo = htole32(JME_ADDR_LO(txsegs[i].ds_addr));
		sc->jme_cdata.jme_tx_cnt++;
		JME_DESC_INC(prod, JME_TX_RING_CNT);

	/* Update producer index. */
	sc->jme_cdata.jme_tx_prod = prod;
	/*
	 * Finally request interrupt and give the first descriptor
	 * ownership to hardware.
	 */
	desc = txd->tx_desc;
	desc->flags |= htole32(JME_TD_OWN | JME_TD_INTR);
	txd->tx_ndesc = nsegs + 1;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * Taskqueue handler that drives transmission; the task argument is the
 * interface pointer.
 */
jme_tx_task(void *arg, int pending)
	ifp = (struct ifnet *)arg;
/*
 * jme_start: if_start handler.  Dequeues packets from the interface
 * send queue, encapsulates them via jme_encap(), taps BPF listeners,
 * then kicks the Tx queue with a single cached-TXCSR write and arms
 * the watchdog.  Bails out early when the ring is near full, the
 * interface is not running, or there is no link.
 */
jme_start(struct ifnet *ifp)
	struct jme_softc *sc;
	struct mbuf *m_head;

	/* Ring nearly full: let jme_txeof() reclaim space first. */
	if (sc->jme_cdata.jme_tx_cnt >= JME_TX_DESC_HIWAT)

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->jme_flags & JME_FLAG_LINK) == 0) {

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (jme_encap(sc, &m_head)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	/*
	 * Reading TXCSR takes very long time under heavy load
	 * so cache TXCSR value and writes the ORed value with
	 * the kick command to the TXCSR. This saves one register
	 */
	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB |
	    TXCSR_TXQ_N_START(TXCSR_TXQ0));
	/* Set a timeout in case the chip goes out to lunch. */
	sc->jme_watchdog_timer = JME_TX_TIMEOUT;
/*
 * jme_watchdog: Tx watchdog, called from the tick callout with the
 * softc lock held.  Distinguishes timeouts caused by lost link and by
 * missed Tx completion interrupts; in the general case it reinitializes
 * the chip and requeues pending transmits.
 */
jme_watchdog(struct jme_softc *sc)
	JME_LOCK_ASSERT(sc);

	/* Timer not armed, or armed and not yet expired. */
	if (sc->jme_watchdog_timer == 0 || --sc->jme_watchdog_timer)

	if ((sc->jme_flags & JME_FLAG_LINK) == 0) {
		if_printf(sc->jme_ifp, "watchdog timeout (missed link)\n");
		jme_init_locked(sc);
	/* All descriptors reclaimed: only the interrupt was missed. */
	if (sc->jme_cdata.jme_tx_cnt == 0) {
		if_printf(sc->jme_ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);

	if_printf(sc->jme_ifp, "watchdog timeout\n");
	jme_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
/*
 * jme_ioctl: interface ioctl handler.  Handles MTU changes (disabling
 * TxCSUM/TSO when the MTU no longer fits the 2KB Tx FIFO), interface
 * up/down and promiscuous/allmulti transitions, media ioctls via
 * miibus, and capability toggles (checksum offload, TSO, WOL, VLAN);
 * everything else falls through to ether_ioctl().
 */
jme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	struct jme_softc *sc;
	struct mii_data *mii;

	ifr = (struct ifreq *)data;
	/* Reject MTUs out of range for this chip variant. */
	if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > JME_JUMBO_MTU ||
	    ((sc->jme_flags & JME_FLAG_NOJUMBO) != 0 &&
	    ifr->ifr_mtu > JME_MAX_MTU)) {
	if (ifp->if_mtu != ifr->ifr_mtu) {
		/*
		 * No special configuration is required when interface
		 * MTU is changed but availability of TSO/Tx checksum
		 * offload should be checked against new MTU size as
		 * FIFO size is just 2K.
		 */
		if (ifr->ifr_mtu >= JME_TX_FIFO_SIZE) {
			ifp->if_capenable &=
			    ~(IFCAP_TXCSUM | IFCAP_TSO4);
			    ~(JME_CSUM_FEATURES | CSUM_TSO);
			VLAN_CAPABILITIES(ifp);
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			jme_init_locked(sc);
	if ((ifp->if_flags & IFF_UP) != 0) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			/* Only filter flags changed: reprogram Rx filter. */
			if (((ifp->if_flags ^ sc->jme_if_flags)
			    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
			if ((sc->jme_flags & JME_FLAG_DETACH) == 0)
				jme_init_locked(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
	sc->jme_if_flags = ifp->if_flags;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
	mii = device_get_softc(sc->jme_miibus);
	error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
	/* Capability toggles: only flip bits the hardware supports. */
	mask = ifr->ifr_reqcap ^ ifp->if_capenable;
	if ((mask & IFCAP_TXCSUM) != 0 &&
	    ifp->if_mtu < JME_TX_FIFO_SIZE) {
		if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= JME_CSUM_FEATURES;
				ifp->if_hwassist &= ~JME_CSUM_FEATURES;
	if ((mask & IFCAP_RXCSUM) != 0 &&
	    (IFCAP_RXCSUM & ifp->if_capabilities) != 0) {
		ifp->if_capenable ^= IFCAP_RXCSUM;
		reg = CSR_READ_4(sc, JME_RXMAC);
		reg &= ~RXMAC_CSUM_ENB;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
			reg |= RXMAC_CSUM_ENB;
		CSR_WRITE_4(sc, JME_RXMAC, reg);
	if ((mask & IFCAP_TSO4) != 0 &&
	    ifp->if_mtu < JME_TX_FIFO_SIZE) {
		if ((IFCAP_TSO4 & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) != 0)
				ifp->if_hwassist |= CSUM_TSO;
				ifp->if_hwassist &= ~CSUM_TSO;
	if ((mask & IFCAP_WOL_MAGIC) != 0 &&
	    (IFCAP_WOL_MAGIC & ifp->if_capabilities) != 0)
		ifp->if_capenable ^= IFCAP_WOL_MAGIC;
	if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
	    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
		ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
	if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (IFCAP_VLAN_HWTAGGING & ifp->if_capabilities) != 0) {
		ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
	VLAN_CAPABILITIES(ifp);
	error = ether_ioctl(ifp, cmd, data);
/*
 * jme_mac_config: reprogram the MAC to match the speed/duplex/flow
 * control negotiated by the PHY.  Resets GHC first, then rebuilds GHC,
 * RXMAC, TXMAC, and TXPFC from the active media word.  Caller must hold
 * the softc lock.
 */
jme_mac_config(struct jme_softc *sc)
	struct mii_data *mii;
	uint32_t ghc, rxmac, txmac, txpause;

	JME_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->jme_miibus);
	/* Pulse the MAC reset before reconfiguring. */
	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
	CSR_WRITE_4(sc, JME_GHC, 0);
	rxmac = CSR_READ_4(sc, JME_RXMAC);
	rxmac &= ~RXMAC_FC_ENB;
	txmac = CSR_READ_4(sc, JME_TXMAC);
	txmac &= ~(TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST);
	txpause = CSR_READ_4(sc, JME_TXPFC);
	txpause &= ~TXPFC_PAUSE_ENB;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no collision handling, honor pause frames. */
		ghc |= GHC_FULL_DUPLEX;
		rxmac &= ~RXMAC_COLL_DET_ENB;
		txmac &= ~(TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE |
		    TXMAC_BACKOFF | TXMAC_CARRIER_EXT |
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			txpause |= TXPFC_PAUSE_ENB;
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			rxmac |= RXMAC_FC_ENB;
		/* Disable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) &
		    ~(TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB));
		/* Half duplex: CSMA/CD collision handling. */
		rxmac |= RXMAC_COLL_DET_ENB;
		txmac |= TXMAC_COLL_ENB | TXMAC_CARRIER_SENSE | TXMAC_BACKOFF;
		/* Enable retry transmit timer/retry limit. */
		CSR_WRITE_4(sc, JME_TXTRHD, CSR_READ_4(sc, JME_TXTRHD) |
		    TXTRHD_RT_PERIOD_ENB | TXTRHD_RT_LIMIT_ENB);
	/* Reprogram Tx/Rx MACs with resolved speed/duplex. */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
		ghc |= GHC_SPEED_10;
		ghc |= GHC_SPEED_100;
		if ((sc->jme_flags & JME_FLAG_FASTETH) != 0)
		ghc |= GHC_SPEED_1000;
		/* Gigabit half duplex needs carrier extension/bursting. */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0)
			txmac |= TXMAC_CARRIER_EXT | TXMAC_FRAME_BURST;
	CSR_WRITE_4(sc, JME_GHC, ghc);
	CSR_WRITE_4(sc, JME_RXMAC, rxmac);
	CSR_WRITE_4(sc, JME_TXMAC, txmac);
	CSR_WRITE_4(sc, JME_TXPFC, txpause);
/*
 * jme_link_task: taskqueue handler for link-state changes.  Records the
 * new link state, then fully quiesces the chip (interrupts, callout,
 * queued tasks), reclaims all pending Rx/Tx resources, reinitializes
 * the rings, and restarts the MAC if there is a usable link.
 */
jme_link_task(void *arg, int pending)
	struct jme_softc *sc;
	struct mii_data *mii;
	struct jme_txdesc *txd;

	sc = (struct jme_softc *)arg;
	mii = device_get_softc(sc->jme_miibus);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
	/* Recompute JME_FLAG_LINK from the current media status. */
	sc->jme_flags &= ~JME_FLAG_LINK;
	if ((mii->mii_media_status & IFM_AVALID) != 0) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
			sc->jme_flags |= JME_FLAG_LINK;
			if ((sc->jme_flags & JME_FLAG_FASTETH) == 0)
				sc->jme_flags |= JME_FLAG_LINK;
	/*
	 * Disabling Rx/Tx MACs have a side-effect of resetting
	 * JME_TXNDA/JME_RXNDA register to the first address of
	 * Tx/Rx descriptor address. So driver should reset its
	 * internal producer/consumer pointer and reclaim any
	 * allocated resources. Note, just saving the value of
	 * JME_TXNDA and JME_RXNDA registers before stopping MAC
	 * and restoring JME_TXNDA/JME_RXNDA register is not
	 * sufficient to make sure correct MAC state because
	 * stopping MAC operation can take a while and hardware
	 * might have updated JME_TXNDA/JME_RXNDA registers
	 * during the stop operation.
	 */
	/* Block execution of task. */
	taskqueue_block(sc->jme_tq);
	/* Disable interrupts and stop driver. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	callout_stop(&sc->jme_tick_ch);
	sc->jme_watchdog_timer = 0;

	/* Stop receiver/transmitter. */
	/* XXX Drain all queued tasks. */
	taskqueue_drain(sc->jme_tq, &sc->jme_int_task);
	taskqueue_drain(sc->jme_tq, &sc->jme_tx_task);
	/* Flush any partially received frame. */
	jme_rxintr(sc, JME_RX_RING_CNT);
	if (sc->jme_cdata.jme_rxhead != NULL)
		m_freem(sc->jme_cdata.jme_rxhead);
	JME_RXCHAIN_RESET(sc);
	if (sc->jme_cdata.jme_tx_cnt != 0) {
		/* Remove queued packets for transmit. */
		for (i = 0; i < JME_TX_RING_CNT; i++) {
			txd = &sc->jme_cdata.jme_txdesc[i];
			if (txd->tx_m != NULL) {
				    sc->jme_cdata.jme_tx_tag,
				    BUS_DMASYNC_POSTWRITE);
				    sc->jme_cdata.jme_tx_tag,
	/*
	 * Reuse configured Rx descriptors and reset
	 * producer/consumer index.
	 */
	sc->jme_cdata.jme_rx_cons = 0;
	atomic_set_int(&sc->jme_morework, 0);
	jme_init_tx_ring(sc);
	/* Initialize shadow status block. */
	/* Program MAC with resolved speed/duplex/flow-control. */
	if ((sc->jme_flags & JME_FLAG_LINK) != 0) {
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
		/* Set Tx ring address to the hardware. */
		paddr = JME_TX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
		/* Set Rx ring address to the hardware. */
		paddr = JME_RX_RING_ADDR(sc, 0);
		CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
		CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
		/* Restart receiver/transmitter. */
		CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr | RXCSR_RX_ENB |
		CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr | TXCSR_TX_ENB);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
	/* Unblock execution of task. */
	taskqueue_unblock(sc->jme_tq);
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
	/*
	 * Fast interrupt filter: check whether this device raised the
	 * interrupt (0xFFFFFFFF means the device is gone), mask further
	 * interrupts, and defer real processing to jme_int_task.
	 */
	struct jme_softc *sc;

	sc = (struct jme_softc *)arg;
	status = CSR_READ_4(sc, JME_INTR_REQ_STATUS);
	if (status == 0 || status == 0xFFFFFFFF)
		return (FILTER_STRAY);
	/* Disable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
	taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);

	return (FILTER_HANDLED);
/*
 * jme_int_task: deferred interrupt handler.  Acks pending coalescing
 * events, processes received frames up to the process limit, kicks the
 * receiver when it ran out of descriptors, schedules Tx reclamation,
 * and re-queues itself while work remains; otherwise re-enables
 * interrupts.
 */
jme_int_task(void *arg, int pending)
	struct jme_softc *sc;

	sc = (struct jme_softc *)arg;
	status = CSR_READ_4(sc, JME_INTR_STATUS);
	/* Fold in "more Rx work" state left over from the last pass. */
	more = atomic_readandclear_int(&sc->jme_morework);
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO;
	if ((status & JME_INTRS) == 0 || status == 0xFFFFFFFF)
	/* Reset PCC counter/timer and Ack interrupts. */
	status &= ~(INTR_TXQ_COMP | INTR_RXQ_COMP);
	if ((status & (INTR_TXQ_COAL | INTR_TXQ_COAL_TO)) != 0)
		status |= INTR_TXQ_COAL | INTR_TXQ_COAL_TO | INTR_TXQ_COMP;
	if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0)
		status |= INTR_RXQ_COAL | INTR_RXQ_COAL_TO | INTR_RXQ_COMP;
	CSR_WRITE_4(sc, JME_INTR_STATUS, status);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((status & (INTR_RXQ_COAL | INTR_RXQ_COAL_TO)) != 0) {
			more = jme_rxintr(sc, sc->jme_process_limit);
				/* Hit the limit; remember there is more. */
				atomic_set_int(&sc->jme_morework, 1);
		if ((status & INTR_RXQ_DESC_EMPTY) != 0) {
			/*
			 * Notify hardware availability of new Rx
			 * Reading RXCSR takes very long time under
			 * heavy load so cache RXCSR value and writes
			 * the ORed value with the kick command to
			 * the RXCSR. This saves one register access
			 */
			CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr |
			    RXCSR_RX_ENB | RXCSR_RXQ_START);
		/*
		 * Reclaiming Tx buffers are deferred to make jme(4) run
		 * without locks held.
		 */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->jme_tq, &sc->jme_tx_task);
	if (more != 0 || (CSR_READ_4(sc, JME_INTR_STATUS) & JME_INTRS) != 0) {
		taskqueue_enqueue(sc->jme_tq, &sc->jme_int_task);
	/* Reenable interrupts. */
	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
/*
 * jme_txeof: reclaim completed Tx descriptors.  Walks the ring from the
 * consumer index, frees mbufs of transmitted frames, accumulates error
 * and collision statistics, clears OACTIVE, and disarms the watchdog
 * once the ring is empty.  Caller must hold the softc lock.
 */
jme_txeof(struct jme_softc *sc)
	struct jme_txdesc *txd;

	JME_LOCK_ASSERT(sc);

	cons = sc->jme_cdata.jme_tx_cons;
	if (cons == sc->jme_cdata.jme_tx_prod)

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (; cons != sc->jme_cdata.jme_tx_prod;) {
		txd = &sc->jme_cdata.jme_txdesc[cons];
		status = le32toh(txd->tx_desc->flags);
		/* Hardware still owns this frame: stop reclaiming. */
		if ((status & JME_TD_OWN) == JME_TD_OWN)

		if ((status & (JME_TD_TMOUT | JME_TD_RETRY_EXP)) != 0)
			if ((status & JME_TD_COLLISION) != 0)
				ifp->if_collisions +=
				    le32toh(txd->tx_desc->buflen) &
				    JME_TD_BUF_LEN_MASK;
		/*
		 * Only the first descriptor of multi-descriptor
		 * transmission is updated so driver have to skip entire
		 * chained buffers for the transmitted frame. In other
		 * words, JME_TD_OWN bit is valid only at the first
		 * descriptor of a multi-descriptor transmission.
		 */
		for (nsegs = 0; nsegs < txd->tx_ndesc; nsegs++) {
			sc->jme_rdata.jme_tx_ring[cons].flags = 0;
			JME_DESC_INC(cons, JME_TX_RING_CNT);

		/* Reclaim transferred mbufs. */
		bus_dmamap_sync(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->jme_cdata.jme_tx_tag, txd->tx_dmamap);
		KASSERT(txd->tx_m != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		sc->jme_cdata.jme_tx_cnt -= txd->tx_ndesc;
		KASSERT(sc->jme_cdata.jme_tx_cnt >= 0,
		    ("%s: Active Tx desc counter was garbled\n", __func__));
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->jme_cdata.jme_tx_cons = cons;
	/* Unarm watchdog timer when there is no pending descriptors in queue. */
	if (sc->jme_cdata.jme_tx_cnt == 0)
		sc->jme_watchdog_timer = 0;

	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
	    sc->jme_cdata.jme_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2300 static __inline void
2301 jme_discard_rxbuf(struct jme_softc *sc, int cons)
2303 struct jme_desc *desc;
2305 desc = &sc->jme_rdata.jme_rx_ring[cons];
2306 desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
2307 desc->buflen = htole32(MCLBYTES);
/*
 * Receive a frame.
 *
 * jme_rxeof: assemble one (possibly multi-segment) received frame from
 * the Rx ring starting at the current consumer index.  Each descriptor
 * is refilled with a fresh buffer via jme_newbuf() before its mbuf is
 * chained; error frames and refill failures recycle the descriptors in
 * place.  On the final segment, lengths are fixed up, the 10-byte
 * alignment pad is stripped, checksum/VLAN metadata is attached, and
 * the frame is handed to if_input.  Advances the consumer index by the
 * frame's segment count.
 */
jme_rxeof(struct jme_softc *sc)
	struct jme_desc *desc;
	struct jme_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint32_t flags, status;
	int cons, count, nsegs;

	cons = sc->jme_cdata.jme_rx_cons;
	desc = &sc->jme_rdata.jme_rx_ring[cons];
	flags = le32toh(desc->flags);
	status = le32toh(desc->buflen);
	nsegs = JME_RX_NSEGS(status);
	sc->jme_cdata.jme_rxlen = JME_RX_BYTES(status) - JME_RX_PAD_BYTES;
	if ((status & JME_RX_ERR_STAT) != 0) {
		/* Bad frame: give the descriptor back to the chip. */
		jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
#ifdef JME_SHOW_ERRORS
		device_printf(sc->jme_dev, "%s : receive error = 0x%b\n",
		    __func__, JME_RX_ERR(status), JME_RX_ERR_BITS);
		sc->jme_cdata.jme_rx_cons += nsegs;
		sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;

	for (count = 0; count < nsegs; count++,
	    JME_DESC_INC(cons, JME_RX_RING_CNT)) {
		rxd = &sc->jme_cdata.jme_rxdesc[cons];
		/* Add a new receive buffer to the ring. */
		if (jme_newbuf(sc, rxd) != 0) {
			/* Refill failed: drop the whole frame. */
			jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
			if (sc->jme_cdata.jme_rxhead != NULL) {
				m_freem(sc->jme_cdata.jme_rxhead);
				JME_RXCHAIN_RESET(sc);
		/*
		 * Assume we've received a full sized frame.
		 * Actual size is fixed when we encounter the end of
		 * multi-segmented frame.
		 */
		mp->m_len = MCLBYTES;

		/* Chain received mbufs. */
		if (sc->jme_cdata.jme_rxhead == NULL) {
			sc->jme_cdata.jme_rxhead = mp;
			sc->jme_cdata.jme_rxtail = mp;
			/*
			 * Receive processor can receive a maximum frame
			 * size of 65535 bytes.
			 */
			mp->m_flags &= ~M_PKTHDR;
			sc->jme_cdata.jme_rxtail->m_next = mp;
			sc->jme_cdata.jme_rxtail = mp;

		if (count == nsegs - 1) {
			/* Last desc. for this frame. */
			m = sc->jme_cdata.jme_rxhead;
			m->m_flags |= M_PKTHDR;
			m->m_pkthdr.len = sc->jme_cdata.jme_rxlen;
				/* Set first mbuf size. */
				m->m_len = MCLBYTES - JME_RX_PAD_BYTES;
				/* Set last mbuf size. */
				mp->m_len = sc->jme_cdata.jme_rxlen -
				    ((MCLBYTES - JME_RX_PAD_BYTES) +
				    (MCLBYTES * (nsegs - 2)));
				m->m_len = sc->jme_cdata.jme_rxlen;
			m->m_pkthdr.rcvif = ifp;
			/*
			 * Account for 10bytes auto padding which is used
			 * to align IP header on 32bit boundary. Also note,
			 * CRC bytes is automatically removed by the
			 */
			m->m_data += JME_RX_PAD_BYTES;
			/* Set checksum information. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
			    (flags & JME_RD_IPV4) != 0) {
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
				if ((flags & JME_RD_IPCSUM) != 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (((flags & JME_RD_MORE_FRAG) == 0) &&
				    ((flags & (JME_RD_TCP | JME_RD_TCPCSUM)) ==
				    (JME_RD_TCP | JME_RD_TCPCSUM) ||
				    (flags & (JME_RD_UDP | JME_RD_UDPCSUM)) ==
				    (JME_RD_UDP | JME_RD_UDPCSUM))) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
			/* Check for VLAN tagged packets. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (flags & JME_RD_VLAN_TAG) != 0) {
				m->m_pkthdr.ether_vtag =
				    flags & JME_RD_VLAN_MASK;
				m->m_flags |= M_VLANTAG;
			(*ifp->if_input)(ifp, m);
			/* Reset mbuf chains. */
			JME_RXCHAIN_RESET(sc);

	sc->jme_cdata.jme_rx_cons += nsegs;
	sc->jme_cdata.jme_rx_cons %= JME_RX_RING_CNT;
/*
 * jme_rxintr: process up to `count` received frames.  Syncs the Rx
 * descriptor ring, walks descriptors owned by the host, sanity-checks
 * the segment count against the reported length, and dispatches each
 * frame via jme_rxeof().  Returns 0 if the budget was not exhausted,
 * EAGAIN if more frames may be pending.
 */
jme_rxintr(struct jme_softc *sc, int count)
	struct jme_desc *desc;
	int nsegs, prog, pktlen;

	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (prog = 0; count > 0; prog++) {
		desc = &sc->jme_rdata.jme_rx_ring[sc->jme_cdata.jme_rx_cons];
		/* Stop at the first descriptor still owned by hardware. */
		if ((le32toh(desc->flags) & JME_RD_OWN) == JME_RD_OWN)
		if ((le32toh(desc->buflen) & JME_RD_VALID) == 0)
		nsegs = JME_RX_NSEGS(le32toh(desc->buflen));
		/*
		 * Check number of segments against received bytes.
		 * Non-matching value would indicate that hardware
		 * is still trying to update Rx descriptors. I'm not
		 * sure whether this check is needed.
		 */
		pktlen = JME_RX_BYTES(le32toh(desc->buflen));
		if (nsegs != ((pktlen + (MCLBYTES - 1)) / MCLBYTES))
		/* Received a frame. */
	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
	    sc->jme_cdata.jme_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (count > 0 ? 0 : EAGAIN);
/*
 * Periodic (1 Hz) housekeeping callout body: polls the MII bus for link
 * changes and reschedules itself via callout_reset().  Runs with the
 * driver lock held (asserted below).
 */
2483 	struct jme_softc *sc;
2484 	struct mii_data *mii;
2486 	sc = (struct jme_softc *)arg;
2488 	JME_LOCK_ASSERT(sc);
2490 	mii = device_get_softc(sc->jme_miibus);
2493 	 * Reclaim Tx buffers that have been completed. It's not
2494 	 * needed here but it would release allocated mbuf chains
2495 	 * faster and limit the maximum delay to a hz.
/* Re-arm for the next second. */
2499 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
/*
 * jme_reset: put the MAC through a full software reset by pulsing
 * GHC_RESET in the global host control register, leaving the chip
 * in a known quiescent state.
 */
2503 jme_reset(struct jme_softc *sc)
2506 	/* Stop receiver, transmitter. */
2509 	CSR_WRITE_4(sc, JME_GHC, GHC_RESET);
/* Clear the reset bit to release the chip from reset. */
2511 	CSR_WRITE_4(sc, JME_GHC, 0);
/*
 * jme_init: unlocked wrapper used as the ifnet if_init entry point;
 * delegates to jme_init_locked() (locking lines elided in this view).
 */
2517 	struct jme_softc *sc;
2519 	sc = (struct jme_softc *)xsc;
2521 	jme_init_locked(sc);
/*
 * jme_init_locked: bring the interface up.  Resets the chip, initializes
 * the Tx/Rx rings and shadow status block, programs the station address,
 * DMA/queue/coalescing parameters and the interrupt mask, then marks the
 * interface running.  Must be called with the driver lock held.
 *
 * Fix: the Tx retry-limit field of JME_TXTRHD was being masked with
 * TXTRHD_RT_LIMIT_SHIFT instead of TXTRHD_RT_LIMIT_MASK (copy/paste
 * error -- compare the PERIOD field above it, which shifts then masks).
 */
2526 jme_init_locked(struct jme_softc *sc)
2529 	struct mii_data *mii;
2530 	uint8_t eaddr[ETHER_ADDR_LEN];
2535 	JME_LOCK_ASSERT(sc);
2538 	mii = device_get_softc(sc->jme_miibus);
2541 	 * Cancel any pending I/O.
2546 	 * Reset the chip to a known state.
2550 	/* Init descriptors. */
2551 	error = jme_init_rx_ring(sc);
/* Rx ring setup can fail if mbuf clusters are exhausted. */
2553 		device_printf(sc->jme_dev,
2554 		    "%s: initialization failed: no memory for Rx buffers.\n",
2559 	jme_init_tx_ring(sc);
2560 	/* Initialize shadow status block. */
2563 	/* Reprogram the station address. */
2564 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
/* PAR0/PAR1 hold the 6-byte MAC address in little-endian register order. */
2565 	CSR_WRITE_4(sc, JME_PAR0,
2566 	    eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]);
2567 	CSR_WRITE_4(sc, JME_PAR1, eaddr[5] << 8 | eaddr[4]);
2570 	 * Configure Tx queue.
2571 	 *  Tx priority queue weight value : 0
2572 	 *  Tx FIFO threshold for processing next packet : 16QW
2573 	 *  Maximum Tx DMA length : 512
2574 	 *  Allow Tx DMA burst.
2576 	sc->jme_txcsr = TXCSR_TXQ_N_SEL(TXCSR_TXQ0);
2577 	sc->jme_txcsr |= TXCSR_TXQ_WEIGHT(TXCSR_TXQ_WEIGHT_MIN);
2578 	sc->jme_txcsr |= TXCSR_FIFO_THRESH_16QW;
2579 	sc->jme_txcsr |= sc->jme_tx_dma_size;
2580 	sc->jme_txcsr |= TXCSR_DMA_BURST;
2581 	CSR_WRITE_4(sc, JME_TXCSR, sc->jme_txcsr);
2583 	/* Set Tx descriptor counter. */
2584 	CSR_WRITE_4(sc, JME_TXQDC, JME_TX_RING_CNT);
2586 	/* Set Tx ring address to the hardware. */
2587 	paddr = JME_TX_RING_ADDR(sc, 0);
2588 	CSR_WRITE_4(sc, JME_TXDBA_HI, JME_ADDR_HI(paddr));
2589 	CSR_WRITE_4(sc, JME_TXDBA_LO, JME_ADDR_LO(paddr));
2591 	/* Configure TxMAC parameters. */
2592 	reg = TXMAC_IFG1_DEFAULT | TXMAC_IFG2_DEFAULT | TXMAC_IFG_ENB;
2593 	reg |= TXMAC_THRESH_1_PKT;
2594 	reg |= TXMAC_CRC_ENB | TXMAC_PAD_ENB;
2595 	CSR_WRITE_4(sc, JME_TXMAC, reg);
2598 	 * Configure Rx queue.
2599 	 *  FIFO full threshold for transmitting Tx pause packet : 128T
2600 	 *  FIFO threshold for processing next packet : 128QW
2602 	 *  Max Rx DMA length : 128
2603 	 *  Rx descriptor retry : 32
2604 	 *  Rx descriptor retry time gap : 256ns
2605 	 *  Don't receive runt/bad frame.
2607 	sc->jme_rxcsr = RXCSR_FIFO_FTHRESH_128T;
2609 	 * Since Rx FIFO size is 4K bytes, receiving frames larger
2610 	 * than 4K bytes will suffer from Rx FIFO overruns. So
2611 	 * decrease FIFO threshold to reduce the FIFO overruns for
2612 	 * frames larger than 4000 bytes.
2613 	 * For best performance of standard MTU sized frames use
2614 	 * maximum allowable FIFO threshold, 128QW.
2616 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
2617 	    ETHER_CRC_LEN) > JME_RX_FIFO_SIZE)
2618 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_16QW;
2620 		sc->jme_rxcsr |= RXCSR_FIFO_THRESH_128QW;
2621 	sc->jme_rxcsr |= sc->jme_rx_dma_size | RXCSR_RXQ_N_SEL(RXCSR_RXQ0);
2622 	sc->jme_rxcsr |= RXCSR_DESC_RT_CNT(RXCSR_DESC_RT_CNT_DEFAULT);
2623 	sc->jme_rxcsr |= RXCSR_DESC_RT_GAP_256 & RXCSR_DESC_RT_GAP_MASK;
2624 	CSR_WRITE_4(sc, JME_RXCSR, sc->jme_rxcsr);
2626 	/* Set Rx descriptor counter. */
2627 	CSR_WRITE_4(sc, JME_RXQDC, JME_RX_RING_CNT);
2629 	/* Set Rx ring address to the hardware. */
2630 	paddr = JME_RX_RING_ADDR(sc, 0);
2631 	CSR_WRITE_4(sc, JME_RXDBA_HI, JME_ADDR_HI(paddr));
2632 	CSR_WRITE_4(sc, JME_RXDBA_LO, JME_ADDR_LO(paddr));
2634 	/* Clear receive filter. */
2635 	CSR_WRITE_4(sc, JME_RXMAC, 0);
2636 	/* Set up the receive filter. */
2641 	 * Disable all WOL bits as WOL can interfere normal Rx
2642 	 * operation. Also clear WOL detection status bits.
2644 	reg = CSR_READ_4(sc, JME_PMCS);
2645 	reg &= ~PMCS_WOL_ENB_MASK;
2646 	CSR_WRITE_4(sc, JME_PMCS, reg);
2648 	reg = CSR_READ_4(sc, JME_RXMAC);
2650 	 * Pad 10bytes right before received frame. This will greatly
2651 	 * help Rx performance on strict-alignment architectures as
2652 	 * it does not need to copy the frame to align the payload.
2654 	reg |= RXMAC_PAD_10BYTES;
2655 	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
2656 		reg |= RXMAC_CSUM_ENB;
2657 	CSR_WRITE_4(sc, JME_RXMAC, reg);
2659 	/* Configure general purpose reg0 */
2660 	reg = CSR_READ_4(sc, JME_GPREG0);
2661 	reg &= ~GPREG0_PCC_UNIT_MASK;
2662 	/* Set PCC timer resolution to micro-seconds unit. */
2663 	reg |= GPREG0_PCC_UNIT_US;
2665 	 * Disable all shadow register posting as we have to read
2666 	 * JME_INTR_STATUS register in jme_int_task. Also it seems
2667 	 * that it's hard to synchronize interrupt status between
2668 	 * hardware and software with shadow posting due to
2669 	 * requirements of bus_dmamap_sync(9).
2671 	reg |= GPREG0_SH_POST_DW7_DIS | GPREG0_SH_POST_DW6_DIS |
2672 	    GPREG0_SH_POST_DW5_DIS | GPREG0_SH_POST_DW4_DIS |
2673 	    GPREG0_SH_POST_DW3_DIS | GPREG0_SH_POST_DW2_DIS |
2674 	    GPREG0_SH_POST_DW1_DIS | GPREG0_SH_POST_DW0_DIS;
2675 	/* Disable posting of DW0. */
2676 	reg &= ~GPREG0_POST_DW0_ENB;
2677 	/* Clear PME message. */
2678 	reg &= ~GPREG0_PME_ENB;
2679 	/* Set PHY address. */
2680 	reg &= ~GPREG0_PHY_ADDR_MASK;
2681 	reg |= sc->jme_phyaddr;
2682 	CSR_WRITE_4(sc, JME_GPREG0, reg);
2684 	/* Configure Tx queue 0 packet completion coalescing. */
2685 	reg = (sc->jme_tx_coal_to << PCCTX_COAL_TO_SHIFT) &
2687 	reg |= (sc->jme_tx_coal_pkt << PCCTX_COAL_PKT_SHIFT) &
2688 	    PCCTX_COAL_PKT_MASK;
2689 	reg |= PCCTX_COAL_TXQ0;
2690 	CSR_WRITE_4(sc, JME_PCCTX, reg);
2692 	/* Configure Rx queue 0 packet completion coalescing. */
2693 	reg = (sc->jme_rx_coal_to << PCCRX_COAL_TO_SHIFT) &
2695 	reg |= (sc->jme_rx_coal_pkt << PCCRX_COAL_PKT_SHIFT) &
2696 	    PCCRX_COAL_PKT_MASK;
2697 	CSR_WRITE_4(sc, JME_PCCRX0, reg);
2699 	/* Configure shadow status block but don't enable posting. */
2700 	paddr = sc->jme_rdata.jme_ssb_block_paddr;
2701 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_HI, JME_ADDR_HI(paddr));
2702 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO, JME_ADDR_LO(paddr));
2704 	/* Disable Timer 1 and Timer 2. */
2705 	CSR_WRITE_4(sc, JME_TIMER1, 0);
2706 	CSR_WRITE_4(sc, JME_TIMER2, 0);
2708 	/* Configure retry transmit period, retry limit value. */
2709 	CSR_WRITE_4(sc, JME_TXTRHD,
2710 	    ((TXTRHD_RT_PERIOD_DEFAULT << TXTRHD_RT_PERIOD_SHIFT) &
2711 	    TXTRHD_RT_PERIOD_MASK) |
2712 	    ((TXTRHD_RT_LIMIT_DEFAULT << TXTRHD_RT_LIMIT_SHIFT) &
2713 	    TXTRHD_RT_LIMIT_MASK));
2716 	CSR_WRITE_4(sc, JME_RSSC, RSSC_DIS_RSS);
2718 	/* Initialize the interrupt mask. */
2719 	CSR_WRITE_4(sc, JME_INTR_MASK_SET, JME_INTRS);
2720 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2723 	 * Enabling Tx/Rx DMA engines and Rx queue processing is
2724 	 * done after detection of valid link in jme_link_task.
2727 	sc->jme_flags &= ~JME_FLAG_LINK;
2728 	/* Set the current media. */
2731 	callout_reset(&sc->jme_tick_ch, hz, jme_tick, sc);
2733 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2734 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * jme_stop: bring the interface down.  Marks the ifnet not-running,
 * cancels the tick callout, masks interrupts, disables shadow status
 * posting, stops the MAC Tx/Rx engines, and releases every mbuf still
 * held in the Rx and Tx rings.  Must be called with the driver lock held.
 */
2738 jme_stop(struct jme_softc *sc)
2741 	struct jme_txdesc *txd;
2742 	struct jme_rxdesc *rxd;
2745 	JME_LOCK_ASSERT(sc);
2747 	 * Mark the interface down and cancel the watchdog timer.
2750 	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2751 	sc->jme_flags &= ~JME_FLAG_LINK;
2752 	callout_stop(&sc->jme_tick_ch);
2753 	sc->jme_watchdog_timer = 0;
2756 	 * Disable interrupts.
2758 	CSR_WRITE_4(sc, JME_INTR_MASK_CLR, JME_INTRS);
2759 	CSR_WRITE_4(sc, JME_INTR_STATUS, 0xFFFFFFFF);
2761 	/* Disable updating shadow status block. */
2762 	CSR_WRITE_4(sc, JME_SHBASE_ADDR_LO,
2763 	    CSR_READ_4(sc, JME_SHBASE_ADDR_LO) & ~SHBASE_POST_ENB);
2765 	/* Stop receiver, transmitter. */
2769 	/* Reclaim Rx/Tx buffers that have been completed. */
2770 	jme_rxintr(sc, JME_RX_RING_CNT);
/* Drop any partially assembled multi-segment Rx frame. */
2771 	if (sc->jme_cdata.jme_rxhead != NULL)
2772 		m_freem(sc->jme_cdata.jme_rxhead);
2773 	JME_RXCHAIN_RESET(sc);
2776 	 * Free RX and TX mbufs still in the queues.
2778 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2779 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2780 		if (rxd->rx_m != NULL) {
/* Sync for CPU before unloading the DMA map (Rx buffers are device-written). */
2781 			bus_dmamap_sync(sc->jme_cdata.jme_rx_tag,
2782 			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
2783 			bus_dmamap_unload(sc->jme_cdata.jme_rx_tag,
2789 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2790 		txd = &sc->jme_cdata.jme_txdesc[i];
2791 		if (txd->tx_m != NULL) {
2792 			bus_dmamap_sync(sc->jme_cdata.jme_tx_tag,
2793 			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2794 			bus_dmamap_unload(sc->jme_cdata.jme_tx_tag,
/*
 * jme_stop_tx: clear TXCSR_TX_ENB and poll (up to JME_TIMEOUT
 * iterations) until the hardware confirms the transmitter stopped;
 * logs a diagnostic if it never does.
 */
2804 jme_stop_tx(struct jme_softc *sc)
2809 	reg = CSR_READ_4(sc, JME_TXCSR);
/* Transmitter already disabled; nothing to do. */
2810 	if ((reg & TXCSR_TX_ENB) == 0)
2812 	reg &= ~TXCSR_TX_ENB;
2813 	CSR_WRITE_4(sc, JME_TXCSR, reg);
2814 	for (i = JME_TIMEOUT; i > 0; i--) {
2816 		if ((CSR_READ_4(sc, JME_TXCSR) & TXCSR_TX_ENB) == 0)
/* Timed out waiting for the MAC to acknowledge the stop. */
2820 		device_printf(sc->jme_dev, "stopping transmitter timeout!\n");
/*
 * jme_stop_rx: clear RXCSR_RX_ENB and poll (up to JME_TIMEOUT
 * iterations) until the hardware confirms the receiver stopped;
 * logs a diagnostic if it never does.
 *
 * Fix: corrected "recevier" -> "receiver" in the timeout message.
 */
2824 jme_stop_rx(struct jme_softc *sc)
2829 	reg = CSR_READ_4(sc, JME_RXCSR);
/* Receiver already disabled; nothing to do. */
2830 	if ((reg & RXCSR_RX_ENB) == 0)
2832 	reg &= ~RXCSR_RX_ENB;
2833 	CSR_WRITE_4(sc, JME_RXCSR, reg);
2834 	for (i = JME_TIMEOUT; i > 0; i--) {
2836 		if ((CSR_READ_4(sc, JME_RXCSR) & RXCSR_RX_ENB) == 0)
/* Timed out waiting for the MAC to acknowledge the stop. */
2840 		device_printf(sc->jme_dev, "stopping receiver timeout!\n");
/*
 * jme_init_tx_ring: reset Tx ring software state (producer/consumer
 * indices and in-flight count), zero the descriptor memory, rebind each
 * software descriptor to its hardware slot, and sync the ring for the
 * device.
 */
2844 jme_init_tx_ring(struct jme_softc *sc)
2846 	struct jme_ring_data *rd;
2847 	struct jme_txdesc *txd;
2850 	sc->jme_cdata.jme_tx_prod = 0;
2851 	sc->jme_cdata.jme_tx_cons = 0;
2852 	sc->jme_cdata.jme_tx_cnt = 0;
2854 	rd = &sc->jme_rdata;
2855 	bzero(rd->jme_tx_ring, JME_TX_RING_SIZE);
2856 	for (i = 0; i < JME_TX_RING_CNT; i++) {
2857 		txd = &sc->jme_cdata.jme_txdesc[i];
2859 		txd->tx_desc = &rd->jme_tx_ring[i];
/* Push the zeroed ring out to the hardware-visible DMA memory. */
2863 	bus_dmamap_sync(sc->jme_cdata.jme_tx_ring_tag,
2864 	    sc->jme_cdata.jme_tx_ring_map,
2865 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * jme_init_ssb: zero the shadow status block and sync it for DMA so
 * the hardware sees a clean block before any posting is enabled.
 */
2869 jme_init_ssb(struct jme_softc *sc)
2871 	struct jme_ring_data *rd;
2873 	rd = &sc->jme_rdata;
2874 	bzero(rd->jme_ssb_block, JME_SSB_SIZE);
2875 	bus_dmamap_sync(sc->jme_cdata.jme_ssb_tag, sc->jme_cdata.jme_ssb_map,
2876 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * jme_init_rx_ring: reset Rx ring software state, zero the descriptor
 * memory, and populate every slot with a fresh mbuf cluster via
 * jme_newbuf().  A jme_newbuf() failure aborts initialization (error
 * path elided in this view); on success the ring is synced for DMA.
 */
2880 jme_init_rx_ring(struct jme_softc *sc)
2882 	struct jme_ring_data *rd;
2883 	struct jme_rxdesc *rxd;
2886 	sc->jme_cdata.jme_rx_cons = 0;
/* Discard any partially assembled frame state from a previous run. */
2887 	JME_RXCHAIN_RESET(sc);
2888 	atomic_set_int(&sc->jme_morework, 0);
2890 	rd = &sc->jme_rdata;
2891 	bzero(rd->jme_rx_ring, JME_RX_RING_SIZE);
2892 	for (i = 0; i < JME_RX_RING_CNT; i++) {
2893 		rxd = &sc->jme_cdata.jme_rxdesc[i];
2895 		rxd->rx_desc = &rd->jme_rx_ring[i];
/* Allocate and DMA-load an mbuf cluster for this slot. */
2896 		if (jme_newbuf(sc, rxd) != 0)
2900 	bus_dmamap_sync(sc->jme_cdata.jme_rx_ring_tag,
2901 	    sc->jme_cdata.jme_rx_ring_map,
2902 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
/*
 * jme_newbuf: attach a fresh mbuf cluster to Rx descriptor 'rxd'.
 * Loads the cluster into the spare DMA map first, so on mapping
 * failure the old buffer remains intact; on success the spare map
 * and the descriptor's map are swapped and the hardware descriptor
 * is initialized with ownership given back to the chip (JME_RD_OWN).
 * Returns 0 on success (error-return lines elided in this view).
 */
2908 jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
2910 	struct jme_desc *desc;
2912 	bus_dma_segment_t segs[1];
2916 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2920 	 * JMC250 has 64bit boundary alignment limitation so jme(4)
2921 	 * takes advantage of 10 bytes padding feature of hardware
2922 	 * in order not to copy entire frame to align IP header on
/* Hand the full cluster to the hardware; actual length set at Rx time. */
2925 	m->m_len = m->m_pkthdr.len = MCLBYTES;
/* Load into the spare map so failure leaves the current buffer usable. */
2927 	if (bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_rx_tag,
2928 	    sc->jme_cdata.jme_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2932 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
/* Release the previous buffer's DMA mapping, if any. */
2934 	if (rxd->rx_m != NULL) {
2935 		bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
2936 		    BUS_DMASYNC_POSTREAD);
2937 		bus_dmamap_unload(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap);
/* Swap the now-loaded spare map into the descriptor slot. */
2939 	map = rxd->rx_dmamap;
2940 	rxd->rx_dmamap = sc->jme_cdata.jme_rx_sparemap;
2941 	sc->jme_cdata.jme_rx_sparemap = map;
2942 	bus_dmamap_sync(sc->jme_cdata.jme_rx_tag, rxd->rx_dmamap,
2943 	    BUS_DMASYNC_PREREAD);
2946 	desc = rxd->rx_desc;
2947 	desc->buflen = htole32(segs[0].ds_len);
2948 	desc->addr_lo = htole32(JME_ADDR_LO(segs[0].ds_addr));
2949 	desc->addr_hi = htole32(JME_ADDR_HI(segs[0].ds_addr));
/* Give the descriptor back to the hardware with interrupt on completion. */
2950 	desc->flags = htole32(JME_RD_OWN | JME_RD_INTR | JME_RD_64BIT);
/*
 * jme_set_vlan: enable or disable hardware VLAN tag stripping in the
 * RXMAC register to match the ifnet's IFCAP_VLAN_HWTAGGING capability.
 * Must be called with the driver lock held.
 */
2956 jme_set_vlan(struct jme_softc *sc)
2961 	JME_LOCK_ASSERT(sc);
2964 	reg = CSR_READ_4(sc, JME_RXMAC);
2965 	reg &= ~RXMAC_VLAN_ENB;
2966 	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2967 		reg |= RXMAC_VLAN_ENB;
2968 	CSR_WRITE_4(sc, JME_RXMAC, reg);
/*
 * jme_set_filter: program the hardware receive filter from the ifnet
 * state: always accept unicast to the station address; honor
 * IFF_BROADCAST, IFF_PROMISC and IFF_ALLMULTI; otherwise build the
 * 64-bit multicast hash table (MAR0/MAR1) from the interface's
 * multicast list.  Must be called with the driver lock held.
 */
2972 jme_set_filter(struct jme_softc *sc)
2975 	struct ifmultiaddr *ifma;
2980 	JME_LOCK_ASSERT(sc);
2984 	rxcfg = CSR_READ_4(sc, JME_RXMAC);
2985 	rxcfg &= ~ (RXMAC_BROADCAST | RXMAC_PROMISC | RXMAC_MULTICAST |
2987 	/* Always accept frames destined to our station address. */
2988 	rxcfg |= RXMAC_UNICAST;
2989 	if ((ifp->if_flags & IFF_BROADCAST) != 0)
2990 		rxcfg |= RXMAC_BROADCAST;
/* Promiscuous/allmulti mode: open the hash table completely. */
2991 	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2992 		if ((ifp->if_flags & IFF_PROMISC) != 0)
2993 			rxcfg |= RXMAC_PROMISC;
2994 		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
2995 			rxcfg |= RXMAC_ALLMULTI;
2996 		CSR_WRITE_4(sc, JME_MAR0, 0xFFFFFFFF);
2997 		CSR_WRITE_4(sc, JME_MAR1, 0xFFFFFFFF);
2998 		CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
3003 	 * Set up the multicast address filter by passing all multicast
3004 	 * addresses through a CRC generator, and then using the low-order
3005 	 * 6 bits as an index into the 64 bit multicast hash table. The
3006 	 * high order bits select the register, while the rest of the bits
3007 	 * select the bit within the register.
3009 	rxcfg |= RXMAC_MULTICAST;
3010 	bzero(mchash, sizeof(mchash));
3013 	TAILQ_FOREACH(ifma, &sc->jme_ifp->if_multiaddrs, ifma_link) {
/* Skip non link-layer entries in the multicast list. */
3014 		if (ifma->ifma_addr->sa_family != AF_LINK)
3016 		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
3017 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3019 		/* Just want the 6 least significant bits. */
3022 		/* Set the corresponding bit in the hash table. */
3023 		mchash[crc >> 5] |= 1 << (crc & 0x1f);
3025 	IF_ADDR_UNLOCK(ifp);
3027 	CSR_WRITE_4(sc, JME_MAR0, mchash[0]);
3028 	CSR_WRITE_4(sc, JME_MAR1, mchash[1]);
3029 	CSR_WRITE_4(sc, JME_RXMAC, rxcfg);
/*
 * sysctl_int_range: generic sysctl(9) handler for an int backed by
 * arg1, accepting new values only within [low, high].  Out-of-range
 * input is rejected (error return elided in this view -- presumably
 * EINVAL; verify against the full source).
 */
3033 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3039 	value = *(int *)arg1;
3040 	error = sysctl_handle_int(oidp, &value, 0, req);
/* Read-only access or handler error: return without updating. */
3041 	if (error || req->newptr == NULL)
3043 	if (value < low || value > high)
/* Commit the validated value. */
3045 	*(int *)arg1 = value;
/* Sysctl handler: Tx coalescing timeout, bounded to its hardware range. */
3051 sysctl_hw_jme_tx_coal_to(SYSCTL_HANDLER_ARGS)
3053 	return (sysctl_int_range(oidp, arg1, arg2, req,
3054 	    PCCTX_COAL_TO_MIN, PCCTX_COAL_TO_MAX));
/* Sysctl handler: Tx coalescing packet count, bounded to its hardware range. */
3058 sysctl_hw_jme_tx_coal_pkt(SYSCTL_HANDLER_ARGS)
3060 	return (sysctl_int_range(oidp, arg1, arg2, req,
3061 	    PCCTX_COAL_PKT_MIN, PCCTX_COAL_PKT_MAX));
/* Sysctl handler: Rx coalescing timeout, bounded to its hardware range. */
3065 sysctl_hw_jme_rx_coal_to(SYSCTL_HANDLER_ARGS)
3067 	return (sysctl_int_range(oidp, arg1, arg2, req,
3068 	    PCCRX_COAL_TO_MIN, PCCRX_COAL_TO_MAX));
/* Sysctl handler: Rx coalescing packet count, bounded to its hardware range. */
3072 sysctl_hw_jme_rx_coal_pkt(SYSCTL_HANDLER_ARGS)
3074 	return (sysctl_int_range(oidp, arg1, arg2, req,
3075 	    PCCRX_COAL_PKT_MIN, PCCRX_COAL_PKT_MAX));
3079 sysctl_hw_jme_proc_limit(SYSCTL_HANDLER_ARGS)
3081 return (sysctl_int_range(oidp, arg1, arg2, req,
3082 JME_PROC_MIN, JME_PROC_MAX));