2 * Copyright (c) 2015-2016 Kevin Lo <kevlo@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/systm.h>
33 #include <sys/condvar.h>
34 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/mutex.h>
39 #include <sys/socket.h>
40 #include <sys/sysctl.h>
41 #include <sys/unistd.h>
44 #include <net/if_var.h>
45 #include <net/if_media.h>
47 /* needed for checksum offload */
48 #include <netinet/in.h>
49 #include <netinet/ip.h>
51 #include <dev/mii/mii.h>
52 #include <dev/mii/miivar.h>
54 #include <dev/usb/usb.h>
55 #include <dev/usb/usbdi.h>
56 #include <dev/usb/usbdi_util.h>
59 #define USB_DEBUG_VAR ure_debug
60 #include <dev/usb/usb_debug.h>
61 #include <dev/usb/usb_process.h>
63 #include <dev/usb/net/usb_ethernet.h>
64 #include <dev/usb/net/if_urereg.h>
66 #include "miibus_if.h"
68 #include "opt_inet6.h"
/* Debug verbosity knob, exported as the hw.usb.ure.debug sysctl/tunable. */
71 static int ure_debug = 0;
73 static SYSCTL_NODE(_hw_usb, OID_AUTO, ure, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
75 SYSCTL_INT(_hw_usb_ure, OID_AUTO, debug, CTLFLAG_RWTUN, &ure_debug, 0,
/*
 * Leveled debug printf macros keyed off USB_DEBUG_VAR (ure_debug).
 * NOTE(review): the two pairs of definitions below presumably sit on opposite
 * sides of a USB_DEBUG #if/#else not visible in this chunk — the second pair
 * is the no-op variant for non-debug builds; confirm against the full file.
 */
81 #define DEVPRINTFN(n,dev,fmt,...) do { \
82 if ((USB_DEBUG_VAR) >= (n)) { \
83 device_printf((dev), "%s: " fmt, \
84 __FUNCTION__ ,##__VA_ARGS__); \
87 #define DEVPRINTF(...) DEVPRINTFN(1, __VA_ARGS__)
89 #define DEVPRINTF(...) do { } while (0)
90 #define DEVPRINTFN(...) do { } while (0)
57 /*
95 * Various supported device vendors/products.
 * USB vendor/product ID match table; URE_FLAG_8152 marks RTL8152 parts,
 * which take the 8152-specific init path (see ure_attach_post).
 */
97 static const STRUCT_USB_HOST_ID ure_devs[] = {
98 #define URE_DEV(v,p,i) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i) }
99 URE_DEV(LENOVO, RTL8153, 0),
100 URE_DEV(LENOVO, TBT3LAN, 0),
101 URE_DEV(LENOVO, ONELINK, 0),
102 URE_DEV(LENOVO, USBCLAN, 0),
103 URE_DEV(NVIDIA, RTL8153, 0),
104 URE_DEV(REALTEK, RTL8152, URE_FLAG_8152),
105 URE_DEV(REALTEK, RTL8153, 0),
106 URE_DEV(TPLINK, RTL8153, 0),
/* Forward declarations: newbus methods, USB callbacks, MII accessors,
 * usb_ether hooks, and local register/packet helpers. */
110 static device_probe_t ure_probe;
111 static device_attach_t ure_attach;
112 static device_detach_t ure_detach;
114 static usb_callback_t ure_bulk_read_callback;
115 static usb_callback_t ure_bulk_write_callback;
117 static miibus_readreg_t ure_miibus_readreg;
118 static miibus_writereg_t ure_miibus_writereg;
119 static miibus_statchg_t ure_miibus_statchg;
121 static uether_fn_t ure_attach_post;
122 static uether_fn_t ure_init;
123 static uether_fn_t ure_stop;
124 static uether_fn_t ure_start;
125 static uether_fn_t ure_tick;
126 static uether_fn_t ure_rxfilter;
128 static int ure_ctl(struct ure_softc *, uint8_t, uint16_t, uint16_t,
130 static int ure_read_mem(struct ure_softc *, uint16_t, uint16_t, void *,
132 static int ure_write_mem(struct ure_softc *, uint16_t, uint16_t, void *,
134 static uint8_t ure_read_1(struct ure_softc *, uint16_t, uint16_t);
135 static uint16_t ure_read_2(struct ure_softc *, uint16_t, uint16_t);
136 static uint32_t ure_read_4(struct ure_softc *, uint16_t, uint16_t);
137 static int ure_write_1(struct ure_softc *, uint16_t, uint16_t, uint32_t);
138 static int ure_write_2(struct ure_softc *, uint16_t, uint16_t, uint32_t);
139 static int ure_write_4(struct ure_softc *, uint16_t, uint16_t, uint32_t);
140 static uint16_t ure_ocp_reg_read(struct ure_softc *, uint16_t);
141 static void ure_ocp_reg_write(struct ure_softc *, uint16_t, uint16_t);
143 static int ure_sysctl_chipver(SYSCTL_HANDLER_ARGS);
145 static void ure_read_chipver(struct ure_softc *);
146 static int ure_attach_post_sub(struct usb_ether *);
147 static void ure_reset(struct ure_softc *);
148 static int ure_ifmedia_upd(struct ifnet *);
149 static void ure_ifmedia_sts(struct ifnet *, struct ifmediareq *);
150 static int ure_ioctl(struct ifnet *, u_long, caddr_t);
151 static void ure_rtl8152_init(struct ure_softc *);
152 static void ure_rtl8153_init(struct ure_softc *);
153 static void ure_disable_teredo(struct ure_softc *);
154 static void ure_init_fifo(struct ure_softc *);
155 static void ure_rxcsum(int capenb, struct ure_rxpkt *rp, struct mbuf *m);
156 static int ure_txcsum(struct mbuf *m, int caps, uint32_t *regout);
/*
 * Bulk-IN transfer configuration: URE_N_TRANSFER identical endpoints,
 * no timeout, short transfers accepted.  The trailing two entries are
 * compiled in only when URE_N_TRANSFER == 4.
 */
158 static const struct usb_config ure_config_rx[URE_N_TRANSFER] = {
161 .endpoint = UE_ADDR_ANY,
162 .direction = UE_DIR_IN,
163 .bufsize = URE_TRANSFER_SIZE,
164 .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
165 .callback = ure_bulk_read_callback,
166 .timeout = 0, /* no timeout */
170 .endpoint = UE_ADDR_ANY,
171 .direction = UE_DIR_IN,
172 .bufsize = URE_TRANSFER_SIZE,
173 .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
174 .callback = ure_bulk_read_callback,
175 .timeout = 0, /* no timeout */
177 #if URE_N_TRANSFER == 4
180 .endpoint = UE_ADDR_ANY,
181 .direction = UE_DIR_IN,
182 .bufsize = URE_TRANSFER_SIZE,
183 .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
184 .callback = ure_bulk_read_callback,
185 .timeout = 0, /* no timeout */
189 .endpoint = UE_ADDR_ANY,
190 .direction = UE_DIR_IN,
191 .bufsize = URE_TRANSFER_SIZE,
192 .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
193 .callback = ure_bulk_read_callback,
194 .timeout = 0, /* no timeout */
/*
 * Bulk-OUT transfer configuration: mirrors ure_config_rx but with a
 * 10 second timeout and force_short_xfer so the device sees end-of-burst.
 */
199 static const struct usb_config ure_config_tx[URE_N_TRANSFER] = {
202 .endpoint = UE_ADDR_ANY,
203 .direction = UE_DIR_OUT,
204 .bufsize = URE_TRANSFER_SIZE,
205 .flags = {.pipe_bof = 1,.force_short_xfer = 1,},
206 .callback = ure_bulk_write_callback,
207 .timeout = 10000, /* 10 seconds */
211 .endpoint = UE_ADDR_ANY,
212 .direction = UE_DIR_OUT,
213 .bufsize = URE_TRANSFER_SIZE,
214 .flags = {.pipe_bof = 1,.force_short_xfer = 1,},
215 .callback = ure_bulk_write_callback,
216 .timeout = 10000, /* 10 seconds */
218 #if URE_N_TRANSFER == 4
221 .endpoint = UE_ADDR_ANY,
222 .direction = UE_DIR_OUT,
223 .bufsize = URE_TRANSFER_SIZE,
224 .flags = {.pipe_bof = 1,.force_short_xfer = 1,},
225 .callback = ure_bulk_write_callback,
226 .timeout = 10000, /* 10 seconds */
230 .endpoint = UE_ADDR_ANY,
231 .direction = UE_DIR_OUT,
232 .bufsize = URE_TRANSFER_SIZE,
233 .flags = {.pipe_bof = 1,.force_short_xfer = 1,},
234 .callback = ure_bulk_write_callback,
235 .timeout = 10000, /* 10 seconds */
/* newbus method table: device lifecycle plus MII bus accessors. */
240 static device_method_t ure_methods[] = {
241 /* Device interface. */
242 DEVMETHOD(device_probe, ure_probe),
243 DEVMETHOD(device_attach, ure_attach),
244 DEVMETHOD(device_detach, ure_detach),
246 /* MII interface. */
247 DEVMETHOD(miibus_readreg, ure_miibus_readreg),
248 DEVMETHOD(miibus_writereg, ure_miibus_writereg),
249 DEVMETHOD(miibus_statchg, ure_miibus_statchg),
/* Driver registration: attach under uhub, hang miibus below us, and
 * declare module dependencies/version for the loader. */
254 static driver_t ure_driver = {
256 .methods = ure_methods,
257 .size = sizeof(struct ure_softc),
260 static devclass_t ure_devclass;
262 DRIVER_MODULE(ure, uhub, ure_driver, ure_devclass, NULL, NULL);
263 DRIVER_MODULE(miibus, ure, miibus_driver, miibus_devclass, NULL, NULL);
264 MODULE_DEPEND(ure, uether, 1, 1, 1);
265 MODULE_DEPEND(ure, usb, 1, 1, 1);
266 MODULE_DEPEND(ure, ether, 1, 1, 1);
267 MODULE_DEPEND(ure, miibus, 1, 1, 1);
268 MODULE_VERSION(ure, 1);
269 USB_PNP_HOST_INFO(ure_devs);
/* usb_ether(9) glue: hooks the common uether framework into this driver. */
271 static const struct usb_ether_methods ure_ue_methods = {
272 .ue_attach_post = ure_attach_post,
273 .ue_attach_post_sub = ure_attach_post_sub,
274 .ue_start = ure_start,
278 .ue_setmulti = ure_rxfilter,
279 .ue_setpromisc = ure_rxfilter,
280 .ue_mii_upd = ure_ifmedia_upd,
281 .ue_mii_sts = ure_ifmedia_sts,
/*
 * Issue a vendor-specific control request to the adapter (1 s timeout).
 * rw selects read vs. write (URE_CTL_WRITE -> host-to-device); val/index
 * address the register window and len bytes move through buf.
 * The softc mutex must be held.
 */
285 ure_ctl(struct ure_softc *sc, uint8_t rw, uint16_t val, uint16_t index,
288 struct usb_device_request req;
290 URE_LOCK_ASSERT(sc, MA_OWNED);
292 if (rw == URE_CTL_WRITE)
293 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
295 req.bmRequestType = UT_READ_VENDOR_DEVICE;
296 req.bRequest = UR_SET_ADDRESS;
297 USETW(req.wValue, val);
298 USETW(req.wIndex, index);
299 USETW(req.wLength, len);
301 return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
/* Read len bytes from device memory/registers at addr into buf. */
305 ure_read_mem(struct ure_softc *sc, uint16_t addr, uint16_t index,
309 return (ure_ctl(sc, URE_CTL_READ, addr, index, buf, len));
/* Write len bytes from buf into device memory/registers at addr. */
313 ure_write_mem(struct ure_softc *sc, uint16_t addr, uint16_t index,
317 return (ure_ctl(sc, URE_CTL_WRITE, addr, index, buf, len));
/*
 * Read one byte at reg.  The hardware is accessed in aligned 32-bit
 * words, so fetch the containing dword and shift the wanted byte out
 * ((reg & 3) selects the byte lane).
 */
321 ure_read_1(struct ure_softc *sc, uint16_t reg, uint16_t index)
327 shift = (reg & 3) << 3;
330 ure_read_mem(sc, reg, index, &temp, 4);
/*
 * Read a 16-bit register: fetch the containing 32-bit word and extract
 * the half selected by (reg & 2).
 */
338 ure_read_2(struct ure_softc *sc, uint16_t reg, uint16_t index)
344 shift = (reg & 2) << 3;
347 ure_read_mem(sc, reg, index, &temp, 4);
351 return (val & 0xffff);
/* Read a 32-bit register; UGETDW converts from little-endian wire order. */
355 ure_read_4(struct ure_softc *sc, uint16_t reg, uint16_t index)
359 ure_read_mem(sc, reg, index, &temp, 4);
360 return (UGETDW(temp));
/*
 * Write one byte: position the value in its byte lane within a 32-bit
 * word and issue a dword write with the single-byte enable mask.
 */
364 ure_write_1(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
370 byen = URE_BYTE_EN_BYTE;
376 val <<= (shift << 3);
381 return (ure_write_mem(sc, reg, index | byen, &temp, 4));
/*
 * Write a 16-bit value: place it in the correct half of a 32-bit word
 * and issue a dword write with the word byte-enable mask.
 */
385 ure_write_2(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
391 byen = URE_BYTE_EN_WORD;
397 val <<= (shift << 3);
402 return (ure_write_mem(sc, reg, index | byen, &temp, 4));
/* Write a full 32-bit register with all byte lanes enabled. */
406 ure_write_4(struct ure_softc *sc, uint16_t reg, uint16_t index, uint32_t val)
411 return (ure_write_mem(sc, reg, index | URE_BYTE_EN_DWORD, &temp, 4));
/*
 * Read an OCP (PHY-side) register: program the 4 KB window base into
 * URE_PLA_OCP_GPHY_BASE, then read through the 0xb000 window at the
 * offset within that page.
 */
415 ure_ocp_reg_read(struct ure_softc *sc, uint16_t addr)
419 ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000);
420 reg = (addr & 0x0fff) | 0xb000;
422 return (ure_read_2(sc, reg, URE_MCU_TYPE_PLA));
/* Write an OCP (PHY-side) register via the windowed access scheme
 * described above ure_ocp_reg_read(). */
426 ure_ocp_reg_write(struct ure_softc *sc, uint16_t addr, uint16_t data)
430 ure_write_2(sc, URE_PLA_OCP_GPHY_BASE, URE_MCU_TYPE_PLA, addr & 0xf000);
431 reg = (addr & 0x0fff) | 0xb000;
433 ure_write_2(sc, reg, URE_MCU_TYPE_PLA, data);
/*
 * miibus read method.  Takes the softc lock if the caller doesn't
 * already hold it.  URE_GMEDIASTAT is a MAC register, not a PHY one,
 * so it is serviced directly for the rgephy driver; everything else
 * maps to OCP space at URE_OCP_BASE_MII + reg*2.
 */
437 ure_miibus_readreg(device_t dev, int phy, int reg)
439 struct ure_softc *sc;
443 sc = device_get_softc(dev);
444 locked = mtx_owned(&sc->sc_mtx);
448 /* Let the rgephy driver read the URE_GMEDIASTAT register. */
449 if (reg == URE_GMEDIASTAT) {
452 return (ure_read_1(sc, URE_GMEDIASTAT, URE_MCU_TYPE_PLA));
455 val = ure_ocp_reg_read(sc, URE_OCP_BASE_MII + reg * 2);
/*
 * miibus write method.  Ignores writes to any PHY other than the one
 * recorded in sc_phyno; otherwise forwards to OCP register space.
 */
463 ure_miibus_writereg(device_t dev, int phy, int reg, int val)
465 struct ure_softc *sc;
468 sc = device_get_softc(dev);
469 if (sc->sc_phyno != phy)
472 locked = mtx_owned(&sc->sc_mtx);
476 ure_ocp_reg_write(sc, URE_OCP_BASE_MII + reg * 2, val);
/*
 * miibus link-status change callback.  Re-derives URE_FLAG_LINK from the
 * current media status and clears sc_rxstarted on a fresh link so RX
 * transfers get (re)kicked by ure_start().  Bails out early when the
 * interface is not running.
 */
484 ure_miibus_statchg(device_t dev)
486 struct ure_softc *sc;
487 struct mii_data *mii;
491 sc = device_get_softc(dev);
493 locked = mtx_owned(&sc->sc_mtx);
497 ifp = uether_getifp(&sc->sc_ue);
498 if (mii == NULL || ifp == NULL ||
499 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
502 sc->sc_flags &= ~URE_FLAG_LINK;
503 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
504 (IFM_ACTIVE | IFM_AVALID)) {
505 switch (IFM_SUBTYPE(mii->mii_media_active)) {
508 sc->sc_flags |= URE_FLAG_LINK;
509 sc->sc_rxstarted = 0;
512 if ((sc->sc_flags & URE_FLAG_8152) != 0)
514 sc->sc_flags |= URE_FLAG_LINK;
515 sc->sc_rxstarted = 0;
522 /* Lost link, do nothing. */
523 if ((sc->sc_flags & URE_FLAG_LINK) == 0)
262 /*
531 * Probe for a RTL8152/RTL8153 chip.
 * Rejects non-host mode and wrong config/interface indexes before
 * matching the VID/PID table.
 */
534 ure_probe(device_t dev)
536 struct usb_attach_arg *uaa;
538 uaa = device_get_ivars(dev);
539 if (uaa->usb_mode != USB_MODE_HOST)
541 if (uaa->info.bConfigIndex != URE_CONFIG_IDX)
543 if (uaa->info.bIfaceIndex != URE_IFACE_IDX)
546 return (usbd_lookup_id_by_uaa(ure_devs, sizeof(ure_devs), uaa));
270 /*
550 * Attach the interface. Allocate softc structures, do ifmedia
551 * setup and ethernet/BPF attach.
 * Sets up the RX and TX bulk transfer arrays, seeds the TX-available
 * stack, and hands off to uether_ifattach().  On any failure the
 * already-allocated transfers are torn down and ENXIO is returned.
 */
554 ure_attach(device_t dev)
556 struct usb_attach_arg *uaa = device_get_ivars(dev);
557 struct ure_softc *sc = device_get_softc(dev);
558 struct usb_ether *ue = &sc->sc_ue;
562 sc->sc_flags = USB_GET_DRIVER_INFO(uaa);
563 device_set_usb_desc(dev);
564 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
566 iface_index = URE_IFACE_IDX;
567 error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_rx_xfer,
568 ure_config_rx, URE_N_TRANSFER, sc, &sc->sc_mtx);
570 device_printf(dev, "allocating USB RX transfers failed\n");
574 error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_tx_xfer,
575 ure_config_tx, URE_N_TRANSFER, sc, &sc->sc_mtx);
577 usbd_transfer_unsetup(sc->sc_rx_xfer, URE_N_TRANSFER);
578 device_printf(dev, "allocating USB TX transfers failed\n");
582 /* Mark all TX transfers as available */
583 for (int i = 0; i < URE_N_TRANSFER; i++) {
584 sc->sc_txavail[i] = sc->sc_tx_xfer[i];
585 DEVPRINTF(dev, "sc_txavail[%d] = %p\n", i, sc->sc_txavail[i]);
591 ue->ue_udev = uaa->device;
592 ue->ue_mtx = &sc->sc_mtx;
593 ue->ue_methods = &ure_ue_methods;
595 error = uether_ifattach(ue);
597 device_printf(dev, "could not attach interface\n");
600 return (0); /* success */
604 return (ENXIO); /* failure */
/* Detach: release USB transfers (TX first), then destroy the mutex. */
608 ure_detach(device_t dev)
610 struct ure_softc *sc = device_get_softc(dev);
611 struct usb_ether *ue = &sc->sc_ue;
613 usbd_transfer_unsetup(sc->sc_tx_xfer, URE_N_TRANSFER);
614 usbd_transfer_unsetup(sc->sc_rx_xfer, URE_N_TRANSFER);
616 mtx_destroy(&sc->sc_mtx);
304 /*
622 * Copy from USB buffers to a new mbuf chain with pkt header.
624 * This will use m_getm2 to get a mbuf chain w/ properly sized mbuf
625 * clusters as necessary.
 * The chain is ETHER_ALIGN-adjusted so the IP header lands aligned,
 * then filled from the page cache starting at 'offset'.
 */
628 ure_makembuf(struct usb_page_cache *pc, usb_frlength_t offset,
631 struct usb_page_search_res;
635 m = m_getm2(NULL, len + ETHER_ALIGN, M_NOWAIT, MT_DATA, M_PKTHDR);
639 /* uether_newbuf does this. */
640 m_adj(m, ETHER_ALIGN);
642 m->m_pkthdr.len = len;
644 for (mb = m; len > 0; mb = mb->m_next) {
645 tlen = MIN(len, M_TRAILINGSPACE(mb));
647 usbd_copy_out(pc, offset, mtod(mb, uint8_t *), tlen);
/*
 * Bulk-IN completion callback.  On USB_ST_TRANSFERRED, walks the USB
 * buffer extracting one or more frames, each preceded by a struct
 * ure_rxpkt descriptor; validates lengths, applies VLAN tag and RX
 * checksum offload metadata, and hands frames to uether_rxmbuf().
 * Frames are padded to URE_RXPKT_ALIGN within the buffer.  Then the
 * transfer is resubmitted.  On error, attempts a stall-clear unless
 * the transfer was cancelled.
 */
658 ure_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
660 struct ure_softc *sc = usbd_xfer_softc(xfer);
661 struct usb_ether *ue = &sc->sc_ue;
662 struct ifnet *ifp = uether_getifp(ue);
663 struct usb_page_cache *pc;
665 struct ure_rxpkt pkt;
666 int actlen, off, len;
670 usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
672 switch (USB_GET_STATE(xfer)) {
673 case USB_ST_TRANSFERRED:
675 pc = usbd_xfer_get_frame(xfer, 0);
676 caps = if_getcapenable(ifp);
677 DEVPRINTFN(13, sc->sc_ue.ue_dev, "rcb start\n");
679 if (actlen < (int)(sizeof(pkt))) {
680 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
683 usbd_copy_out(pc, off, &pkt, sizeof(pkt));
686 actlen -= sizeof(pkt);
688 len = le32toh(pkt.ure_pktlen) & URE_RXPKT_LEN_MASK;
690 DEVPRINTFN(13, sc->sc_ue.ue_dev,
691 "rxpkt: %#x, %#x, %#x, %#x, %#x, %#x\n",
692 pkt.ure_pktlen, pkt.ure_csum, pkt.ure_misc,
693 pkt.ure_rsvd2, pkt.ure_rsvd3, pkt.ure_rsvd4);
694 DEVPRINTFN(13, sc->sc_ue.ue_dev, "len: %d\n", len);
696 if (len >= URE_RXPKT_LEN_MASK) {
698 * drop the rest of this segment. Without
699 * more information, we cannot know where next
700 * packet starts. Blindly continuing would
701 * cause a packet in packet attack, allowing
702 * one VLAN to inject packets w/o a VLAN tag,
703 * or injecting packets into other VLANs.
705 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
710 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
715 m = ure_makembuf(pc, off, len - ETHER_CRC_LEN);
719 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
721 /* make mbuf and queue */
722 pktcsum = le32toh(pkt.ure_csum);
723 if (caps & IFCAP_VLAN_HWTAGGING &&
724 pktcsum & URE_RXPKT_RX_VLAN_TAG) {
725 m->m_pkthdr.ether_vtag =
727 URE_RXPKT_VLAN_MASK);
728 m->m_flags |= M_VLANTAG;
731 /* set the necessary flags for rx checksum */
732 ure_rxcsum(caps, &pkt, m);
734 uether_rxmbuf(ue, m, len - ETHER_CRC_LEN);
737 off += roundup(len, URE_RXPKT_ALIGN);
738 actlen -= roundup(len, URE_RXPKT_ALIGN);
740 DEVPRINTFN(13, sc->sc_ue.ue_dev, "rcb end\n");
745 usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
746 usbd_transfer_submit(xfer);
751 DPRINTF("bulk read error, %s\n",
754 if (error != USB_ERR_CANCELLED) {
755 /* try to clear stall first */
756 usbd_xfer_set_stall(xfer);
/*
 * Bulk-OUT completion callback.  In the setup/transferred states it
 * drains if_snd, prefixing each frame with a struct ure_txpkt header
 * (FS|LS set, VLAN tag and checksum-offload bits folded into ure_csum),
 * packing as many URE_TXPKT_ALIGN-padded frames as fit in one
 * URE_TRANSFER_SIZE buffer before submitting.  Frames too large for the
 * descriptor length field are dropped; frames that merely don't fit the
 * remaining space are prepended back onto the queue.  On completion or
 * error the transfer is pushed back on the sc_txavail stack and
 * IFF_DRV_OACTIVE is cleared when slots are free.
 */
764 ure_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
766 struct ure_softc *sc = usbd_xfer_softc(xfer);
767 struct ifnet *ifp = uether_getifp(&sc->sc_ue);
768 struct usb_page_cache *pc;
770 struct ure_txpkt txpkt;
776 switch (USB_GET_STATE(xfer)) {
777 case USB_ST_TRANSFERRED:
778 DPRINTFN(11, "transfer complete\n");
779 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
784 if ((sc->sc_flags & URE_FLAG_LINK) == 0) {
785 /* don't send anything if there is no link! */
789 pc = usbd_xfer_get_frame(xfer, 0);
790 caps = if_getcapenable(ifp);
793 rem = URE_TRANSFER_SIZE;
794 while (rem > sizeof(txpkt)) {
795 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
800 * make sure we don't ever send too large of a
803 len = m->m_pkthdr.len;
804 if ((len & URE_TXPKT_LEN_MASK) != len) {
805 device_printf(sc->sc_ue.ue_dev,
806 "pkt len too large: %#x", len);
808 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
814 roundup(len, URE_TXPKT_ALIGN) > rem) {
816 IFQ_DRV_PREPEND(&ifp->if_snd, m);
821 txpkt = (struct ure_txpkt){};
822 txpkt.ure_pktlen = htole32((len & URE_TXPKT_LEN_MASK) |
823 URE_TKPKT_TX_FS | URE_TKPKT_TX_LS);
824 if (m->m_flags & M_VLANTAG) {
825 txpkt.ure_csum = htole32(
826 bswap16(m->m_pkthdr.ether_vtag &
827 URE_TXPKT_VLAN_MASK) | URE_TXPKT_VLAN);
/* NOTE(review): "®tmp" below looks like mojibake for "&regtmp"
 * (an "&reg" sequence converted to the (R) sign) — restore the
 * original "&regtmp" when fixing the file's encoding. */
829 if (ure_txcsum(m, caps, ®tmp)) {
830 device_printf(sc->sc_ue.ue_dev,
831 "pkt l4 off too large");
834 txpkt.ure_csum |= htole32(regtmp);
836 DEVPRINTFN(13, sc->sc_ue.ue_dev,
837 "txpkt: mbflg: %#x, %#x, %#x\n",
838 m->m_pkthdr.csum_flags, le32toh(txpkt.ure_pktlen),
839 le32toh(txpkt.ure_csum));
841 usbd_copy_in(pc, pos, &txpkt, sizeof(txpkt));
843 pos += sizeof(txpkt);
844 rem -= sizeof(txpkt);
846 usbd_m_copy_in(pc, pos, m, 0, len);
848 pos += roundup(len, URE_TXPKT_ALIGN);
849 rem -= roundup(len, URE_TXPKT_ALIGN);
851 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
854 * If there's a BPF listener, bounce a copy
855 * of this frame to him.
862 /* no packets to send */
866 /* Set frame length. */
867 usbd_xfer_set_frame_len(xfer, 0, pos);
869 usbd_transfer_submit(xfer);
871 KASSERT(sc->sc_txpos >= 0 && sc->sc_txpos <= URE_N_TRANSFER,
872 ("sc_txpos invalid: %d", sc->sc_txpos));
873 if (sc->sc_txpos < URE_N_TRANSFER &&
874 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
875 xfer = sc->sc_txavail[sc->sc_txpos++];
876 usbd_transfer_start(xfer);
879 if (sc->sc_txpos == URE_N_TRANSFER)
880 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
884 DPRINTFN(11, "transfer error, %s\n",
887 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
888 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
890 if (error == USB_ERR_TIMEOUT) {
891 DEVPRINTFN(12, sc->sc_ue.ue_dev,
895 if (error != USB_ERR_CANCELLED) {
896 /* try to clear stall first */
897 usbd_xfer_set_stall(xfer);
902 KASSERT(sc->sc_txpos > 0 && sc->sc_txpos <= URE_N_TRANSFER, ("sc_txpos invalid: %d", sc->sc_txpos));
903 sc->sc_txavail[(--(sc->sc_txpos))] = xfer;
904 if (sc->sc_txpos < URE_N_TRANSFER)
905 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * Identify the chip revision from URE_PLA_TCR1 (masked with
 * URE_VERSION_MASK) and record the matching URE_CHIP_VER_* flag in
 * sc_chip; unknown revisions are reported but not fatal.
 */
909 ure_read_chipver(struct ure_softc *sc)
913 ver = ure_read_2(sc, URE_PLA_TCR1, URE_MCU_TYPE_PLA) & URE_VERSION_MASK;
917 sc->sc_chip |= URE_CHIP_VER_4C00;
920 sc->sc_chip |= URE_CHIP_VER_4C10;
923 sc->sc_chip |= URE_CHIP_VER_5C00;
926 sc->sc_chip |= URE_CHIP_VER_5C10;
929 sc->sc_chip |= URE_CHIP_VER_5C20;
932 sc->sc_chip |= URE_CHIP_VER_5C30;
935 device_printf(sc->sc_ue.ue_dev,
936 "unknown version 0x%04x\n", ver);
/* Sysctl handler: report the stored chip version (sc_ver) as a
 * 4-hex-digit string via an sbuf. */
942 ure_sysctl_chipver(SYSCTL_HANDLER_ARGS)
945 struct ure_softc *sc = arg1;
948 sbuf_new_for_sysctl(&sb, NULL, 0, req);
950 sbuf_printf(&sb, "%04x", sc->sc_ver);
952 error = sbuf_finish(&sb);
/*
 * Post-attach hook: read the chip version, run the chip-specific init
 * (8152 vs. 8153), and fetch the station address — from URE_PLA_IDR on
 * 4C00/4C10 parts, otherwise from the URE_PLA_BACKUP area.  If the
 * EEPROM address is all-zero, synthesize a random locally-administered
 * unicast MAC.
 */
959 ure_attach_post(struct usb_ether *ue)
961 struct ure_softc *sc = uether_getsc(ue);
963 sc->sc_rxstarted = 0;
966 /* Determine the chip version. */
967 ure_read_chipver(sc);
969 /* Initialize controller and get station address. */
970 if (sc->sc_flags & URE_FLAG_8152)
971 ure_rtl8152_init(sc);
973 ure_rtl8153_init(sc);
975 if ((sc->sc_chip & URE_CHIP_VER_4C00) ||
976 (sc->sc_chip & URE_CHIP_VER_4C10))
977 ure_read_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA,
980 ure_read_mem(sc, URE_PLA_BACKUP, URE_MCU_TYPE_PLA,
983 if (ETHER_IS_ZERO(sc->sc_ue.ue_eaddr)) {
984 device_printf(sc->sc_ue.ue_dev, "MAC assigned randomly\n");
985 arc4rand(sc->sc_ue.ue_eaddr, ETHER_ADDR_LEN, 0);
986 sc->sc_ue.ue_eaddr[0] &= ~0x01; /* unicast */
987 sc->sc_ue.ue_eaddr[0] |= 0x02; /* locally administered */
/*
 * Second-stage attach: populate ifnet method pointers and flags,
 * size the send queue, advertise VLAN/checksum capabilities (IPv6
 * checksum only when the relevant option block applies — the guarding
 * #ifdef is not visible in this chunk), attach the MII bus, and
 * register the "chipver" sysctl.
 */
992 ure_attach_post_sub(struct usb_ether *ue)
994 struct sysctl_ctx_list *sctx;
995 struct sysctl_oid *soid;
996 struct ure_softc *sc;
1000 sc = uether_getsc(ue);
1002 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1003 ifp->if_start = uether_start;
1004 ifp->if_ioctl = ure_ioctl;
1005 ifp->if_init = uether_init;
1006 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
1008 * Try to keep two transfers full at a time.
1009 * ~(TRANSFER_SIZE / 80 bytes/pkt * 2 buffers in flight)
1011 ifp->if_snd.ifq_drv_maxlen = 512;
1012 IFQ_SET_READY(&ifp->if_snd);
1014 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
1015 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
1016 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM|IFCAP_HWCSUM, 0);
1017 if_sethwassist(ifp, CSUM_IP|CSUM_IP_UDP|CSUM_IP_TCP);
1019 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM_IPV6, 0);
1021 if_setcapenable(ifp, if_getcapabilities(ifp));
1024 error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
1025 uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
1026 BMSR_DEFCAPMASK, sc->sc_phyno, MII_OFFSET_ANY, 0);
1029 sctx = device_get_sysctl_ctx(sc->sc_ue.ue_dev);
1030 soid = device_get_sysctl_tree(sc->sc_ue.ue_dev);
1031 SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "chipver",
1032 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
1033 ure_sysctl_chipver, "A",
1034 "Return string with chip version.");
/*
 * Bring the interface up (softc lock held): program the station address
 * through the CRWECR config window, bounce the FMC packet-filter enable
 * bit, sync hardware VLAN stripping (CPCR) with the interface
 * capabilities, enable TX/RX in URE_PLA_CR, ungate RX, program the RX
 * filter, and finally mark the interface running and kick the media.
 * Returns early if already running.
 */
1040 ure_init(struct usb_ether *ue)
1042 struct ure_softc *sc = uether_getsc(ue);
1043 struct ifnet *ifp = uether_getifp(ue);
1046 URE_LOCK_ASSERT(sc, MA_OWNED);
1048 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1051 /* Cancel pending I/O. */
1056 /* Set MAC address. */
1057 ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_CONFIG);
1058 ure_write_mem(sc, URE_PLA_IDR, URE_MCU_TYPE_PLA | URE_BYTE_EN_SIX_BYTES,
1060 ure_write_1(sc, URE_PLA_CRWECR, URE_MCU_TYPE_PLA, URE_CRWECR_NORAML);
1062 /* Reset the packet filter. */
1063 ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA,
1064 ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) &
1065 ~URE_FMC_FCR_MCU_EN);
1066 ure_write_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA,
1067 ure_read_2(sc, URE_PLA_FMC, URE_MCU_TYPE_PLA) |
1068 URE_FMC_FCR_MCU_EN);
1070 /* Enable RX VLANs if enabled */
1071 cpcr = ure_read_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA);
1072 if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) {
1073 DEVPRINTFN(12, sc->sc_ue.ue_dev, "enabled hw vlan tag\n");
1074 cpcr |= URE_CPCR_RX_VLAN;
1076 DEVPRINTFN(12, sc->sc_ue.ue_dev, "disabled hw vlan tag\n");
1077 cpcr &= ~URE_CPCR_RX_VLAN;
1079 ure_write_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA, cpcr);
1081 /* Enable transmit and receive. */
1082 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA,
1083 ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) | URE_CR_RE |
1086 ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA,
1087 ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) &
1088 ~URE_RXDY_GATED_EN);
1090 /* Configure RX filters. */
1093 usbd_xfer_set_stall(sc->sc_tx_xfer[0]);
1095 /* Indicate we are up and running. */
1096 ifp->if_drv_flags |= IFF_DRV_RUNNING;
1098 /* Switch to selected media. */
1099 ure_ifmedia_upd(ifp);
/*
 * Periodic tick (softc lock held): dump TX slot / transfer-state debug
 * info, and if the link was down but MII now reports an active medium,
 * set URE_FLAG_LINK and clear sc_rxstarted so RX gets restarted.
 */
1103 ure_tick(struct usb_ether *ue)
1105 struct ure_softc *sc = uether_getsc(ue);
1106 struct ifnet *ifp = uether_getifp(ue);
1107 struct mii_data *mii = GET_MII(sc);
1109 URE_LOCK_ASSERT(sc, MA_OWNED);
1111 KASSERT(sc->sc_txpos >= 0 && sc->sc_txpos <= URE_N_TRANSFER, ("sc_txpos invalid: %d", sc->sc_txpos));
1113 DEVPRINTFN(13, sc->sc_ue.ue_dev,
1114 "sc_txpos: %d, oactive: %d\n", sc->sc_txpos, !!(ifp->if_drv_flags & IFF_DRV_OACTIVE));
1115 for (int i = 0; i < URE_N_TRANSFER; i++)
1116 DEVPRINTFN(13, sc->sc_ue.ue_dev,
1117 "rx[%d] = %d\n", i, USB_GET_STATE(sc->sc_rx_xfer[i]));
1119 for (int i = 0; i < URE_N_TRANSFER; i++)
1120 DEVPRINTFN(13, sc->sc_ue.ue_dev,
1121 "tx[%d] = %d\n", i, USB_GET_STATE(sc->sc_tx_xfer[i]));
1124 if ((sc->sc_flags & URE_FLAG_LINK) == 0
1125 && mii->mii_media_status & IFM_ACTIVE &&
1126 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1127 sc->sc_flags |= URE_FLAG_LINK;
1128 sc->sc_rxstarted = 0;
/*
 * if_foreach_llmaddr() callback: fold one multicast link-level address
 * into the 64-bit hash filter (top 6 bits of the big-endian CRC32 pick
 * the bit; bits 0-31 go in hashes[0], 32-63 in hashes[1]).
 */
1134 ure_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1136 uint32_t h, *hashes = arg;
1138 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
1140 hashes[0] |= (1 << h);
1142 hashes[1] |= (1 << (h - 32));
564 /*
1147 * Program the 64-bit multicast hash filter.
 * Also handles promiscuous/allmulti modes (open the filter fully) and
 * writes the resulting RCR mode bits and MAR registers.  The hash words
 * are byte-swapped and swapped with each other before programming, per
 * the hardware's register layout.  Softc lock must be held.
 */
1150 ure_rxfilter(struct usb_ether *ue)
1152 struct ure_softc *sc = uether_getsc(ue);
1153 struct ifnet *ifp = uether_getifp(ue);
1155 uint32_t h, hashes[2] = { 0, 0 };
1157 URE_LOCK_ASSERT(sc, MA_OWNED);
1159 rxmode = ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA);
1160 rxmode &= ~(URE_RCR_AAP | URE_RCR_AM);
1161 rxmode |= URE_RCR_APM; /* accept physical match packets */
1162 rxmode |= URE_RCR_AB; /* always accept broadcasts */
1163 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
1164 if (ifp->if_flags & IFF_PROMISC)
1165 rxmode |= URE_RCR_AAP;
1166 rxmode |= URE_RCR_AM;
1167 hashes[0] = hashes[1] = 0xffffffff;
1171 /* calculate multicast masks */
1172 if_foreach_llmaddr(ifp, ure_hash_maddr, &hashes);
1174 h = bswap32(hashes[0]);
1175 hashes[0] = bswap32(hashes[1]);
1177 rxmode |= URE_RCR_AM; /* accept multicast packets */
1180 DEVPRINTFN(14, ue->ue_dev, "rxfilt: RCR: %#x\n",
1181 ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA));
1182 ure_write_4(sc, URE_PLA_MAR0, URE_MCU_TYPE_PLA, hashes[0]);
1183 ure_write_4(sc, URE_PLA_MAR4, URE_MCU_TYPE_PLA, hashes[1]);
1184 ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA, rxmode);
/*
 * uether start hook (softc lock held).  Lazily starts all RX transfers
 * the first time through (sc_rxstarted gate), then pops a free TX
 * transfer off the sc_txavail stack and starts it; sets
 * IFF_DRV_OACTIVE when the last free slot is consumed or none remain.
 */
1188 ure_start(struct usb_ether *ue)
1190 struct ure_softc *sc = uether_getsc(ue);
1191 struct usb_xfer *xfer;
1194 URE_LOCK_ASSERT(sc, MA_OWNED);
1196 if (!sc->sc_rxstarted) {
1197 sc->sc_rxstarted = 1;
1198 for (int i = 0; i < URE_N_TRANSFER; i++)
1199 usbd_transfer_start(sc->sc_rx_xfer[i]);
1203 * start the USB transfers, if not already started:
1205 if (sc->sc_txpos == URE_N_TRANSFER) {
1206 ifp = uether_getifp(&sc->sc_ue);
1208 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1212 KASSERT(sc->sc_txpos >= 0 && sc->sc_txpos < URE_N_TRANSFER, ("sc_txpos invalid: %d", sc->sc_txpos));
1213 xfer = sc->sc_txavail[sc->sc_txpos++];
1214 if (sc->sc_txpos == URE_N_TRANSFER) {
1215 ifp = uether_getifp(&sc->sc_ue);
1216 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1218 usbd_transfer_start(xfer);
/*
 * Software-reset the MAC via URE_CR_RST and poll (up to URE_TIMEOUT
 * iterations, 10 ms apart) for the bit to self-clear; warn on timeout.
 */
1222 ure_reset(struct ure_softc *sc)
1226 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, URE_CR_RST);
1228 for (i = 0; i < URE_TIMEOUT; i++) {
1229 if (!(ure_read_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA) &
1232 uether_pause(&sc->sc_ue, hz / 100);
1234 if (i == URE_TIMEOUT)
1235 device_printf(sc->sc_ue.ue_dev, "reset never completed\n");
614 /*
1239 * Set media options.
 * Resets each attached PHY, then lets the MII layer change media.
 */
1242 ure_ifmedia_upd(struct ifnet *ifp)
1244 struct ure_softc *sc = ifp->if_softc;
1245 struct mii_data *mii = GET_MII(sc);
1246 struct mii_softc *miisc;
1249 URE_LOCK_ASSERT(sc, MA_OWNED);
1251 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1253 error = mii_mediachg(mii);
622 /*
1258 * Report current media status.
 * Copies the MII layer's cached active/status words into the request.
 */
1261 ure_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1263 struct ure_softc *sc;
1264 struct mii_data *mii;
1271 ifmr->ifm_active = mii->mii_media_active;
1272 ifmr->ifm_status = mii->mii_media_status;
/*
 * ioctl handler.  SIOCSIFCAP toggles the VLAN-tagging and per-direction
 * checksum capability bits (scheduling a reinit by clearing
 * IFF_DRV_RUNNING when required); SIOCSIFMTU rejects MTUs outside
 * [ETHERMIN, 4096 - headers] because larger values have been seen to
 * wedge the device's link state.  Everything else falls through to
 * uether_ioctl().
 */
1277 ure_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1279 struct usb_ether *ue = ifp->if_softc;
1280 struct ure_softc *sc;
1282 int error, mask, reinit;
1284 sc = uether_getsc(ue);
1285 ifr = (struct ifreq *)data;
1291 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1292 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
1293 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
1294 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1297 if ((mask & IFCAP_TXCSUM) != 0 &&
1298 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
1299 ifp->if_capenable ^= IFCAP_TXCSUM;
1301 if ((mask & IFCAP_RXCSUM) != 0 &&
1302 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
1303 ifp->if_capenable ^= IFCAP_RXCSUM;
1305 if ((mask & IFCAP_TXCSUM_IPV6) != 0 &&
1306 (ifp->if_capabilities & IFCAP_TXCSUM_IPV6) != 0) {
1307 ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
1309 if ((mask & IFCAP_RXCSUM_IPV6) != 0 &&
1310 (ifp->if_capabilities & IFCAP_RXCSUM_IPV6) != 0) {
1311 ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
1313 if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
1314 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1324 * in testing large MTUs "crashes" the device, it
1325 * leaves the device w/ a broken state where link
1326 * is in a bad state.
1328 if (ifr->ifr_mtu < ETHERMIN ||
1329 ifr->ifr_mtu > (4096 - ETHER_HDR_LEN -
1330 ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN)) {
1335 if (if_getmtu(ifp) != ifr->ifr_mtu)
1336 if_setmtu(ifp, ifr->ifr_mtu);
1341 error = uether_ioctl(ifp, cmd, data);
/*
 * RTL8152-specific bring-up: disable ALDPS power saving around the MCU
 * configuration, quirk the LED mode on 4C00 parts, clear USB
 * power-control/resume bits, set 10M-idle/PFM power switches, program
 * the MCU clock ratio and GPHY interrupt mask, enable RX aggregation,
 * and set TX aggregation/DMA thresholds.
 */
1348 ure_rtl8152_init(struct ure_softc *sc)
1352 /* Disable ALDPS. */
1353 ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA |
1355 uether_pause(&sc->sc_ue, hz / 50);
1357 if (sc->sc_chip & URE_CHIP_VER_4C00) {
1358 ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA,
1359 ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) &
1360 ~URE_LED_MODE_MASK);
1363 ure_write_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB,
1364 ure_read_2(sc, URE_USB_UPS_CTRL, URE_MCU_TYPE_USB) &
1366 ure_write_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB,
1367 ure_read_2(sc, URE_USB_PM_CTRL_STATUS, URE_MCU_TYPE_USB) &
1368 ~URE_RESUME_INDICATE);
1370 ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA,
1371 ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) |
1372 URE_TX_10M_IDLE_EN | URE_PFM_PWM_SWITCH);
1373 pwrctrl = ure_read_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA);
1374 pwrctrl &= ~URE_MCU_CLK_RATIO_MASK;
1375 pwrctrl |= URE_MCU_CLK_RATIO | URE_D3_CLK_GATED_EN;
1376 ure_write_4(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA, pwrctrl);
1377 ure_write_2(sc, URE_PLA_GPHY_INTR_IMR, URE_MCU_TYPE_PLA,
1378 URE_GPHY_STS_MSK | URE_SPEED_DOWN_MSK | URE_SPDWN_RXDV_MSK |
1379 URE_SPDWN_LINKCHG_MSK);
1381 /* Enable Rx aggregation. */
1382 ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB,
1383 ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) &
1384 ~URE_RX_AGG_DISABLE);
1386 /* Disable ALDPS. */
1387 ure_ocp_reg_write(sc, URE_OCP_ALDPS_CONFIG, URE_ENPDNPS | URE_LINKENA |
1389 uether_pause(&sc->sc_ue, hz / 50);
1393 ure_write_1(sc, URE_USB_TX_AGG, URE_MCU_TYPE_USB,
1394 URE_TX_AGG_MAX_THRESHOLD);
1395 ure_write_4(sc, URE_USB_RX_BUF_TH, URE_MCU_TYPE_USB, URE_RX_THR_HIGH);
1396 ure_write_4(sc, URE_USB_TX_DMA, URE_MCU_TYPE_USB,
1397 URE_TEST_MODE_DISABLE | URE_TX_SIZE_ADJUST1);
/*
 * ure_rtl8153_init: power-up initialization for the RTL8153 (USB 3.0
 * gigabit) variant.  Sequence: disable ALDPS, park USB link power
 * management, wait for firmware autoload and PHY stabilization, apply
 * silicon-revision-specific tuning, then program power-saving /
 * speed-down controls and re-enable USB LPM.
 *
 * NOTE(review): this extract is missing several original source lines
 * (local declarations, brace/continuation lines, some mask operands);
 * commentary below covers only the register accesses that are visible.
 * Statement order is significant -- do not reorder.
 */
1401 ure_rtl8153_init(struct ure_softc *sc)
/* Turn off ALDPS power saving before touching PHY/MAC registers. */
1407 /* Disable ALDPS. */
1408 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
1409 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS);
/* ~20 ms settle time after clearing ALDPS. */
1410 uether_pause(&sc->sc_ue, hz / 50);
/* Zero the 6-byte U1/U2 tolerance block: holds off USB link power
 * management (U1/U2 states) while the chip is being configured. */
1412 memset(u1u2, 0x00, sizeof(u1u2));
1413 ure_write_mem(sc, URE_USB_TOLERANCE,
1414 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
/* Poll BOOT_CTRL until the chip finishes loading its configuration
 * (tested flag / break statement lost in extraction). */
1416 for (i = 0; i < URE_TIMEOUT; i++) {
1417 if (ure_read_2(sc, URE_PLA_BOOT_CTRL, URE_MCU_TYPE_PLA) &
1420 uether_pause(&sc->sc_ue, hz / 100);
1422 if (i == URE_TIMEOUT)
1423 device_printf(sc->sc_ue.ue_dev,
1424 "timeout waiting for chip autoload\n");
/* Poll the PHY status word until it settles to link-on or power-down. */
1426 for (i = 0; i < URE_TIMEOUT; i++) {
1427 val = ure_ocp_reg_read(sc, URE_OCP_PHY_STATUS) &
1429 if (val == URE_PHY_STAT_LAN_ON || val == URE_PHY_STAT_PWRDN)
1431 uether_pause(&sc->sc_ue, hz / 100);
1433 if (i == URE_TIMEOUT)
1434 device_printf(sc->sc_ue.ue_dev,
1435 "timeout waiting for phy to stabilize\n");
/* Clear bits in U2P3_CTRL (mask operand lost in extraction);
 * presumably disables U2->P3 transitions during setup -- confirm
 * against the complete file. */
1437 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB,
1438 ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB) &
/* Silicon-revision specific USB PHY tuning. */
1441 if (sc->sc_chip & URE_CHIP_VER_5C10) {
/* 5C10: set the SS PHY power-down scale to 96. */
1442 val = ure_read_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB);
1443 val &= ~URE_PWD_DN_SCALE_MASK;
1444 val |= URE_PWD_DN_SCALE(96);
1445 ure_write_2(sc, URE_USB_SSPHYLINK2, URE_MCU_TYPE_USB, val);
/* 5C10: allow USB2 PHY L1 and suspend states. */
1447 ure_write_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB,
1448 ure_read_1(sc, URE_USB_USB2PHY, URE_MCU_TYPE_USB) |
1449 URE_USB2PHY_L1 | URE_USB2PHY_SUSPEND);
1450 } else if (sc->sc_chip & URE_CHIP_VER_5C20) {
/* 5C20: clear bits in DMY_REG0 (mask lost in extraction). */
1451 ure_write_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA,
1452 ure_read_1(sc, URE_PLA_DMY_REG0, URE_MCU_TYPE_PLA) &
/* 5C20/5C30: choose dynamic USB burst mode depending on the
 * programmed burst size (comparison operand / else lost). */
1455 if (sc->sc_chip & (URE_CHIP_VER_5C20 | URE_CHIP_VER_5C30)) {
1456 val = ure_read_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB);
1457 if (ure_read_2(sc, URE_USB_BURST_SIZE, URE_MCU_TYPE_USB) ==
1459 val &= ~URE_DYNAMIC_BURST;
1461 val |= URE_DYNAMIC_BURST;
1462 ure_write_1(sc, URE_USB_CSR_DUMMY1, URE_MCU_TYPE_USB, val);
/* Set bits in CSR_DUMMY2 (operand lost in extraction). */
1465 ure_write_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB,
1466 ure_read_1(sc, URE_USB_CSR_DUMMY2, URE_MCU_TYPE_USB) |
/* Clear bits in the WDT11 watchdog control (mask lost). */
1469 ure_write_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB,
1470 ure_read_2(sc, URE_USB_WDT11_CTRL, URE_MCU_TYPE_USB) &
/* Reset the LED mode selection to its default. */
1473 ure_write_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA,
1474 ure_read_2(sc, URE_PLA_LED_FEATURE, URE_MCU_TYPE_PLA) &
1475 ~URE_LED_MODE_MASK);
/* LPM exit timer: 500 ms for 5C10 parts not running at SuperSpeed,
 * 500 us otherwise. */
1477 if ((sc->sc_chip & URE_CHIP_VER_5C10) &&
1478 usbd_get_speed(sc->sc_ue.ue_udev) != USB_SPEED_SUPER)
1479 val = URE_LPM_TIMER_500MS;
1481 val = URE_LPM_TIMER_500US;
1482 ure_write_1(sc, URE_USB_LPM_CTRL, URE_MCU_TYPE_USB,
1483 val | URE_FIFO_EMPTY_1FB | URE_ROK_EXIT_LPM);
/* AFE: normal sense value, select RX-idle detection. */
1485 val = ure_read_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB);
1486 val &= ~URE_SEN_VAL_MASK;
1487 val |= URE_SEN_VAL_NORMAL | URE_SEL_RXIDLE;
1488 ure_write_2(sc, URE_USB_AFE_CTRL2, URE_MCU_TYPE_USB, val);
1490 ure_write_2(sc, URE_USB_CONNECT_TIMER, URE_MCU_TYPE_USB, 0x0001);
/* Disable the power-cut feature and the resume-indicate in MISC_0
 * (MISC_0 mask continuation lost in extraction). */
1492 ure_write_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB,
1493 ure_read_2(sc, URE_USB_POWER_CUT, URE_MCU_TYPE_USB) &
1494 ~(URE_PWR_EN | URE_PHASE2_EN));
1495 ure_write_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB,
1496 ure_read_2(sc, URE_USB_MISC_0, URE_MCU_TYPE_USB) &
/* All-ones tolerance block: re-enable USB U1/U2 link power management. */
1499 memset(u1u2, 0xff, sizeof(u1u2));
1500 ure_write_mem(sc, URE_USB_TOLERANCE,
1501 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
/* Program the MAC power-saving / speed-down ratio registers. */
1503 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL, URE_MCU_TYPE_PLA,
1504 URE_ALDPS_SPDWN_RATIO);
1505 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL2, URE_MCU_TYPE_PLA,
1506 URE_EEE_SPDWN_RATIO);
1507 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL3, URE_MCU_TYPE_PLA,
1508 URE_PKT_AVAIL_SPDWN_EN | URE_SUSPEND_SPDWN_EN |
1509 URE_U1U2_SPDWN_EN | URE_L1_SPDWN_EN);
1510 ure_write_2(sc, URE_PLA_MAC_PWR_CTRL4, URE_MCU_TYPE_PLA,
1511 URE_PWRSAVE_SPDWN_EN | URE_RXDV_SPDWN_EN | URE_TX10MIDLE_EN |
1512 URE_TP100_SPDWN_EN | URE_TP500_SPDWN_EN | URE_TP1000_SPDWN_EN |
/* U2P3 is only enabled on revisions newer than 5C00/5C10
 * (else keyword lost in extraction between 1517 and 1519). */
1515 val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB);
1516 if (!(sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10)))
1517 val |= URE_U2P3_ENABLE;
1519 val &= ~URE_U2P3_ENABLE;
1520 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val);
/* Hold off U1/U2 again while ALDPS is being reconfigured below. */
1522 memset(u1u2, 0x00, sizeof(u1u2));
1523 ure_write_mem(sc, URE_USB_TOLERANCE,
1524 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
1526 /* Disable ALDPS. */
1527 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
1528 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) & ~URE_EN_ALDPS);
1529 uether_pause(&sc->sc_ue, hz / 50);
1533 /* Enable Rx aggregation. */
1534 ure_write_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB,
1535 ure_read_2(sc, URE_USB_USB_CTRL, URE_MCU_TYPE_USB) &
1536 ~URE_RX_AGG_DISABLE);
/* Re-apply the U2P3 revision gate (same pattern as above). */
1538 val = ure_read_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB);
1539 if (!(sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10)))
1540 val |= URE_U2P3_ENABLE;
1542 val &= ~URE_U2P3_ENABLE;
1543 ure_write_2(sc, URE_USB_U2P3_CTRL, URE_MCU_TYPE_USB, val);
/* Finally re-enable USB U1/U2 link power management. */
1545 memset(u1u2, 0xff, sizeof(u1u2));
1546 ure_write_mem(sc, URE_USB_TOLERANCE,
1547 URE_MCU_TYPE_USB | URE_BYTE_EN_SIX_BYTES, u1u2, sizeof(u1u2));
/*
 * ure_stop: bring the interface down.  Clears the running/active
 * interface flags and the cached link state, then cancels all USB
 * transfers.  Must be called with the softc lock held (asserted).
 *
 * NOTE(review): the closing braces of the loop and function were lost
 * in this extraction.
 */
1551 ure_stop(struct usb_ether *ue)
1553 struct ure_softc *sc = uether_getsc(ue);
1554 struct ifnet *ifp = uether_getifp(ue);
/* Caller must already hold the softc mutex. */
1556 URE_LOCK_ASSERT(sc, MA_OWNED);
/* Mark the interface stopped and forget link/rx state so a later
 * init restarts from scratch. */
1558 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1559 sc->sc_flags &= ~URE_FLAG_LINK;
1560 sc->sc_rxstarted = 0;
1563 * stop all the transfers, if not already stopped:
/* Cancel every rx and tx USB transfer pair. */
1565 for (int i = 0; i < URE_N_TRANSFER; i++) {
1566 usbd_transfer_stop(sc->sc_rx_xfer[i]);
1567 usbd_transfer_stop(sc->sc_tx_xfer[i]);
/*
 * ure_disable_teredo: turn off the chip's built-in Teredo (IPv6-in-UDP
 * tunneling) offload and clear the associated wake/RealWOW timers so
 * the host network stack handles all such traffic itself.
 */
1572 ure_disable_teredo(struct ure_softc *sc)
/* Clear Teredo selection, RS-event mask and OOB Teredo enable. */
1575 ure_write_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA,
1576 ure_read_4(sc, URE_PLA_TEREDO_CFG, URE_MCU_TYPE_PLA) &
1577 ~(URE_TEREDO_SEL | URE_TEREDO_RS_EVENT_MASK | URE_OOB_TEREDO_EN));
/* NOTE(review): the value written to WDT6_CTRL (original line 1579)
 * was lost in this extraction -- consult the complete file. */
1578 ure_write_2(sc, URE_PLA_WDT6_CTRL, URE_MCU_TYPE_PLA,
/* Zero the RealWOW and Teredo timers. */
1580 ure_write_2(sc, URE_PLA_REALWOW_TIMER, URE_MCU_TYPE_PLA, 0);
1581 ure_write_4(sc, URE_PLA_TEREDO_TIMER, URE_MCU_TYPE_PLA, 0);
/*
 * ure_init_fifo: reset and configure the MAC rx/tx FIFOs.  Disables
 * receive, applies PHY tuning for the non-8152 (8153) variants, waits
 * for the chip's OOB link-list to quiesce and re-arm, then programs
 * the rx/tx FIFO thresholds (speed-dependent for full-speed USB).
 *
 * NOTE(review): several original lines are missing from this extract
 * (mask operands, else branches, closing braces); comments below
 * describe only what the visible lines establish.
 */
1585 ure_init_fifo(struct ure_softc *sc)
1587 uint32_t rx_fifo1, rx_fifo2;
/* Set bits in MISC_1 (operand lost in extraction). */
1590 ure_write_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA,
1591 ure_read_2(sc, URE_PLA_MISC_1, URE_MCU_TYPE_PLA) |
/* Teredo offload must be off before reprogramming the FIFOs. */
1594 ure_disable_teredo(sc);
1596 DEVPRINTFN(14, sc->sc_ue.ue_dev, "init_fifo: RCR: %#x\n", ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA));
/* Clear bits in the receive configuration register (mask lost);
 * presumably disables packet acceptance during reconfiguration. */
1597 ure_write_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA,
1598 ure_read_4(sc, URE_PLA_RCR, URE_MCU_TYPE_PLA) &
/* PHY tuning applies only to the 8153 family (not URE_FLAG_8152). */
1601 if (!(sc->sc_flags & URE_FLAG_8152)) {
1602 if (sc->sc_chip & (URE_CHIP_VER_5C00 | URE_CHIP_VER_5C10 |
1603 URE_CHIP_VER_5C20)) {
/* Early 8153 revisions: ADC clock select / enable / EMI settings. */
1604 ure_ocp_reg_write(sc, URE_OCP_ADC_CFG,
1605 URE_CKADSEL_L | URE_ADC_EN | URE_EN_EMI_L);
1607 if (sc->sc_chip & URE_CHIP_VER_5C00) {
/* 5C00 only: disable CTAP short in the EEE configuration. */
1608 ure_ocp_reg_write(sc, URE_OCP_EEE_CFG,
1609 ure_ocp_reg_read(sc, URE_OCP_EEE_CFG) &
1610 ~URE_CTAP_SHORT_EN);
/* Set bits in POWER_CFG / DOWN_SPEED / POWER_CFG again (the OR
 * operands on lines 1614/1617/1620 were lost in extraction). */
1612 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
1613 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) |
1615 ure_ocp_reg_write(sc, URE_OCP_DOWN_SPEED,
1616 ure_ocp_reg_read(sc, URE_OCP_DOWN_SPEED) |
1618 ure_ocp_reg_write(sc, URE_OCP_POWER_CFG,
1619 ure_ocp_reg_read(sc, URE_OCP_POWER_CFG) |
/* PHY SRAM tuning: write impedance value via ADDR/DATA pair. */
1621 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_IMPEDANCE);
1622 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0b13);
/* Switch PHY power mode from PWM to PFM. */
1623 ure_write_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA,
1624 ure_read_2(sc, URE_PLA_PHY_PWR, URE_MCU_TYPE_PLA) |
1625 URE_PFM_PWM_SWITCH);
1627 /* Enable LPF corner auto tune. */
1628 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_LPF_CFG);
1629 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0xf70f);
1631 /* Adjust 10M amplitude. */
1632 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP1);
1633 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x00af);
1634 ure_ocp_reg_write(sc, URE_OCP_SRAM_ADDR, URE_SRAM_10M_AMP2);
1635 ure_ocp_reg_write(sc, URE_OCP_SRAM_DATA, 0x0208);
/* Disable rx/tx by clearing the command register. */
1640 ure_write_1(sc, URE_PLA_CR, URE_MCU_TYPE_PLA, 0);
/* Clear bits in OOB_CTRL (mask lost in extraction). */
1642 ure_write_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA,
1643 ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
/* Clear bits in SFF_STS_7, then wait for the OOB link-list to become
 * ready (tested-mask / break lines lost in extraction). */
1646 ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA,
1647 ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) &
1649 for (i = 0; i < URE_TIMEOUT; i++) {
1650 if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
1651 URE_LINK_LIST_READY)
1653 uether_pause(&sc->sc_ue, hz / 100);
1655 if (i == URE_TIMEOUT)
1656 device_printf(sc->sc_ue.ue_dev,
1657 "timeout waiting for OOB control\n");
/* Set the same SFF_STS_7 bits back and wait for ready again. */
1658 ure_write_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA,
1659 ure_read_2(sc, URE_PLA_SFF_STS_7, URE_MCU_TYPE_PLA) |
1661 for (i = 0; i < URE_TIMEOUT; i++) {
1662 if (ure_read_1(sc, URE_PLA_OOB_CTRL, URE_MCU_TYPE_PLA) &
1663 URE_LINK_LIST_READY)
1665 uether_pause(&sc->sc_ue, hz / 100);
1667 if (i == URE_TIMEOUT)
1668 device_printf(sc->sc_ue.ue_dev,
1669 "timeout waiting for OOB control\n");
/* Clear bits in CPCR (mask lost in extraction). */
1671 ure_write_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA,
1672 ure_read_2(sc, URE_PLA_CPCR, URE_MCU_TYPE_PLA) &
/* Let the hardware manage the tx FIFO automatically. */
1674 ure_write_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA,
1675 ure_read_2(sc, URE_PLA_TCR0, URE_MCU_TYPE_PLA) |
1676 URE_TCR0_AUTO_FIFO);
1678 /* Configure Rx FIFO threshold. */
1679 ure_write_4(sc, URE_PLA_RXFIFO_CTRL0, URE_MCU_TYPE_PLA,
1680 URE_RXFIFO_THR1_NORMAL);
/* Full-speed USB gets lower thresholds than high/super speed
 * (else keyword between 1683 and 1685 lost in extraction). */
1681 if (usbd_get_speed(sc->sc_ue.ue_udev) == USB_SPEED_FULL) {
1682 rx_fifo1 = URE_RXFIFO_THR2_FULL;
1683 rx_fifo2 = URE_RXFIFO_THR3_FULL;
1685 rx_fifo1 = URE_RXFIFO_THR2_HIGH;
1686 rx_fifo2 = URE_RXFIFO_THR3_HIGH;
1688 ure_write_4(sc, URE_PLA_RXFIFO_CTRL1, URE_MCU_TYPE_PLA, rx_fifo1);
1689 ure_write_4(sc, URE_PLA_RXFIFO_CTRL2, URE_MCU_TYPE_PLA, rx_fifo2);
1691 /* Configure Tx FIFO threshold. */
1692 ure_write_4(sc, URE_PLA_TXFIFO_CTRL, URE_MCU_TYPE_PLA,
1693 URE_TXFIFO_THR_NORMAL);
/*
 * ure_rxcsum: translate the hardware rx-descriptor checksum bits into
 * mbuf csum_flags.  Does nothing unless IFCAP_RXCSUM is enabled in
 * the interface capabilities (capenb).
 *
 * NOTE(review): declarations, the early return, and the IPv6 branch
 * body are among the lines lost in this extraction.
 */
1697 * Update mbuf for rx checksum from hardware
1700 ure_rxcsum(int capenb, struct ure_rxpkt *rp, struct mbuf *m)
1703 uint32_t csum, misc;
/* Start from a clean slate; bail out if rx csum offload is off. */
1706 m->m_pkthdr.csum_flags = 0;
1708 if (!(capenb & IFCAP_RXCSUM))
/* Descriptor fields are little-endian on the wire. */
1711 csum = le32toh(rp->ure_csum);
1712 misc = le32toh(rp->ure_misc);
/* Hardware looked at the IP header: IPv4 gets CSUM_IP_CHECKED,
 * the IPv6 branch body was lost in extraction. */
1717 if (csum & URE_RXPKT_IPV4_CS)
1718 flags |= CSUM_IP_CHECKED;
1719 else if (csum & URE_RXPKT_IPV6_CS)
/* NOTE(review): these two test the raw descriptor field rp->ure_csum
 * rather than the byteswapped 'csum' used above -- fine on
 * little-endian hosts but looks like an endianness bug on big-endian;
 * confirm against the complete file and fix to use 'csum'. */
1722 tcp = rp->ure_csum & URE_RXPKT_TCP_CS;
1723 udp = rp->ure_csum & URE_RXPKT_UDP_CS;
/* IP header checked and no IP failure bit -> header checksum valid. */
1725 if (__predict_true((flags & CSUM_IP_CHECKED) &&
1726 !(misc & URE_RXPKT_IP_F))) {
1727 flags |= CSUM_IP_VALID;
/* TCP/UDP checked with no failure bit -> payload checksum valid;
 * 0xFFFF csum_data tells the stack the pseudo-header sum is done. */
1730 (tcp && !(misc & URE_RXPKT_TCP_F)) ||
1731 (udp && !(misc & URE_RXPKT_UDP_F)))) {
1732 flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1733 m->m_pkthdr.csum_data = 0xFFFF;
1736 m->m_pkthdr.csum_flags = flags;
1740 * If the L4 checksum offset is larger than 0x7ff (2047), return failure.
1741 * We currently restrict MTU such that it can't happen, and even if we
1742 * did have a large enough MTU, only a very specially crafted IPv6 packet
1743 * with MANY headers could possibly come close.
1745 * Returns 0 for success, and 1 if the packet cannot be checksummed and
1746 * should be dropped.
1749 ure_txcsum(struct mbuf *m, int caps, uint32_t *regout)
1752 struct ether_header *eh;
1760 flags = m->m_pkthdr.csum_flags;
1764 if (__predict_true(m->m_len >= (int)sizeof(*eh))) {
1765 eh = mtod(m, struct ether_header *);
1766 type = eh->ether_type;
1768 m_copydata(m, offsetof(struct ether_header, ether_type),
1769 sizeof(type), (caddr_t)&type);
1771 switch (type = htons(type)) {
1773 case ETHERTYPE_IPV6:
1774 l3off = ETHER_HDR_LEN;
1776 case ETHERTYPE_VLAN:
1777 /* XXX - what about QinQ? */
1778 l3off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1786 if (flags & CSUM_IP)
1787 reg |= URE_TXPKT_IPV4_CS;
1789 data = m->m_pkthdr.csum_data;
1790 if (flags & (CSUM_IP_TCP | CSUM_IP_UDP)) {
1791 m_copydata(m, l3off, sizeof ip, (caddr_t)&ip);
1792 l4off = l3off + (ip.ip_hl << 2) + data;
1793 if (__predict_false(l4off > URE_L4_OFFSET_MAX))
1796 reg |= URE_TXPKT_IPV4_CS;
1797 if (flags & CSUM_IP_TCP)
1798 reg |= URE_TXPKT_TCP_CS;
1799 else if (flags & CSUM_IP_UDP)
1800 reg |= URE_TXPKT_UDP_CS;
1801 reg |= l4off << URE_L4_OFFSET_SHIFT;
1804 else if (flags & (CSUM_IP6_TCP | CSUM_IP6_UDP)) {
1805 l4off = l3off + data;
1806 if (__predict_false(l4off > URE_L4_OFFSET_MAX))
1809 reg |= URE_TXPKT_IPV6_CS;
1810 if (flags & CSUM_IP6_TCP)
1811 reg |= URE_TXPKT_TCP_CS;
1812 else if (flags & CSUM_IP6_UDP)
1813 reg |= URE_TXPKT_UDP_CS;
1814 reg |= l4off << URE_L4_OFFSET_SHIFT;