2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013-2014 Kevin Lo
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
33 * ASIX Electronics AX88178A/AX88179 USB 2.0/3.0 gigabit ethernet driver.
36 #include <sys/param.h>
37 #include <sys/systm.h>
39 #include <sys/condvar.h>
40 #include <sys/endian.h>
41 #include <sys/kernel.h>
43 #include <sys/module.h>
44 #include <sys/mutex.h>
45 #include <sys/socket.h>
46 #include <sys/sysctl.h>
47 #include <sys/unistd.h>
50 #include <net/if_var.h>
52 #include <dev/usb/usb.h>
53 #include <dev/usb/usbdi.h>
54 #include <dev/usb/usbdi_util.h>
57 #define USB_DEBUG_VAR axge_debug
58 #include <dev/usb/usb_debug.h>
59 #include <dev/usb/usb_process.h>
61 #include <dev/usb/net/usb_ethernet.h>
62 #include <dev/usb/net/if_axgereg.h>
65 * Various supported device vendors/products.
/*
 * USB host-ID match table of supported vendor/product pairs; consumed
 * by axge_probe() via usbd_lookup_id_by_uaa() and exported through
 * USB_PNP_HOST_INFO() below.
 * NOTE(review): sampled excerpt — the array's closing brace (and any
 * further entries) are not visible in this span.
 */
68 static const STRUCT_USB_HOST_ID axge_devs[] = {
69 #define AXGE_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) }
70 AXGE_DEV(ASIX, AX88178A),
71 AXGE_DEV(ASIX, AX88179),
72 AXGE_DEV(DLINK, DUB1312),
73 AXGE_DEV(LENOVO, GIGALAN),
74 AXGE_DEV(SITECOMEU, LN032),
/*
 * Per-speed RX bulk-in aggregation parameter rows.  Five bytes of the
 * selected row are written to the AXGE_RX_BULKIN_QCTRL registers in
 * axge_miibus_statchg(); the row index depends on the negotiated link
 * speed and the USB bus speed (SS/HS/other).
 * NOTE(review): the struct declaration heading this array lies outside
 * the visible span, so the meaning of the individual fields cannot be
 * confirmed here.
 */
84 } __packed axge_bulk_size[] = {
85 { 7, 0x4f, 0x00, 0x12, 0xff },
86 { 7, 0x20, 0x03, 0x16, 0xff },
87 { 7, 0xae, 0x07, 0x18, 0xff },
88 { 7, 0xcc, 0x4c, 0x18, 0x08 }
/*
 * Forward declarations: newbus device methods, USB transfer callbacks,
 * MII bus accessors, uether(4) entry points, and the driver's private
 * register/memory helpers.  Also the hw.usb.axge debug sysctl.
 */
93 static device_probe_t axge_probe;
94 static device_attach_t axge_attach;
95 static device_detach_t axge_detach;
97 static usb_callback_t axge_bulk_read_callback;
98 static usb_callback_t axge_bulk_write_callback;
100 static miibus_readreg_t axge_miibus_readreg;
101 static miibus_writereg_t axge_miibus_writereg;
102 static miibus_statchg_t axge_miibus_statchg;
104 static uether_fn_t axge_attach_post;
105 static uether_fn_t axge_init;
106 static uether_fn_t axge_stop;
107 static uether_fn_t axge_start;
108 static uether_fn_t axge_tick;
109 static uether_fn_t axge_rxfilter;
111 static int	axge_read_mem(struct axge_softc *, uint8_t, uint16_t,
112 uint16_t, void *, int);
113 static void	axge_write_mem(struct axge_softc *, uint8_t, uint16_t,
114 uint16_t, void *, int);
115 static uint8_t	axge_read_cmd_1(struct axge_softc *, uint8_t, uint16_t);
116 static uint16_t	axge_read_cmd_2(struct axge_softc *, uint8_t, uint16_t,
118 static void	axge_write_cmd_1(struct axge_softc *, uint8_t, uint16_t,
120 static void	axge_write_cmd_2(struct axge_softc *, uint8_t, uint16_t,
122 static void	axge_chip_init(struct axge_softc *);
123 static void	axge_reset(struct axge_softc *);
125 static int	axge_attach_post_sub(struct usb_ether *);
126 static int	axge_ifmedia_upd(struct ifnet *);
127 static void	axge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
128 static int	axge_ioctl(struct ifnet *, u_long, caddr_t);
129 static void	axge_rx_frame(struct usb_ether *, struct usb_page_cache *, int);
130 static void	axge_rxeof(struct usb_ether *, struct usb_page_cache *,
131 unsigned int, unsigned int, uint32_t);
132 static void	axge_csum_cfg(struct usb_ether *);
/* TX checksum-offload capabilities advertised via if_hwassist. */
134 #define	AXGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
/* Debug level for the USB_DEBUG_VAR machinery; tunable at runtime. */
137 static int axge_debug = 0;
139 static SYSCTL_NODE(_hw_usb, OID_AUTO, axge, CTLFLAG_RW, 0, "USB axge");
140 SYSCTL_INT(_hw_usb_axge, OID_AUTO, debug, CTLFLAG_RWTUN, &axge_debug, 0,
/*
 * USB transfer configuration: one multi-frame bulk-out transfer for TX
 * (10 s timeout so a wedged pipe eventually errors out) and one bulk-in
 * transfer for RX (no timeout; it stays posted waiting for traffic).
 * NOTE(review): sampled excerpt — the .type/.bufsize lines of the RX
 * entry and some closing braces are not visible here.
 */
144 static const struct usb_config axge_config[AXGE_N_TRANSFER] = {
145 [AXGE_BULK_DT_WR] = {
147 .endpoint = UE_ADDR_ANY,
148 .direction = UE_DIR_OUT,
149 .frames = AXGE_N_FRAMES,
150 .bufsize = AXGE_N_FRAMES * MCLBYTES,
151 .flags = {.pipe_bof = 1,.force_short_xfer = 1,},
152 .callback = axge_bulk_write_callback,
153 .timeout = 10000,	/* 10 seconds */
155 [AXGE_BULK_DT_RD] = {
157 .endpoint = UE_ADDR_ANY,
158 .direction = UE_DIR_IN,
160 .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
161 .callback = axge_bulk_read_callback,
162 .timeout = 0,		/* no timeout */
/*
 * newbus method table: device lifecycle entry points plus the MII bus
 * accessors miibus(4) uses to reach the PHY through this driver.
 */
166 static device_method_t axge_methods[] = {
167 /* Device interface. */
168 DEVMETHOD(device_probe,		axge_probe),
169 DEVMETHOD(device_attach,	axge_attach),
170 DEVMETHOD(device_detach,	axge_detach),
173 DEVMETHOD(miibus_readreg,	axge_miibus_readreg),
174 DEVMETHOD(miibus_writereg,	axge_miibus_writereg),
175 DEVMETHOD(miibus_statchg,	axge_miibus_statchg),
/*
 * Driver registration glue: attach axge under uhub, hang miibus off
 * axge, declare module dependencies/version, and export the PNP match
 * table so devd(8) can autoload the module.
 */
180 static driver_t axge_driver = {
182 .methods = axge_methods,
183 .size = sizeof(struct axge_softc),
186 static devclass_t axge_devclass;
188 DRIVER_MODULE(axge, uhub, axge_driver, axge_devclass, NULL, NULL);
189 DRIVER_MODULE(miibus, axge, miibus_driver, miibus_devclass, NULL, NULL);
190 MODULE_DEPEND(axge, uether, 1, 1, 1);
191 MODULE_DEPEND(axge, usb, 1, 1, 1);
192 MODULE_DEPEND(axge, ether, 1, 1, 1);
193 MODULE_DEPEND(axge, miibus, 1, 1, 1);
194 MODULE_VERSION(axge, 1);
195 USB_PNP_HOST_INFO(axge_devs);
/*
 * uether(4) callback table.  Note that both setmulti and setpromisc
 * funnel into the single axge_rxfilter() routine, which reprograms the
 * whole RX filter from scratch each time.
 */
197 static const struct usb_ether_methods axge_ue_methods = {
198 .ue_attach_post = axge_attach_post,
199 .ue_attach_post_sub = axge_attach_post_sub,
200 .ue_start = axge_start,
201 .ue_init = axge_init,
202 .ue_stop = axge_stop,
203 .ue_tick = axge_tick,
204 .ue_setmulti = axge_rxfilter,
205 .ue_setpromisc = axge_rxfilter,
206 .ue_mii_upd = axge_ifmedia_upd,
207 .ue_mii_sts = axge_ifmedia_sts,
/*
 * Issue a vendor-specific control-IN request to read 'len' bytes of
 * device memory/registers into 'buf'.  Caller must hold the softc
 * mutex.  Returns the usb_error_t from uether_do_request() (1 s
 * timeout).
 * NOTE(review): sampled excerpt — the 'req.bRequest = cmd;' assignment
 * between the bmRequestType and wValue lines is not visible here.
 */
211 axge_read_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
212 uint16_t val, void *buf, int len)
214 struct usb_device_request req;
216 AXGE_LOCK_ASSERT(sc, MA_OWNED);
218 req.bmRequestType = UT_READ_VENDOR_DEVICE;
220 USETW(req.wValue, val);
221 USETW(req.wIndex, index);
222 USETW(req.wLength, len);
224 return (uether_do_request(&sc->sc_ue, &req, buf, 1000));
/*
 * Issue a vendor-specific control-OUT request writing 'len' bytes from
 * 'buf' to the device.  Caller must hold the softc mutex.  Errors from
 * uether_do_request() are handled in the (not fully visible) branch
 * below — presumably just logged, since the function returns void.
 */
228 axge_write_mem(struct axge_softc *sc, uint8_t cmd, uint16_t index,
229 uint16_t val, void *buf, int len)
231 struct usb_device_request req;
233 AXGE_LOCK_ASSERT(sc, MA_OWNED);
235 req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
237 USETW(req.wValue, val);
238 USETW(req.wIndex, index);
239 USETW(req.wLength, len);
241 if (uether_do_request(&sc->sc_ue, &req, buf, 1000)) {
/*
 * Convenience wrappers around axge_read_mem()/axge_write_mem() for
 * single 8-bit and 16-bit register accesses.  The 16-bit variants take
 * an explicit wIndex; the 8-bit ones hard-code index 1.
 * NOTE(review): sampled excerpt — local declarations, return
 * statements, and the le16toh/htole16 byte-order conversions these
 * helpers normally perform are not all visible in this span.
 */
247 axge_read_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg)
251 axge_read_mem(sc, cmd, 1, reg, &val, 1);
256 axge_read_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
261 axge_read_mem(sc, cmd, index, reg, &val, 2);
266 axge_write_cmd_1(struct axge_softc *sc, uint8_t cmd, uint16_t reg, uint8_t val)
268 axge_write_mem(sc, cmd, 1, reg, &val, 1);
272 axge_write_cmd_2(struct axge_softc *sc, uint8_t cmd, uint16_t index,
273 uint16_t reg, uint16_t val)
278 axge_write_mem(sc, cmd, index, reg, &temp, 2);
/*
 * MII bus register accessors.  PHY registers are reached through the
 * AXGE_ACCESS_PHY vendor command; note the (reg, phy) argument order
 * in the cmd_2 helpers — reg becomes wIndex and phy the register
 * selector.  Both routines take the softc mutex themselves when the
 * caller does not already hold it (the 'locked' check).
 * NOTE(review): sampled excerpt — the lock/unlock statements and
 * return paths are not visible in this span.
 */
282 axge_miibus_readreg(device_t dev, int phy, int reg)
284 struct axge_softc *sc;
288 sc = device_get_softc(dev);
289 locked = mtx_owned(&sc->sc_mtx);
293 val = axge_read_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy);
302 axge_miibus_writereg(device_t dev, int phy, int reg, int val)
304 struct axge_softc *sc;
307 sc = device_get_softc(dev);
308 locked = mtx_owned(&sc->sc_mtx);
312 axge_write_cmd_2(sc, AXGE_ACCESS_PHY, reg, phy, val);
/*
 * MII link-state change handler.  Tracks link presence in
 * AXGE_FLAG_LINK, then reprograms the controller for the new media:
 * duplex and TX/RX pause bits go into the medium status register
 * (AXGE_MSR), and a speed-appropriate row from axge_bulk_size[] is
 * written to AXGE_RX_BULKIN_QCTRL to retune RX bulk-in aggregation.
 * The USB link speed (SuperSpeed/HighSpeed, from AXGE_PLSR) selects
 * between table rows within each ethernet speed.
 */
321 axge_miibus_statchg(device_t dev)
323 struct axge_softc *sc;
324 struct mii_data *mii;
326 uint8_t link_status, tmp[5];
330 sc = device_get_softc(dev);
332 locked = mtx_owned(&sc->sc_mtx);
336 ifp = uether_getifp(&sc->sc_ue);
337 if (mii == NULL || ifp == NULL ||
338 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
/* Re-derive link state from the current media status/subtype. */
341 sc->sc_flags &= ~AXGE_FLAG_LINK;
342 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
343 (IFM_ACTIVE | IFM_AVALID)) {
344 switch (IFM_SUBTYPE(mii->mii_media_active)) {
348 sc->sc_flags |= AXGE_FLAG_LINK;
355 /* Lost link, do nothing. */
356 if ((sc->sc_flags & AXGE_FLAG_LINK) == 0)
359 link_status = axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PLSR);
/* Duplex and flow-control bits for AXGE_MSR (set into 'val' in
 * statements not visible in this sampled span). */
362 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
364 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
366 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
/* Pick the bulk-in tuning row by ethernet speed x USB speed. */
370 switch (IFM_SUBTYPE(mii->mii_media_active)) {
372 val |= MSR_GM | MSR_EN_125MHZ;
373 if (link_status & PLSR_USB_SS)
374 memcpy(tmp, &axge_bulk_size[0], 5);
375 else if (link_status & PLSR_USB_HS)
376 memcpy(tmp, &axge_bulk_size[1], 5);
378 memcpy(tmp, &axge_bulk_size[3], 5);
382 if (link_status & (PLSR_USB_SS | PLSR_USB_HS))
383 memcpy(tmp, &axge_bulk_size[2], 5);
385 memcpy(tmp, &axge_bulk_size[3], 5);
388 memcpy(tmp, &axge_bulk_size[3], 5);
391 /* Rx bulk configuration. */
392 axge_write_mem(sc, AXGE_ACCESS_MAC, 5, AXGE_RX_BULKIN_QCTRL, tmp, 5);
393 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
/*
 * One-time controller initialization: power-cycle the internal PHY
 * via AXGE_EPPRCR (clear, then assert EPPRCR_IPRL and wait 250 ms for
 * it to come up), then select the auto/backup clock sources and give
 * the chip another 100 ms to settle.
 */
400 axge_chip_init(struct axge_softc *sc)
402 /* Power up ethernet PHY. */
403 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, 0);
404 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, EPPRCR_IPRL);
405 uether_pause(&sc->sc_ue, hz / 4);
406 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT,
407 AXGE_CLK_SELECT_ACS | AXGE_CLK_SELECT_BCS);
408 uether_pause(&sc->sc_ue, hz / 10);
/*
 * Soft-reset the device by re-issuing SET_CONFIGURATION with the
 * current configuration value; a failure is only logged, not fatal.
 * After a short settle delay the controller is reinitialized (the
 * axge_chip_init() call lies just past the visible span).
 */
412 axge_reset(struct axge_softc *sc)
414 struct usb_config_descriptor *cd;
417 cd = usbd_get_config_descriptor(sc->sc_ue.ue_udev);
419 err = usbd_req_set_config(sc->sc_ue.ue_udev, &sc->sc_mtx,
420 cd->bConfigurationValue);
422 DPRINTF("reset failed (ignored)\n");
424 /* Wait a little while for the chip to get its brains in order. */
425 uether_pause(&sc->sc_ue, hz / 100);
427 /* Reinitialize controller to achieve full reset. */
/*
 * uether post-attach hook: initialize the controller and read the
 * factory MAC address from the AXGE_NIDR registers into ue_eaddr.
 */
432 axge_attach_post(struct usb_ether *ue)
434 struct axge_softc *sc;
436 sc = uether_getsc(ue);
438 /* Initialize controller and get station address. */
440 axge_read_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
441 ue->ue_eaddr, ETHER_ADDR_LEN);
/*
 * Second-stage attach: populate the ifnet (flags, handlers, send
 * queue), advertise VLAN MTU plus TX/RX checksum offload (enabled by
 * default), and attach the MII PHY at AXGE_PHY_ADDR with pause-frame
 * support (MIIF_DOPAUSE).  Returns the mii_attach() error code.
 * NOTE(review): sampled excerpt — the ifp assignment and locking
 * around mii_attach() are not visible here.
 */
445 axge_attach_post_sub(struct usb_ether *ue)
447 struct axge_softc *sc;
451 sc = uether_getsc(ue);
453 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
454 ifp->if_start = uether_start;
455 ifp->if_ioctl = axge_ioctl;
456 ifp->if_init = uether_init;
457 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
458 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
459 IFQ_SET_READY(&ifp->if_snd);
461 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_TXCSUM | IFCAP_RXCSUM;
462 ifp->if_hwassist = AXGE_CSUM_FEATURES;
463 ifp->if_capenable = ifp->if_capabilities;
466 error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
467 uether_ifmedia_upd, ue->ue_methods->ue_mii_sts,
468 BMSR_DEFCAPMASK, AXGE_PHY_ADDR, MII_OFFSET_ANY, MIIF_DOPAUSE);
/*
 * Set media options: reset every attached PHY instance (the standard
 * pattern before a media change), then ask the MII layer to switch to
 * the currently selected media.  Softc mutex must be held.
 */
478 axge_ifmedia_upd(struct ifnet *ifp)
480 struct axge_softc *sc;
481 struct mii_data *mii;
482 struct mii_softc *miisc;
487 AXGE_LOCK_ASSERT(sc, MA_OWNED);
489 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
491 error = mii_mediachg(mii);
/*
 * Report current media status: copy the MII layer's active media and
 * link status into the caller's ifmediareq.
 */
500 axge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
502 struct axge_softc *sc;
503 struct mii_data *mii;
509 ifmr->ifm_active = mii->mii_media_active;
510 ifmr->ifm_status = mii->mii_media_status;
/*
 * Probe: accept only host-mode devices on the expected configuration
 * and interface index, then match against the axge_devs table.
 */
518 axge_probe(device_t dev)
520 struct usb_attach_arg *uaa;
522 uaa = device_get_ivars(dev);
523 if (uaa->usb_mode != USB_MODE_HOST)
525 if (uaa->info.bConfigIndex != AXGE_CONFIG_IDX)
527 if (uaa->info.bIfaceIndex != AXGE_IFACE_IDX)
530 return (usbd_lookup_id_by_uaa(axge_devs, sizeof(axge_devs), uaa));
/*
 * Attach: set the USB description, create the softc mutex, allocate
 * the two bulk transfers on AXGE_IFACE_IDX, wire up the usb_ether
 * glue, and hand off to uether_ifattach().  On transfer-setup failure
 * the mutex is torn down before returning; the final error path
 * (visible as the trailing ENXIO return) unwinds via axge_detach().
 */
538 axge_attach(device_t dev)
540 struct usb_attach_arg *uaa;
541 struct axge_softc *sc;
542 struct usb_ether *ue;
546 uaa = device_get_ivars(dev);
547 sc = device_get_softc(dev);
550 device_set_usb_desc(dev);
551 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
553 iface_index = AXGE_IFACE_IDX;
554 error = usbd_transfer_setup(uaa->device, &iface_index,
555 sc->sc_xfer, axge_config, AXGE_N_TRANSFER, sc, &sc->sc_mtx);
557 device_printf(dev, "allocating USB transfers failed\n");
558 mtx_destroy(&sc->sc_mtx);
564 ue->ue_udev = uaa->device;
565 ue->ue_mtx = &sc->sc_mtx;
566 ue->ue_methods = &axge_ue_methods;
568 error = uether_ifattach(ue);
570 device_printf(dev, "could not attach interface\n");
573 return (0);			/* success */
577 return (ENXIO);			/* failure */
/*
 * Detach: if still attached, quiesce the hardware under the softc
 * lock — force a zero-length bulk-in packet (EPPRCR_BZ|EPPRCR_IPRL)
 * so a blocked read completes, drop the clock selection, and disable
 * the receiver (AXGE_RCR = 0) — then free the USB transfers and
 * destroy the mutex.  The uether_ifdetach() call sits in the portion
 * of this function not visible in the sampled span.
 */
581 axge_detach(device_t dev)
583 struct axge_softc *sc;
584 struct usb_ether *ue;
587 sc = device_get_softc(dev);
589 if (device_is_attached(dev)) {
593 * ether_ifdetach(9) should be called first.
596 /* Force bulk-in to return a zero-length USB packet. */
597 val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR);
598 val |= EPPRCR_BZ | EPPRCR_IPRL;
599 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_EPPRCR, val);
601 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CLK_SELECT, 0);
603 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, 0);
606 usbd_transfer_unsetup(sc->sc_xfer, AXGE_N_TRANSFER);
608 mtx_destroy(&sc->sc_mtx);
/*
 * Bulk-in completion handler.  On USB_ST_TRANSFERRED, hand the whole
 * transfer buffer to axge_rx_frame() for demultiplexing, then (in the
 * setup/default state) repost the transfer at maximum length.  On any
 * error other than a cancellation, request a pipe stall-clear before
 * retrying.
 */
614 axge_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
616 struct axge_softc *sc;
617 struct usb_ether *ue;
618 struct usb_page_cache *pc;
621 sc = usbd_xfer_softc(xfer);
623 usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
625 switch (USB_GET_STATE(xfer)) {
626 case USB_ST_TRANSFERRED:
627 pc = usbd_xfer_get_frame(xfer, 0);
628 axge_rx_frame(ue, pc, actlen);
633 usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
634 usbd_transfer_submit(xfer);
639 if (error != USB_ERR_CANCELLED) {
640 usbd_xfer_set_stall(xfer);
/*
 * Bulk-out completion handler / TX kick.  Dequeues up to AXGE_N_FRAMES
 * mbufs from the interface send queue, prepends the device's TX frame
 * header (length plus, when the stack did not request checksumming,
 * AXGE_CSUM_DISABLE so the hardware leaves the packet alone) to each,
 * and submits them as one multi-frame transfer.  Nothing is sent while
 * there is no link or a transfer is already in flight (OACTIVE).
 * OPACKETS is bumped at submit time because completion cannot tell how
 * many packets the controller coalesced (see the original comment).
 * Error state: count an output error, clear OACTIVE, and stall-clear
 * the pipe unless the transfer was cancelled.
 */
648 axge_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
650 struct axge_softc *sc;
652 struct usb_page_cache *pc;
654 struct axge_frame_txhdr txhdr;
657 sc = usbd_xfer_softc(xfer);
658 ifp = uether_getifp(&sc->sc_ue);
660 switch (USB_GET_STATE(xfer)) {
661 case USB_ST_TRANSFERRED:
662 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
666 if ((sc->sc_flags & AXGE_FLAG_LINK) == 0 ||
667 (ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0) {
669 * Don't send anything if there is no link or
670 * controller is busy.
675 for (nframes = 0; nframes < AXGE_N_FRAMES &&
676 !IFQ_DRV_IS_EMPTY(&ifp->if_snd); nframes++) {
677 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
680 usbd_xfer_set_frame_offset(xfer, nframes * MCLBYTES,
682 pc = usbd_xfer_get_frame(xfer, nframes);
684 txhdr.len = htole32(AXGE_TXBYTES(m->m_pkthdr.len));
685 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0 &&
686 (m->m_pkthdr.csum_flags & AXGE_CSUM_FEATURES) == 0)
687 txhdr.len |= htole32(AXGE_CSUM_DISABLE);
690 usbd_copy_in(pc, pos, &txhdr, sizeof(txhdr));
691 pos += sizeof(txhdr);
692 usbd_m_copy_in(pc, pos, m, 0, m->m_pkthdr.len);
693 pos += m->m_pkthdr.len;
696 * if there's a BPF listener, bounce a copy
697 * of this frame to him:
703 /* Set frame length. */
704 usbd_xfer_set_frame_len(xfer, nframes, pos);
709 * Update TX packet counter here. This is not
710 * correct way but it seems that there is no way
711 * to know how many packets are sent at the end
712 * of transfer because controller combines
713 * multiple writes into single one if there is
714 * room in TX buffer of controller.
716 if_inc_counter(ifp, IFCOUNTER_OPACKETS, nframes);
717 usbd_xfer_set_frames(xfer, nframes);
718 usbd_transfer_submit(xfer);
719 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
724 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
725 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
727 if (error != USB_ERR_CANCELLED) {
728 usbd_xfer_set_stall(xfer);
/*
 * Periodic uether tick (runs with the softc mutex held); the mii_tick
 * / link-check work happens in the portion of this function past the
 * visible span.
 */
737 axge_tick(struct usb_ether *ue)
739 struct axge_softc *sc;
740 struct mii_data *mii;
742 sc = uether_getsc(ue);
744 AXGE_LOCK_ASSERT(sc, MA_OWNED);
/*
 * Program the RX filter (serves both setmulti and setpromisc).
 * Base mode drops CRC errors and starts the receiver; broadcast
 * acceptance tracks IFF_BROADCAST.  In promiscuous/allmulti mode the
 * hardware accepts all multicast (plus RCR_PROMISC for promiscuous)
 * and the function returns early after writing AXGE_RCR.  Otherwise a
 * 64-bit multicast hash table is built from the top 6 bits of the
 * big-endian CRC32 of each group address and written to AXGE_MFA
 * before the final AXGE_RCR update.  Softc mutex held throughout.
 */
750 axge_rxfilter(struct usb_ether *ue)
752 struct axge_softc *sc;
754 struct ifmultiaddr *ifma;
757 uint8_t hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
759 sc = uether_getsc(ue);
760 ifp = uether_getifp(ue);
762 AXGE_LOCK_ASSERT(sc, MA_OWNED);
765 * Configure RX settings.
766 * Don't set RCR_IPE(IP header alignment on 32bit boundary) to disable
767 * inserting extra padding bytes. This wastes ethernet to USB host
768 * bandwidth as well as complicating RX handling logic. Current USB
769 * framework requires copying RX frames to mbufs so there is no need
770 * to worry about alignment.
772 rxmode = RCR_DROP_CRCERR | RCR_START;
773 if (ifp->if_flags & IFF_BROADCAST)
774 rxmode |= RCR_ACPT_BCAST;
775 if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
776 if (ifp->if_flags & IFF_PROMISC)
777 rxmode |= RCR_PROMISC;
778 rxmode |= RCR_ACPT_ALL_MCAST;
779 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
783 rxmode |= RCR_ACPT_MCAST;
785 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
786 if (ifma->ifma_addr->sa_family != AF_LINK)
788 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
789 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
790 hashtbl[h / 8] |= 1 << (h % 8);
792 if_maddr_runlock(ifp);
794 axge_write_mem(sc, AXGE_ACCESS_MAC, 8, AXGE_MFA, (void *)&hashtbl, 8);
795 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_RCR, rxmode);
/*
 * uether start hook: kick both bulk transfers; usbd_transfer_start()
 * is a no-op on transfers that are already running.
 */
799 axge_start(struct usb_ether *ue)
801 struct axge_softc *sc;
803 sc = uether_getsc(ue);
805 * Start the USB transfers, if not already started.
807 usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_RD]);
808 usbd_transfer_start(sc->sc_xfer[AXGE_BULK_DT_WR]);
/*
 * Bring the interface up (idempotent — bails if already RUNNING).
 * Sequence: program the station address from the ifnet lladdr, set the
 * wakeup pattern window registers (PWLLR/PWLHR magic values), apply
 * checksum-offload and RX-filter configuration, disable all hardware
 * wakeup sources (no suspend/resume framework for USB ethernet; the
 * read-back of AXGE_MMSR presumably flushes the write — confirm), set
 * the default medium (1 Gb full duplex, flow control, receiver
 * enabled), clear any TX pipe stall, mark RUNNING, and finally switch
 * to the selected media.  Softc mutex held throughout.
 */
812 axge_init(struct usb_ether *ue)
814 struct axge_softc *sc;
817 sc = uether_getsc(ue);
818 ifp = uether_getifp(ue);
819 AXGE_LOCK_ASSERT(sc, MA_OWNED);
821 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
825 * Cancel pending I/O and free all RX/TX buffers.
831 /* Set MAC address. */
832 axge_write_mem(sc, AXGE_ACCESS_MAC, ETHER_ADDR_LEN, AXGE_NIDR,
833 IF_LLADDR(ifp), ETHER_ADDR_LEN);
835 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLLR, 0x34);
836 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_PWLHR, 0x52);
838 /* Configure TX/RX checksum offloading. */
841 /* Configure RX filters. */
846 * Controller supports wakeup on link change detection,
847 * magic packet and wakeup frame recpetion. But it seems
848 * there is no framework for USB ethernet suspend/wakeup.
849 * Disable all wakeup functions.
851 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR, 0);
852 (void)axge_read_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_MMSR);
854 /* Configure default medium type. */
855 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, MSR_GM | MSR_FD |
856 MSR_RFC | MSR_TFC | MSR_RE);
858 usbd_xfer_set_stall(sc->sc_xfer[AXGE_BULK_DT_WR]);
860 ifp->if_drv_flags |= IFF_DRV_RUNNING;
861 /* Switch to selected media. */
862 axge_ifmedia_upd(ifp);
/*
 * Bring the interface down: clear the receive-enable side of AXGE_MSR
 * (the bit-clear on 'val' sits between the visible read and write),
 * drop RUNNING/OACTIVE and the link flag, and stop both bulk
 * transfers.  Softc mutex held.
 */
866 axge_stop(struct usb_ether *ue)
868 struct axge_softc *sc;
872 sc = uether_getsc(ue);
873 ifp = uether_getifp(ue);
875 AXGE_LOCK_ASSERT(sc, MA_OWNED);
877 val = axge_read_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR);
879 axge_write_cmd_2(sc, AXGE_ACCESS_MAC, 2, AXGE_MSR, val);
881 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
882 sc->sc_flags &= ~AXGE_FLAG_LINK;
885 * Stop all the transfers, if not already stopped:
887 usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_WR]);
888 usbd_transfer_stop(sc->sc_xfer[AXGE_BULK_DT_RD]);
/*
 * ioctl handler.  Only SIOCSIFCAP is handled locally: toggle TX/RX
 * checksum offload capabilities (keeping if_hwassist in sync for TX)
 * and, if anything changed while the interface is running, clear
 * RUNNING so the subsequent reinit path reprograms the hardware.
 * Everything else is delegated to uether_ioctl().
 */
892 axge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
894 struct usb_ether *ue;
895 struct axge_softc *sc;
897 int error, mask, reinit;
900 sc = uether_getsc(ue);
901 ifr = (struct ifreq *)data;
904 if (cmd == SIOCSIFCAP) {
906 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
907 if ((mask & IFCAP_TXCSUM) != 0 &&
908 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
909 ifp->if_capenable ^= IFCAP_TXCSUM;
910 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
911 ifp->if_hwassist |= AXGE_CSUM_FEATURES;
913 ifp->if_hwassist &= ~AXGE_CSUM_FEATURES;
916 if ((mask & IFCAP_RXCSUM) != 0 &&
917 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
918 ifp->if_capenable ^= IFCAP_RXCSUM;
921 if (reinit > 0 && ifp->if_drv_flags & IFF_DRV_RUNNING)
922 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
929 error = uether_ioctl(ifp, cmd, data);
/*
 * Demultiplex one bulk-in transfer.  The device packs the buffer as
 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr], where the
 * trailing 32-bit rxhdr carries the packet count (low 16 bits) and the
 * offset of the per-packet header array (high 16 bits).  Each frame is
 * 8-byte aligned (no RCR_IPE padding — see the original comment).  For
 * each packet: bounds-check the header offset, read its status word,
 * drop it on a hardware RX error (counting IERRORS) or pass it to
 * axge_rxeof(), then advance 'pos' by the 8-byte-aligned frame length
 * and step to the next header.  The enclosing loop construct is
 * outside the visible span.
 */
935 axge_rx_frame(struct usb_ether *ue, struct usb_page_cache *pc, int actlen)
937 struct axge_frame_rxhdr pkt_hdr;
940 uint32_t pkt_cnt, pkt_end;
944 /* verify we have enough data */
945 if (actlen < (int)sizeof(rxhdr))
950 usbd_copy_out(pc, actlen - sizeof(rxhdr), &rxhdr, sizeof(rxhdr));
951 rxhdr = le32toh(rxhdr);
953 pkt_cnt = rxhdr & 0xFFFF;
954 hdr_off = pkt_end = (rxhdr >> 16) & 0xFFFF;
957 * <----------------------- actlen ------------------------>
958 * [frame #0]...[frame #N][pkt_hdr #0]...[pkt_hdr #N][rxhdr]
959 * Each RX frame would be aligned on 8 bytes boundary. If
960 * RCR_IPE bit is set in AXGE_RCR register, there would be 2
961 * padding bytes and 6 dummy bytes(as the padding also should
962 * be aligned on 8 bytes boundary) for each RX frame to align
963 * IP header on 32bits boundary. Driver don't set RCR_IPE bit
964 * of AXGE_RCR register, so there should be no padding bytes
965 * which simplifies RX logic a lot.
968 /* verify the header offset */
969 if ((int)(hdr_off + sizeof(pkt_hdr)) > actlen) {
970 DPRINTF("End of packet headers\n");
973 usbd_copy_out(pc, hdr_off, &pkt_hdr, sizeof(pkt_hdr));
974 pkt_hdr.status = le32toh(pkt_hdr.status);
975 pktlen = AXGE_RXBYTES(pkt_hdr.status);
976 if (pos + pktlen > pkt_end) {
977 DPRINTF("Data position reached end\n");
981 if (AXGE_RX_ERR(pkt_hdr.status) != 0) {
982 DPRINTF("Dropped a packet\n");
983 if_inc_counter(ue->ue_ifp, IFCOUNTER_IERRORS, 1);
985 axge_rxeof(ue, pc, pos, pktlen, pkt_hdr.status);
986 pos += (pktlen + 7) & ~7;
987 hdr_off += sizeof(pkt_hdr);
/*
 * Copy one received frame out of the USB page cache into a fresh mbuf
 * and enqueue it for the uether RX path.  Frames shorter than an
 * ethernet header or too large for a cluster are counted as input
 * errors; allocation failures count as IQDROPS.  A cluster is used
 * only when the payload will not fit in an mbuf header with ETHER_ALIGN
 * slack.  If RX checksum offload is enabled, the hardware status word
 * is translated into CSUM_IP_CHECKED/CSUM_DATA_VALID flags for IPv4
 * and TCP/UDP respectively (csum_data forced to 0xffff, i.e. "valid").
 */
992 axge_rxeof(struct usb_ether *ue, struct usb_page_cache *pc, unsigned int offset,
993 unsigned int len, uint32_t status)
999 if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) {
1000 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1004 if (len > MHLEN - ETHER_ALIGN)
1005 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1007 m = m_gethdr(M_NOWAIT, MT_DATA);
1009 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1012 m->m_pkthdr.rcvif = ifp;
1013 m->m_len = m->m_pkthdr.len = len;
/* Shift payload so the IP header lands on a 32-bit boundary. */
1014 m->m_data += ETHER_ALIGN;
1016 usbd_copy_out(pc, offset, mtod(m, uint8_t *), len);
1018 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1019 if ((status & AXGE_RX_L3_CSUM_ERR) == 0 &&
1020 (status & AXGE_RX_L3_TYPE_MASK) == AXGE_RX_L3_TYPE_IPV4)
1021 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
1023 if ((status & AXGE_RX_L4_CSUM_ERR) == 0 &&
1024 ((status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_UDP ||
1025 (status & AXGE_RX_L4_TYPE_MASK) == AXGE_RX_L4_TYPE_TCP)) {
1026 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
1028 m->m_pkthdr.csum_data = 0xffff;
1031 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1033 _IF_ENQUEUE(&ue->ue_rxq, m);
/*
 * Push the interface's checksum-offload capability state into the
 * hardware: AXGE_CTCR for TX (IP/TCP/UDP) and AXGE_CRCR for RX.  A
 * zero 'csum' disables offload for that direction.  Softc mutex held.
 */
1037 axge_csum_cfg(struct usb_ether *ue)
1039 struct axge_softc *sc;
1043 sc = uether_getsc(ue);
1044 AXGE_LOCK_ASSERT(sc, MA_OWNED);
1045 ifp = uether_getifp(ue);
1048 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1049 csum |= CTCR_IP | CTCR_TCP | CTCR_UDP;
1050 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CTCR, csum);
1053 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
1054 csum |= CRCR_IP | CRCR_TCP | CRCR_UDP;
1055 axge_write_cmd_1(sc, AXGE_ACCESS_MAC, AXGE_CRCR, csum);