/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.29 2002/05/05 03:02:38 thorpej Exp
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * HME Ethernet module driver.
 *
 * The HME is, e.g., part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually on about 2^11 boundaries, and 2
 * bytes are skipped so that the header following the Ethernet header is
 * aligned on a natural boundary, which ensures minimal wastage in the most
 * common case (see the worked example below).
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
 * maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural boundary.
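 *
 * For illustration (hypothetical addresses): a cluster starting at address
 * 0x800 plus the 2 byte RX offset places the 14 byte Ethernet header at
 * 0x802, so the IP header that follows it begins at 0x810, a naturally
 * aligned 16 byte boundary.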
 *
 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
 * offloading. In reality, the same technique also works for UDP datagrams.
 * However, the hardware does not compensate the checksum for UDP datagrams,
 * which can yield an (invalid) all-zero checksum. As a safeguard, UDP checksum
 * offload is therefore disabled by default. It can be re-enabled by setting
 * the special link option link0 with ifconfig(8), as shown below.
 */
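/*
 * For example (assuming the interface is named hme0):
 *
 *	ifconfig hme0 link0	# enable UDP TX checksum offload
 *	ifconfig hme0 -link0	# disable it again
 */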
#define	HME_CSUM_FEATURES	(CSUM_TCP)

#define	KTR_HME			KTR_CT2		/* XXX */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/hme/if_hmereg.h>
#include <dev/hme/if_hmevar.h>
static void hme_start(struct ifnet *);
static void hme_start_locked(struct ifnet *);
static void hme_stop(struct hme_softc *);
static int hme_ioctl(struct ifnet *, u_long, caddr_t);
static void hme_tick(void *);
static void hme_watchdog(struct ifnet *);
static void hme_init(void *);
static void hme_init_locked(struct hme_softc *);
static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int hme_meminit(struct hme_softc *);
static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void hme_mifinit(struct hme_softc *);
static void hme_setladrf(struct hme_softc *, int);

static int hme_mediachange(struct ifnet *);
static void hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int hme_load_txmbuf(struct hme_softc *, struct mbuf **);
static void hme_read(struct hme_softc *, int, int, u_int32_t);
static void hme_eint(struct hme_softc *, u_int);
static void hme_rint(struct hme_softc *);
static void hme_tint(struct hme_softc *);
static void hme_txcksum(struct mbuf *, u_int32_t *);
static void hme_rxcksum(struct mbuf *, u_int32_t);

static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
devclass_t hme_devclass;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);
#define	HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs))
#define	HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs), (v))

#define	HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define	HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define	HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define	HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define	HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define	HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define	HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define	HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define	HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define	HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
#define	HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting any "\
		    "more\n");						\
	}								\
} while (0)
/* Support oversized VLAN frames. */
#define	HME_MAX_FRAMESIZE	(ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus handles, tags and offsets (split for SBus compatibility):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 */

	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);

	/* Make sure the chip is stopped. */
	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);

	/*
	 * Allocate DMA-capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
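	/*
	 * For illustration: each descriptor is two 32-bit words (flags and
	 * buffer address), i.e. HME_XD_SIZE == 8 bytes, so a full ring of
	 * 256 descriptors occupies 256 * 8 == 2048 bytes, exactly one
	 * alignment unit.
	 */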
	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex,
	    &sc->sc_lock, &sc->sc_cdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_rdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_tdmatag);
	/* Allocate the control/TX DMA buffer. */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    0, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
	}

	/* Load the buffer. */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
	}
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);
	/*
	 * Prepare the RX descriptors. rdesc serves as a marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	/* Same for the TX descriptors. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
	}
	sc->sc_csum_features = HME_CSUM_FEATURES;
	/* Initialize the ifnet structure. */
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_watchdog = hme_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
	ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
	IFQ_SET_READY(&ifp->if_snd);
	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `PHY number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which PHY to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external one on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}
	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	/*
	 * Tell the upper layer(s) that we support long frames and checksum
	 * offloads.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	return (0);
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
void
hme_detach(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	callout_drain(&sc->sc_tick_ch);

	device_delete_child(sc->sc_dev, sc->sc_miibus);

	for (i = 0; i < HME_NTXQ; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < HME_NRXDESC; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
}
void
hme_suspend(struct hme_softc *sc)
{

	HME_LOCK(sc);
	hme_stop(sc);
	HME_UNLOCK(sc);
}

void
hme_resume(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	HME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) != 0)
		hme_init_locked(sc);
	HME_UNLOCK(sc);
}

static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}
static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	HME_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * Unload the collision counters...
	 */
	ifp->if_collisions +=
	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	/*
	 * ...then clear the hardware counters.
	 */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

	mii_tick(sc->sc_mii);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}
static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);
	sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/* Reset the transmitter and receiver. */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);

	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}
/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static void
hme_discard_rxbuf(struct hme_softc *sc, int ix)
{

	/*
	 * Dropped a packet; reinitialize the descriptor and turn
	 * ownership back over to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix])));
}
static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	uintptr_t a, b;
	int nsegs, unmap;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri);
		return (0);
	}
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary, we might need to extend this.
	 */
	a = imax(HME_MINRXALIGN, sc->sc_burst);
	/*
	 * Make sure the buffer is suitably aligned. The 2 byte offset is
	 * removed when the mbuf is handed up. XXX: this ensures at least
	 * 16 byte alignment of the header adjacent to the Ethernet header,
	 * which should be sufficient in all cases. Nevertheless, this
	 * second-guesses ALIGN().
	 */
	m_adj(m, roundup2(b, a) - b);
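	/*
	 * Worked example (hypothetical numbers): with b == 0x4806 and
	 * a == 16, roundup2(b, a) == 0x4810, so m_adj() trims 10 bytes
	 * from the front and the buffer starts on the next 16 byte
	 * boundary.
	 */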
	if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* If nsegs is wrong, then the stack is corrupt. */
	KASSERT(nsegs == 1, ("Too many segments returned!"));
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, segs[0].ds_addr);
	rd->hrx_m = m;
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
	return (0);
}
static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	int i, error;

	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors.
	 */
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors.
	 */
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again, move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize the transmit buffer descriptors.
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
		}
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
	}

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
	}

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;

	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));

	return (0);
}
static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{

	HME_MAC_WRITE_4(sc, reg, val);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
	do {
		val = HME_MAC_READ_4(sc, reg);
			/* After 3.5ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
	} while ((val & clr) != 0 && (val & set) != set);
/*
 * Initialization of the interface; set up the initialization block
 * and the transmit/receive descriptor rings.
 */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	HME_LOCK(sc);
	hme_init_locked(sc);
	HME_UNLOCK(sc);
}

static void
hme_init_locked(struct hme_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	u_int8_t *ea;
	u_int32_t n, v;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel. */
	hme_stop(sc);

	/* Re-initialize the MIF. */
	hme_mifinit(sc);

	/* Mask all MIF interrupts, just in case. */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);

	/* step 3. Set up data structures in host memory. */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.");
		return;
	}

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
	/* Load the station MAC address. */
	ea = IF_LLADDR(sc->sc_ifp);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
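	/*
	 * For illustration (hypothetical address): 08:00:20:01:23:45 is
	 * programmed as MACADDR0 = 0x0800, MACADDR1 = 0x2001 and
	 * MACADDR2 = 0x2345, two octets per 16 bit register.
	 */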
	/*
	 * Init the seed for the backoff
	 * (source suggested by manual: low 10 bits of MAC address).
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

	/* Note: accepting the power-on defaults for the other MAC registers here. */

	/* step 5. RX MAC registers & counters */

	/* step 6 & 7. Program the Descriptor Ring Base Addresses. */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);

	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);

	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	    HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_RXTOHOST |
	    HME_SEB_STAT_TXPERR |
	    HME_SEB_STAT_RCNTEXP |
	    HME_SEB_STAT_ALL_ERRORS));

	switch (sc->sc_burst) {
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	/*
	 * Blindly setting 64 bit transfers may hang PCI cards (Cheerio?).
	 * Allowing 64 bit transfers also breaks TX checksum offload. It is
	 * not known whether this is caused by a hardware bug or by the way
	 * the driver DMAs.
	 *	if (sc->sc_pci == 0)
	 *		v |= HME_SEB_CFG_64BIT;
	 */
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode the Receive Descriptor ring size: four possible values. */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA, fix the RX first byte offset. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
	/* RX TCP/UDP checksum offset */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
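	/*
	 * The checksum start offset is counted in 16 bit half-words: with a
	 * 14 byte Ethernet header and a 20 byte base IP header this is
	 * (14 + 20) / 2 == 17, i.e. the chip begins summing at the TCP/UDP
	 * header for packets without IP options.
	 */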
	v |= n;
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	/* If an external transceiver is connected, enable its MII drivers. */
	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue the Transmit Pending command. */

	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));

	/* Set the current media. */
	/*
	 * mii_mediachg(sc->sc_mii);
	 */

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	hme_start_locked(ifp);
}
/* TX TCP/UDP checksum */
static void
hme_txcksum(struct mbuf *m, u_int32_t *cflags)
{
	struct ip *ip;
	caddr_t p;
	u_int32_t offset, offset2;

	for (; m != NULL && m->m_len == 0; m = m->m_next)
		;
	if (m == NULL || m->m_len < ETHER_HDR_LEN) {
		printf("hme_txcksum: m_len < ETHER_HDR_LEN\n");
		return;	/* checksum will be corrupted */
	}
	if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) {
		if (m->m_len != ETHER_HDR_LEN) {
			printf("hme_txcksum: m_len != ETHER_HDR_LEN\n");
			return;	/* checksum will be corrupted */
		}
		for (m = m->m_next; m != NULL && m->m_len == 0; m = m->m_next)
			;
		if (m == NULL)
			return;	/* checksum will be corrupted */
		ip = mtod(m, struct ip *);
	} else {
		p = mtod(m, caddr_t);
		ip = (struct ip *)(p + ETHER_HDR_LEN);
	}
	offset2 = m->m_pkthdr.csum_data;
	offset = (ip->ip_hl << 2) + ETHER_HDR_LEN;
	*cflags = offset << HME_XD_TXCKSUM_SSHIFT;
	*cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT);
	*cflags |= HME_XD_TXCKSUM;
}
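/*
 * Worked example: for a TCP segment without IP options, the checksum start
 * offset is ETHER_HDR_LEN + 20 == 34, and since the stack passes
 * offsetof(struct tcphdr, th_sum) == 16 in csum_data, the checksum stuff
 * offset becomes 34 + 16 == 50.
 */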
/*
 * Routine to DMA map an mbuf chain, set up the descriptor rings accordingly
 * and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors to map
 * the packet, or an errno otherwise.
 *
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf_sg() are readable from the nearest burst boundary on
 * (i.e. potentially before ds_addr) to the first boundary beyond the end.
 * This is usually a safe assumption to make, but it is not documented.
 */
static int
hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
{
	struct hme_txdesc *htx;
	struct mbuf *m, *n;
	caddr_t txd;
	int i, pci, si, ri, nseg;
	u_int32_t flags, cflags = 0;
	int error;

	m = *m0;
	if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
		return (-1);
	if ((m->m_pkthdr.csum_flags & sc->sc_csum_features) != 0)
		hme_txcksum(m, &cflags);
	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
	    m, sc->sc_rb.rb_txsegs, &nseg, 0);
	if (error == EFBIG) {
		n = m_defrag(m, M_DONTWAIT);
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
		    m, sc->sc_rb.rb_txsegs, &nseg, 0);
	} else if (error != 0)
		return (error);

	if (sc->sc_rb.rb_td_nbusy + nseg >= HME_NTXDESC) {
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
		/* Retry with m_defrag(9)? */
		return (-1);
	}
	bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);

	si = ri = sc->sc_rb.rb_tdhead;
	txd = sc->sc_rb.rb_txd;
	pci = sc->sc_pci;
	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
	    HME_XD_GETFLAGS(pci, txd, ri));
	for (i = 0; i < nseg; i++) {
		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(sc->sc_rb.rb_txsegs[i].ds_len);
		if (i == 0)
			flags |= HME_XD_SOP | cflags;
		else
			flags |= HME_XD_OWN | cflags;
		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
		    ri, si, flags);
		HME_XD_SETADDR(pci, txd, ri, sc->sc_rb.rb_txsegs[i].ds_addr);
		HME_XD_SETFLAGS(pci, txd, ri, flags);
		sc->sc_rb.rb_td_nbusy++;
		htx->htx_lastdesc = ri;
		ri = (ri + 1) % HME_NTXDESC;
	}
	sc->sc_rb.rb_tdhead = ri;

	/* Set EOP on the last descriptor. */
	ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
	flags = HME_XD_GETFLAGS(pci, txd, ri);
	flags |= HME_XD_EOP;
	CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
	    flags);
	HME_XD_SETFLAGS(pci, txd, ri, flags);
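	/*
	 * Note that ownership of the first descriptor is deliberately handed
	 * over last: the descriptors after the first were already marked
	 * HME_XD_OWN as they were filled in above, so the chip can never
	 * start on a partially built chain.
	 */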
	/* Turn over ownership of the first descriptor to the hardware. */
	flags = HME_XD_GETFLAGS(pci, txd, si);
	flags |= HME_XD_OWN;
	CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
	    ri, flags);
	HME_XD_SETFLAGS(pci, txd, si, flags);

	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);

	/* Start the transmission. */
	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);

	return (0);
}
/*
 * Pass a packet to the higher levels.
 */
static void
hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > HME_MAX_FRAMESIZE) {
		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
		    len);
		hme_discard_rxbuf(sc, ix);
		return;
	}

	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
	CTR1(KTR_HME, "hme_read: len %d", len);

	if (hme_add_rxbuf(sc, ix, 0) != 0) {
		/*
		 * hme_add_rxbuf will leave the old buffer in the ring until
		 * it is sure that a new buffer can be mapped. If it cannot,
		 * drop the packet, but leave the interface up.
		 */
		hme_discard_rxbuf(sc, ix);
		return;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
	m_adj(m, HME_RXOFFS);
	/* RX TCP/UDP checksum */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		hme_rxcksum(m, flags);
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}
static void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	HME_LOCK(sc);
	hme_start_locked(ifp);
	HME_UNLOCK(sc);
}

static void
hme_start_locked(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct mbuf *m;
	int error;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		error = hme_load_txmbuf(sc, &m);
		if (error != 0) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);
			break;
		}
	}

	/* Set the watchdog timer if a packet was queued. */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREWRITE);
/*
 * Transmit interrupt.
 */
static void
hme_tint(struct hme_softc *sc)
{
	caddr_t txd;
	struct ifnet *ifp = sc->sc_ifp;
	struct hme_txdesc *htx;
	unsigned int ri, txflags;

	txd = sc->sc_rb.rb_txd;
	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	/* Fetch the current position in the transmit ring. */
	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
		if (sc->sc_rb.rb_td_nbusy <= 0) {
			CTR0(KTR_HME, "hme_tint: not busy!");
			break;
		}

		txflags = HME_XD_GETFLAGS(sc->sc_pci, txd, ri);
		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);

		if ((txflags & HME_XD_OWN) != 0)
			break;

		CTR0(KTR_HME, "hme_tint: not owned");
		--sc->sc_rb.rb_td_nbusy;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		/* Complete packet transmitted? */
		if ((txflags & HME_XD_EOP) == 0)
			continue;

		KASSERT(htx->htx_lastdesc == ri,
		    ("hme_tint: ring indices skewed: %d != %d!",
		    htx->htx_lastdesc, ri));
		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
		m_freem(htx->htx_m);

		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	}
	/* Turn off the watchdog if hme(4) has transmitted all queued packets. */
	ifp->if_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;

	sc->sc_rb.rb_tdtail = ri;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
	    !IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		hme_start_locked(ifp);
}
/*
 * RX TCP/UDP checksum.
 */
static void
hme_rxcksum(struct mbuf *m, u_int32_t flags)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;
	u_int32_t temp32;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* Cannot handle fragmented packets. */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}
	cksum = htons(~(flags & HME_XD_RXCKSUM));
	/* Checksum fixup for IP options. */
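	/*
	 * The chip starts summing at the fixed offset programmed into
	 * ERX_CFG (ETHER_HDR_LEN + sizeof(struct ip), see hme_init_locked()),
	 * so any IP option words between the base header and the TCP/UDP
	 * header were included in the hardware sum and are subtracted back
	 * out in one's complement arithmetic by the loop below.
	 */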
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (u_int16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(u_int16_t), opts++) {
			temp32 = cksum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			cksum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;
}
/*
 * Receive interrupt.
 */
static void
hme_rint(struct hme_softc *sc)
{
	caddr_t xdr = sc->sc_rb.rb_rxd;
	struct ifnet *ifp = sc->sc_ifp;
	unsigned int ri, len;
	u_int32_t flags;

	/*
	 * Process all buffers with valid data.
	 */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
		if ((flags & HME_XD_OWN) != 0)
			break;

		if ((flags & HME_XD_OFL) != 0) {
			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
			    "flags=0x%x\n", ri, flags);
			hme_discard_rxbuf(sc, ri);
		} else {
			len = HME_XD_DECODE_RSIZE(flags);
			hme_read(sc, ri, len, flags);
		}
	}
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREWRITE);

	sc->sc_rb.rb_rdtail = ri;
}
static void
hme_eint(struct hme_softc *sc, u_int status)
{

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	/* Check for fatal errors that need a reset to unfreeze the DMA engine. */
	if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
		HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
		hme_init_locked(sc);
	}
}
void
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	u_int32_t status;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		hme_rint(sc);
}
static void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	u_int32_t status;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);

	device_printf(sc->sc_dev, "device timeout\n");

	hme_init_locked(sc);
}
/*
 * Initialize the MII Management Interface.
 */
static void
hme_mifinit(struct hme_softc *sc)
{
	u_int32_t v;

	/* Configure the MIF in frame mode. */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_BBMODE;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
}
int
hme_mii_readreg(device_t dev, int phy, int reg)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register. */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear the PHY select bit... */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* ...and set it to get at the external device. */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command. */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
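	/*
	 * The fields above correspond to an IEEE 802.3 clause 22 MII
	 * management frame: start bits (ST), opcode (OPC), 5 bit PHY
	 * address (PHYAD) and 5 bit register address (REGAD); in frame
	 * mode the MIF itself handles the turnaround and data phases on
	 * the MDIO line.
	 */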
	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			return (v & HME_MIF_FO_DATA);
		}
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}
int
hme_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register. */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear the PHY select bit... */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* ...and set it to get at the external device. */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command. */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}
void
hme_mii_statchg(device_t dev)
{
	struct hme_softc *sc = device_get_softc(dev);
	int instance, phy;
	u_int32_t v;

	instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
	phy = sc->sc_phys[instance];
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);

	/* Select the current PHY in the MIF configuration register. */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately. */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
		return;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
		return;
}
static int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	int error;

	HME_LOCK(sc);
	error = mii_mediachg(sc->sc_mii);
	HME_UNLOCK(sc);
	return (error);
}

static void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	HME_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		HME_UNLOCK(sc);
		return;
	}

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
	HME_UNLOCK(sc);
}
/*
 * Process an ioctl request.
 */
static int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		HME_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			/*
			 * If the interface is marked down and it is running,
			 * then stop it.
			 */
			hme_stop(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			hme_init_locked(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			hme_init_locked(sc);
		}
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
		else
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
		HME_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		HME_LOCK(sc);
		hme_setladrf(sc, 1);
		HME_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		else
			ifp->if_hwassist = 0;
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
/*
 * Set up the logical address filter.
 */
static void
hme_setladrf(struct hme_softc *sc, int reenable)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;
	u_int32_t crc;
	u_int32_t hash[4];
	u_int32_t macc;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	/* Clear the hash table. */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get the current RX configuration. */
	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);

	/*
	 * Disable the receiver while changing its state, as the documentation
	 * mandates.
	 * We then must wait until the bit clears in the register. This should
	 * take at most 3.5ms.
	 */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
		return;
	/* Disable the hash filter before writing to the filter registers. */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
	    HME_MAC_RXCFG_HENABLE, 0))
		return;

	/* Make the RX MAC really SIMPLEX. */
	macc |= HME_MAC_RXCFG_ME;
	if (reenable)
		macc |= HME_MAC_RXCFG_ENABLE;
	else
		macc &= ~HME_MAC_RXCFG_ENABLE;

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter. */
		macc |= HME_MAC_RXCFG_PMISC;
		macc &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
	}

	/* Turn off promiscuous mode; turn on the hash filter. */
	macc &= ~HME_MAC_RXCFG_PMISC;
	macc |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator and then using the high-order
	 * 6 bits as an index into the 64 bit logical address filter. The
	 * high-order two bits select the word, while the remaining four
	 * bits select the bit within the (16 bit) word.
	 */
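	/*
	 * For example (hypothetical value): if the 6 extracted CRC bits are
	 * 0x2d, then hash[0x2d >> 4] == hash[2] gets bit (0x2d & 0xf) == 13
	 * set.
	 */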
	IF_ADDR_LOCK(sc->sc_ifp);
	TAILQ_FOREACH(inm, &sc->sc_ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);

		/* We just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
	}
	IF_ADDR_UNLOCK(sc->sc_ifp);

	ifp->if_flags &= ~IFF_ALLMULTI;

	/* Now load the hash table into the chip. */
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |