 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 * This code is derived from software contributed to The NetBSD Foundation
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
 * HME Ethernet module driver.
 * The HME is, for example, part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16.  We choose 2
 * for this offset: mbuf clusters usually start on 2^11-byte boundaries, and 2
 * bytes are skipped to make sure the header following the ethernet header is
 * aligned on a natural boundary, so this ensures minimal wastage in the most
 * common case.
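 * (Worked example: with the 2-byte offset, the 14-byte ethernet header
 * occupies bytes 2-15 of a 2048-aligned cluster, so the IP header that
 * follows it starts at byte 16, on a 4-byte boundary.)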
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
 * maximum packet size (this is not verified).  Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural boundary.
 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
 * offloading.  In practice, the same technique can be used for UDP datagrams
 * as well.  However, the hardware doesn't compensate when the computed UDP
 * checksum is 0x0, which on the wire means "no checksum".  As a safeguard,
 * UDP checksum offload is therefore disabled by default.  It can be
 * reactivated by setting the special link option link0 with ifconfig(8).
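 * For example (the interface name is illustrative):
 *	ifconfig hme0 link0
 * With link0 set, hme_ioctl() below adds CSUM_UDP to sc_csum_features,
 * which is then installed as the interface's if_hwassist.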
#define HME_CSUM_FEATURES	(CSUM_TCP)

#define KTR_HME		KTR_SPARE2	/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <machine/bus.h>
#include <dev/hme/if_hmereg.h>
#include <dev/hme/if_hmevar.h>

CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
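/*
 * These compile-time limits mirror the hardware encodings used below: the
 * ERX ring size must be one of 32/64/128/256 descriptors (see the
 * HME_ERX_CFG_RINGSIZE* cases in hme_init_locked()), and the ETX ring size
 * register is programmed in increments of 16 (HME_NTXDESC / 16 - 1).
 */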
static void hme_start(struct ifnet *);
static void hme_start_locked(struct ifnet *);
static void hme_stop(struct hme_softc *);
static int hme_ioctl(struct ifnet *, u_long, caddr_t);
static void hme_tick(void *);
static int hme_watchdog(struct hme_softc *);
static void hme_init(void *);
static void hme_init_locked(struct hme_softc *);
static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int hme_meminit(struct hme_softc *);
static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void hme_mifinit(struct hme_softc *);
static void hme_setladrf(struct hme_softc *, int);
static int hme_mediachange(struct ifnet *);
static int hme_mediachange_locked(struct hme_softc *);
static void hme_mediastatus(struct ifnet *, struct ifmediareq *);
static int hme_load_txmbuf(struct hme_softc *, struct mbuf **);
static void hme_read(struct hme_softc *, int, int, u_int32_t);
static void hme_eint(struct hme_softc *, u_int);
static void hme_rint(struct hme_softc *);
static void hme_tint(struct hme_softc *);
static void hme_rxcksum(struct mbuf *, u_int32_t);
static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);

devclass_t hme_devclass;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

#define HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
#define HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
#define HME_SPC_BARRIER(spc, sc, offs, l, f) \
	bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
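/*
 * Token pasting builds the per-block bus tag and handle names; e.g.
 * HME_SEB_READ_4(sc, offs) below expands to a bus_space_read_4() on
 * sc->sc_sebt and sc->sc_sebh (plus the sc_sebo register offset, whose
 * addition is on the elided continuation lines above).
 */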
#define HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define HME_SEB_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
#define HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define HME_ERX_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
#define HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define HME_ETX_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
#define HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define HME_MAC_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
#define HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
#define HME_MIF_BARRIER(sc, offs, l, f) \
	HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
#define HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting "	\

/* Support oversized VLAN frames. */
#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
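/* That is, 1518 + 4 = 1522 bytes including the 4-byte VLAN encapsulation. */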
hme_config(struct hme_softc *sc)
	struct mii_softc *child;
	int error, rdesc, tdesc, i;

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	 * HME common initialization.
	 * hme_softc fields that must be initialized by the front-end:
	 * the bus handles, tags and offsets (split up for SBus compatibility):
	 *	sc_seb{t,h,o} (Shared Ethernet Block registers)
	 *	sc_erx{t,h,o} (Receiver Unit registers)
	 *	sc_etx{t,h,o} (Transmitter Unit registers)
	 *	sc_mac{t,h,o} (MAC registers)
	 *	sc_mif{t,h,o} (Management Interface registers)
	 * the maximum bus burst size:
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);

	/* Make sure the chip is stopped. */

	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->sc_pdmatag);

	 * Create control, RX and TX mbuf DMA tags.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size.  Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
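	 * (Each descriptor is HME_XD_SIZE == 8 bytes, so the 256-descriptor
	 * maximum fills exactly one 2048-byte aligned block.)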
	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->sc_tdmatag);

	/* Allocate the control DMA buffer. */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);

	/* Load the control DMA buffer. */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);
	 * Prepare the RX descriptors.  rdesc serves as a marker for the last
	 * processed descriptor and may be used later on.
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);

	sc->sc_csum_features = HME_CSUM_FEATURES;
	/* Initialize ifnet structure. */
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
	ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
	IFQ_SET_READY(&ifp->if_snd);
	 * The DP83840A PHYs used with HME chips don't properly advertise
	 * their media capabilities themselves, so force writing of the ANAR
	 * according to the BMSR in mii_phy_setmedia().
	error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
	    hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
	    MII_OFFSET_ANY, MIIF_FORCEANEG);
	i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
	    hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
	    MII_OFFSET_ANY, MIIF_FORCEANEG);
	if (error != 0 && i != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
	sc->sc_mii = device_get_softc(sc->sc_miibus);

	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `PHY number'
	 * mapping.  We'll use this mapping to enable the MII
	 * drivers of the external transceiver according to
	 * the currently selected media.
	sc->sc_phys[0] = sc->sc_phys[1] = -1;
	LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
		    child->mii_phy != HME_PHYAD_INTERNAL) ||
		    child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
		sc->sc_phys[child->mii_inst] = child->mii_phy;

	/* Attach the interface. */
	ether_ifattach(ifp, sc->sc_enaddr);

	 * Tell the upper layer(s) we support long frames/checksum offloads.
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
	ifp->if_hwassist |= sc->sc_csum_features;
	ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;

	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);

hme_detach(struct hme_softc *sc)
	struct ifnet *ifp = sc->sc_ifp;

	callout_drain(&sc->sc_tick_ch);
	device_delete_child(sc->sc_dev, sc->sc_miibus);
	for (i = 0; i < HME_NTXQ; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < HME_NRXDESC; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);

hme_suspend(struct hme_softc *sc)

hme_resume(struct hme_softc *sc)
	struct ifnet *ifp = sc->sc_ifp;

	if ((ifp->if_flags & IFF_UP) != 0)

hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
	struct hme_softc *sc = (struct hme_softc *)xsc;
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;

	struct hme_softc *sc = arg;

	HME_LOCK_ASSERT(sc, MA_OWNED);

	 * Unload collision counters
	ifp->if_collisions +=
	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	 * then clear the hardware counters.
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

	mii_tick(sc->sc_mii);

	if (hme_watchdog(sc) == EJUSTRETURN)

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

hme_stop(struct hme_softc *sc)

	callout_stop(&sc->sc_tick_ch);
	sc->sc_wdog_timer = 0;
	sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_flags &= ~HME_LINK;

	/* Mask all interrupts */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);

	/* Reset transmitter and receiver */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
	device_printf(sc->sc_dev, "hme_stop: reset failed\n");

 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
hme_discard_rxbuf(struct hme_softc *sc, int ix)

	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
	    ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
	    &sc->sc_rb.rb_rxdesc[ix])));

hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
	struct hme_rxdesc *rd;
	bus_dma_segment_t segs[1];

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		hme_discard_rxbuf(sc, ri);
	if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	b = mtod(m, uintptr_t);

	 * Required alignment boundary.  At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	a = imax(HME_MINRXALIGN, sc->sc_burst);
	 * Make sure the buffer is suitably aligned.  The 2 byte offset is
	 * removed when the mbuf is handed up.  XXX: this ensures at least
	 * 16 byte alignment of the header adjacent to the ethernet header,
	 * which should be sufficient in all cases.  Nevertheless, this
	 * second-guesses ALIGN().
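	/*
	 * m_adj() below advances the data pointer to the next a-byte
	 * boundary: roundup2(b, a) - b is 0 when b is already aligned and
	 * at most a - 1 otherwise.
	 */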
	m_adj(m, roundup2(b, a) - b);
	if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    m, segs, &nsegs, 0) != 0) {
	/* If nsegs is wrong then the stack is corrupt. */
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
	HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
	    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));

hme_meminit(struct hme_softc *sc)
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;

	dma = hr->rb_dmabase;

	 * Allocate transmit descriptors
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	 * We have reserved descriptor space until the next 2048 byte
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);
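	/*
	 * p (the KVA cursor) and dma (the bus-address cursor) are advanced
	 * in lockstep, so the bus address recorded for each ring always
	 * corresponds to its kernel mapping.
	 */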
	 * Allocate receive descriptors
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	 * Initialize transmit buffer descriptors
	for (i = 0; i < HME_NTXDESC; i++) {
		HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);

	STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
	STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
	for (i = 0; i < HME_NTXQ; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		if (td->htx_m != NULL) {
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);

	 * Initialize receive buffer descriptors
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	hr->rb_tdhead = hr->rb_tdtail = 0;

	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));

hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)

	HME_MAC_WRITE_4(sc, reg, val);
	HME_MAC_BARRIER(sc, reg, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
		val = HME_MAC_READ_4(sc, reg);
			/* After 3.5ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
	} while ((val & clr) != 0 && (val & set) != set);

 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
	struct hme_softc *sc = (struct hme_softc *)xsc;

hme_init_locked(struct hme_softc *sc)
	struct ifnet *ifp = sc->sc_ifp;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.

	/* step 1 & 2. Reset the Ethernet Channel */

	/* Re-initialize the MIF */

	/* Mask all MIF interrupts, just in case */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);

	/* step 3. Setup data structures in host memory */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.");

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);

	/* Load station MAC address */
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here. */
	/* step 5. RX MAC registers & counters */

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
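	/* E.g. with HME_NTXDESC == 256 this programs 256 / 16 - 1 == 15. */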
	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);

	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	      HME_SEB_STAT_HOSTTOTX |
	      HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP |
	      HME_SEB_STAT_ALL_ERRORS ));

	switch (sc->sc_burst) {
		v = HME_SEB_CFG_BURST16;
		v = HME_SEB_CFG_BURST32;
		v = HME_SEB_CFG_BURST64;
		 * Blindly setting 64-bit transfers may hang PCI cards
		 * (Cheerio?).  Allowing 64-bit transfers also breaks TX
		 * checksum offload.  We don't know whether this comes from
		 * a hardware bug or from the driver's DMA handling.
		 * if (sc->sc_flags & HME_PCI == 0)
		 *	v |= HME_SEB_CFG_64BIT;
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
		v |= HME_ERX_CFG_RINGSIZE32;
		v |= HME_ERX_CFG_RINGSIZE64;
		v |= HME_ERX_CFG_RINGSIZE128;
		v |= HME_ERX_CFG_RINGSIZE256;
		printf("hme: invalid Receive Descriptor ring size\n");

	/* Enable DMA, fix RX first byte offset. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
	/* RX TCP/UDP checksum offset */
	n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
	n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
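	/*
	 * The checksum start offset is programmed in units of 16-bit words,
	 * hence the division by 2: (14 + 20) / 2 == 17 half-words, i.e. the
	 * hardware starts summing just past a minimal (option-less) IP
	 * header.  See the option fixup in hme_rxcksum() below.
	 */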
	CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
	HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Set the current media. */
	hme_mediachange_locked(sc);

	/* Start the one second timer. */
	sc->sc_wdog_timer = 0;
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

 * Routine to DMA map an mbuf chain, set up the descriptor rings
 * accordingly and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors
 * to map the packet, or an errno otherwise.
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
 * boundary on (i.e. potentially before ds_addr) to the first
 * boundary beyond the end.  This is usually a safe assumption to
 * make, but is not documented.
hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
	bus_dma_segment_t segs[HME_NTXSEGS];
	struct hme_txdesc *htx;
	int error, i, nsegs, pci, ri, si;
	uint32_t cflags, flags;

	if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
	if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
		if (M_WRITABLE(*m0) == 0) {
			m = m_dup(*m0, M_DONTWAIT);
		i = sizeof(struct ether_header);
		m = m_pullup(*m0, i + sizeof(struct ip));
		ip = (struct ip *)(mtod(m, caddr_t) + i);
		i += (ip->ip_hl << 2);
		cflags = i << HME_XD_TXCKSUM_SSHIFT |
		    ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
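		/*
		 * That is, the SSHIFT field holds the checksum start offset
		 * (just past the IP header), and the OSHIFT field holds the
		 * stuff offset: that start plus csum_data, the checksum
		 * field offset within the protocol header supplied by the
		 * stack.
		 */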
	error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
	    *m0, segs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m0, M_DONTWAIT, HME_NTXSEGS);
		error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
		    *m0, segs, &nsegs, 0);
	} else if (error != 0)
	/* If nsegs is wrong then the stack is corrupt. */
	KASSERT(nsegs <= HME_NTXSEGS,
	    ("%s: too many DMA segments (%d)", __func__, nsegs));
	if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
		/* Retry with m_collapse(9)? */
	bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);

	si = ri = sc->sc_rb.rb_tdhead;
	txd = sc->sc_rb.rb_txd;
	pci = sc->sc_flags & HME_PCI;
	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
	    HME_XD_GETFLAGS(pci, txd, ri));
	for (i = 0; i < nsegs; i++) {
		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
			flags |= HME_XD_SOP | cflags;
			flags |= HME_XD_OWN | cflags;
		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
		HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
		HME_XD_SETFLAGS(pci, txd, ri, flags);
		sc->sc_rb.rb_td_nbusy++;
		htx->htx_lastdesc = ri;
		ri = (ri + 1) % HME_NTXDESC;
	sc->sc_rb.rb_tdhead = ri;

	/* set EOP on the last descriptor */
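	/*
	 * ri currently points one past the last descriptor written; adding
	 * HME_NTXDESC - 1 before the modulo steps it back by one without
	 * going negative.
	 */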
	ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
	flags = HME_XD_GETFLAGS(pci, txd, ri);
	flags |= HME_XD_EOP;
	CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
	HME_XD_SETFLAGS(pci, txd, ri, flags);
	/* Turn over ownership of the first descriptor to the hardware. */
	flags = HME_XD_GETFLAGS(pci, txd, si);
	flags |= HME_XD_OWN;
	CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
	HME_XD_SETFLAGS(pci, txd, si, flags);

	STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
	STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);

	/* start the transmission. */
	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);

 * Pass a packet to the higher levels.
hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
	struct ifnet *ifp = sc->sc_ifp;

	if (len <= sizeof(struct ether_header) ||
	    len > HME_MAX_FRAMESIZE) {
		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
		hme_discard_rxbuf(sc, ix);

	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
	CTR1(KTR_HME, "hme_read: len %d", len);
	if (hme_add_rxbuf(sc, ix, 0) != 0) {
		 * hme_add_rxbuf will leave the old buffer in the ring until
		 * it is sure that a new buffer can be mapped.  If it cannot,
		 * drop the packet, but leave the interface up.
		hme_discard_rxbuf(sc, ix);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
	m_adj(m, HME_RXOFFS);
	/* RX TCP/UDP checksum */
	if (ifp->if_capenable & IFCAP_RXCSUM)
		hme_rxcksum(m, flags);
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);

hme_start(struct ifnet *ifp)
	struct hme_softc *sc = ifp->if_softc;

	hme_start_locked(ifp);

hme_start_locked(struct ifnet *ifp)
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)

	for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		error = hme_load_txmbuf(sc, &m);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m);

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_wdog_timer = 5;

 * Transmit interrupt.
hme_tint(struct hme_softc *sc)
	struct ifnet *ifp = sc->sc_ifp;
	struct hme_txdesc *htx;
	unsigned int ri, txflags;

	txd = sc->sc_rb.rb_txd;
	htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	/* Fetch current position in the transmit ring */
	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
		if (sc->sc_rb.rb_td_nbusy <= 0) {
			CTR0(KTR_HME, "hme_tint: not busy!");
		txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
		if ((txflags & HME_XD_OWN) != 0)
		CTR0(KTR_HME, "hme_tint: not owned");
		--sc->sc_rb.rb_td_nbusy;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		/* Complete packet transmitted? */
		if ((txflags & HME_XD_EOP) == 0)
		KASSERT(htx->htx_lastdesc == ri,
		    ("%s: ring indices skewed: %d != %d!",
		    __func__, htx->htx_lastdesc, ri));
		bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
		m_freem(htx->htx_m);
		STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
		STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
		htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
	sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
	sc->sc_rb.rb_tdtail = ri;
	hme_start_locked(ifp);

 * RX TCP/UDP checksum
hme_rxcksum(struct mbuf *m, u_int32_t flags)
	struct ether_header *eh;
	int32_t hlen, len, pktlen;
	u_int16_t cksum, *opts;

	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
	if (ntohs(ip->ip_len) < hlen)
	if (ntohs(ip->ip_len) != pktlen)
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

		if (pktlen < (hlen + sizeof(struct tcphdr)))
		if (pktlen < (hlen + sizeof(struct udphdr)))
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */

	cksum = ~(flags & HME_XD_RXCKSUM);
	/* checksum fixup for IP options */
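	/*
	 * The hardware sums from the fixed offset programmed in
	 * hme_init_locked() (just past a minimal IP header), so when the
	 * header carries options, the option words are wrongly included in
	 * the sum and must be backed out here, folding the one's complement
	 * carry after each subtraction.
	 */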
	len = hlen - sizeof(struct ip);
	opts = (u_int16_t *)(ip + 1);
	for (; len > 0; len -= sizeof(u_int16_t), opts++) {
		temp32 = cksum - *opts;
		temp32 = (temp32 >> 16) + (temp32 & 65535);
		cksum = temp32 & 65535;
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = cksum;

 * Receive interrupt.
hme_rint(struct hme_softc *sc)
	caddr_t xdr = sc->sc_rb.rb_rxd;
	struct ifnet *ifp = sc->sc_ifp;
	unsigned int ri, len;

	 * Process all buffers with valid data.
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
		flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
		if ((flags & HME_XD_OWN) != 0)
		if ((flags & HME_XD_OFL) != 0) {
			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
			    "flags=0x%x\n", ri, flags);
			hme_discard_rxbuf(sc, ri);
		len = HME_XD_DECODE_RSIZE(flags);
		hme_read(sc, ri, len, flags);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_rb.rb_rdtail = ri;

hme_eint(struct hme_softc *sc, u_int status)

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed: "
		    "cfg=%#x, stat=%#x, sm=%#x\n",
		    HME_MIF_READ_4(sc, HME_MIFI_CFG),
		    HME_MIF_READ_4(sc, HME_MIFI_STAT),
		    HME_MIF_READ_4(sc, HME_MIFI_SM));
	/* Check for fatal errors that need a reset to unfreeze the DMA engine. */
	if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
		HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
		hme_init_locked(sc);

	struct hme_softc *sc = (struct hme_softc *)v;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		hme_eint(sc, status);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)

hme_watchdog(struct hme_softc *sc)
	struct ifnet *ifp = sc->sc_ifp;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	CTR1(KTR_HME, "hme_watchdog: status %x",
	    (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
	if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)

	if ((sc->sc_flags & HME_LINK) != 0)
		device_printf(sc->sc_dev, "device timeout\n");
	else if (bootverbose)
		device_printf(sc->sc_dev, "device timeout (no link)\n");

	hme_init_locked(sc);
	hme_start_locked(ifp);
	return (EJUSTRETURN);

 * Initialize the MII Management Interface
hme_mifinit(struct hme_softc *sc)

	 * Configure the MIF in frame mode, polling disabled, internal PHY
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
	 * If the currently selected media uses the external transceiver,
	 * enable its MII drivers (which basically isolates the internal
	 * one and vice versa).  If the current media hasn't been set yet,
	 * we default to the internal transceiver.
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);
	if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
	    sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
		v |= HME_MAC_XIF_MIIENABLE;
		v &= ~HME_MAC_XIF_MIIENABLE;
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

hme_mii_readreg(device_t dev, int phy, int reg)
	struct hme_softc *sc;

	sc = device_get_softc(dev);
	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
		v &= ~HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
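	/*
	 * These fields follow the IEEE 802.3 clause 22 MII management frame
	 * layout: start bits, opcode, PHY address and register address, with
	 * the data (or turnaround) bits in the low 16 bits of the register.
	 */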
	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (v & HME_MIF_FO_DATA);
	device_printf(sc->sc_dev, "mii_read timeout\n");
hme_mii_writereg(device_t dev, int phy, int reg, int val)
	struct hme_softc *sc;

	sc = device_get_softc(dev);
	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
		v &= ~HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	for (n = 0; n < 100; n++) {
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
	device_printf(sc->sc_dev, "mii_write timeout\n");

hme_mii_statchg(device_t dev)
	struct hme_softc *sc;
	uint32_t rxcfg, txcfg;

	sc = device_get_softc(dev);

	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");

	if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
	    IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
		sc->sc_flags |= HME_LINK;
		sc->sc_flags &= ~HME_LINK;

	txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
	    HME_MAC_TXCFG_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable TX MAC\n");
	rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
	    HME_MAC_RXCFG_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");

	/* Set the MAC Full Duplex bit appropriately. */
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		txcfg |= HME_MAC_TXCFG_FULLDPLX;
		txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
	    (sc->sc_flags & HME_LINK) != 0) {
		if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
		    HME_MAC_TXCFG_ENABLE))
			device_printf(sc->sc_dev, "cannot enable TX MAC\n");
		if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
		    HME_MAC_RXCFG_ENABLE))
			device_printf(sc->sc_dev, "cannot enable RX MAC\n");

hme_mediachange(struct ifnet *ifp)
	struct hme_softc *sc = ifp->if_softc;

	error = hme_mediachange_locked(sc);

hme_mediachange_locked(struct hme_softc *sc)
	struct mii_softc *child;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
		device_printf(sc->sc_dev, "hme_mediachange_locked");
	 * If both PHYs are present, reset them.  This is required for
	 * unisolating the previously isolated PHY when switching PHYs.
	 * As the above hme_mifinit() call will set the MII drivers in
	 * the XIF configuration register according to the currently
	 * selected media, there should be no window during which the
	 * data paths of both transceivers are open at the same time,
	 * even if the PHY device drivers use MIIF_NOISOLATE.
	if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
		LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
	return (mii_mediachg(sc->sc_mii));

hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0) {
	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;

 * Process an ioctl request.
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;

		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->sc_ifflags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				hme_setladrf(sc, 1);
				hme_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		if ((ifp->if_flags & IFF_LINK0) != 0)
			sc->sc_csum_features |= CSUM_UDP;
			sc->sc_csum_features &= ~CSUM_UDP;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
		sc->sc_ifflags = ifp->if_flags;
		hme_setladrf(sc, 1);
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		ifp->if_capenable = ifr->ifr_reqcap;
		if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
			ifp->if_hwassist = sc->sc_csum_features;
			ifp->if_hwassist = 0;
		error = ether_ioctl(ifp, cmd, data);

 * Set up the logical address filter.
hme_setladrf(struct hme_softc *sc, int reenable)
	struct ifnet *ifp = sc->sc_ifp;
	struct ifmultiaddr *inm;

	HME_LOCK_ASSERT(sc, MA_OWNED);
	/* Clear the hash table. */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get the current RX configuration. */
	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);

	 * Turn off promiscuous mode, promiscuous group mode (all multicast),
	 * and hash filter.  Depending on the case, the right bit will be
	macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
	 * Disable the receiver while changing its state, as the documentation
	 * mandates.
	 * We then must wait until the bit clears in the register.  This should
	 * take at most 3.5ms.
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
	    HME_MAC_RXCFG_ENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable RX MAC\n");
	/* Disable the hash filter before writing to the filter registers. */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
	    HME_MAC_RXCFG_HENABLE, 0))
		device_printf(sc->sc_dev, "cannot disable hash filter\n");

	/* Make the RX MAC really SIMPLEX. */
	macc |= HME_MAC_RXCFG_ME;
		macc |= HME_MAC_RXCFG_ENABLE;
		macc &= ~HME_MAC_RXCFG_ENABLE;

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		macc |= HME_MAC_RXCFG_PMISC;
	if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		macc |= HME_MAC_RXCFG_PGRP;
	macc |= HME_MAC_RXCFG_HENABLE;

	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter.  The high order bit
	 * selects the word, while the rest of the bits select the bit within
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    inm->ifma_addr), ETHER_ADDR_LEN);
		/* Just want the 6 most significant bits. */
		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
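		/*
		 * Of the 6-bit index, the upper two bits (crc >> 4) pick one
		 * of the four 16-bit HASHTAB registers loaded below, and the
		 * lower four bits (crc & 0xf) pick the bit within it.
		 */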
	if_maddr_runlock(ifp);

	/* Now load the hash table into the chip */
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
		device_printf(sc->sc_dev, "cannot configure RX MAC\n");