2 * Copyright (c) 1999 The NetBSD Foundation, Inc.
3 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
6 * This code is derived from software contributed to The NetBSD Foundation
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
37 * from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 * HME Ethernet module driver.
46 * The HME is e.g. part of the PCIO PCI multi function device.
47 * It supports TX gathering and TX and RX checksum offloading.
48 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
49 * for this offset: mbuf clusters usually start on 2^11 (2048) byte boundaries,
50 * and 2 bytes are skipped so that the header following the Ethernet header is
51 * aligned on a natural boundary, which ensures minimal wastage in the most common case.
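 *
 * As a worked illustration (added here, not part of the original comment): the
 * Ethernet header is ETHER_HDR_LEN (14) bytes, so with an RX offset of 2 the IP
 * header begins 16 bytes into the buffer, i.e. on a natural boundary whenever
 * the buffer itself starts 16-byte aligned.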
53 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
54 * maximum packet size (this is not verified). Buffers starting on odd
55 * boundaries must be mapped so that the burst can start on a natural boundary.
57 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum offloading.
58 * In reality, the same technique can be used for UDP datagrams as well. However,
59 * the hardware does not compensate for a UDP checksum that computes to 0x0
60 * (which must be sent as all ones). As a safeguard, UDP checksum offload is
61 * disabled by default; it can be reactivated by setting the link0 option with ifconfig(8).
63 #define HME_CSUM_FEATURES (CSUM_TCP)
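
/*
 * Sketch of how the UDP case is enabled (illustrative paraphrase of the
 * SIOCSIFFLAGS handling in hme_ioctl() below, not additional driver logic):
 *
 *	if ((ifp->if_flags & IFF_LINK0) != 0)
 *		sc->sc_csum_features |= CSUM_UDP;
 *	else
 *		sc->sc_csum_features &= ~CSUM_UDP;
 *	ifp->if_hwassist = sc->sc_csum_features;
 *
 * so "ifconfig hme0 link0" widens the offloaded set beyond HME_CSUM_FEATURES.
 */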
67 #define KTR_HME KTR_SPARE2 /* XXX */
69 #include <sys/param.h>
70 #include <sys/systm.h>
72 #include <sys/endian.h>
73 #include <sys/kernel.h>
74 #include <sys/module.h>
77 #include <sys/malloc.h>
78 #include <sys/socket.h>
79 #include <sys/sockio.h>
82 #include <net/ethernet.h>
84 #include <net/if_arp.h>
85 #include <net/if_dl.h>
86 #include <net/if_media.h>
87 #include <net/if_types.h>
88 #include <net/if_vlan_var.h>
90 #include <netinet/in.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/ip.h>
93 #include <netinet/tcp.h>
94 #include <netinet/udp.h>
96 #include <dev/mii/mii.h>
97 #include <dev/mii/miivar.h>
99 #include <machine/bus.h>
101 #include <dev/hme/if_hmereg.h>
102 #include <dev/hme/if_hmevar.h>
104 CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
105 CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
107 static void hme_start(struct ifnet *);
108 static void hme_start_locked(struct ifnet *);
109 static void hme_stop(struct hme_softc *);
110 static int hme_ioctl(struct ifnet *, u_long, caddr_t);
111 static void hme_tick(void *);
112 static int hme_watchdog(struct hme_softc *);
113 static void hme_init(void *);
114 static void hme_init_locked(struct hme_softc *);
115 static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
116 static int hme_meminit(struct hme_softc *);
117 static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
118 u_int32_t, u_int32_t);
119 static void hme_mifinit(struct hme_softc *);
120 static void hme_setladrf(struct hme_softc *, int);
122 static int hme_mediachange(struct ifnet *);
123 static int hme_mediachange_locked(struct hme_softc *);
124 static void hme_mediastatus(struct ifnet *, struct ifmediareq *);
126 static int hme_load_txmbuf(struct hme_softc *, struct mbuf **);
127 static void hme_read(struct hme_softc *, int, int, u_int32_t);
128 static void hme_eint(struct hme_softc *, u_int);
129 static void hme_rint(struct hme_softc *);
130 static void hme_tint(struct hme_softc *);
131 static void hme_rxcksum(struct mbuf *, u_int32_t);
133 static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
135 devclass_t hme_devclass;
139 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
140 MODULE_DEPEND(hme, miibus, 1, 1, 1);
142 #define HME_SPC_READ_4(spc, sc, offs) \
143 bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
145 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
146 bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
148 #define HME_SPC_BARRIER(spc, sc, offs, l, f) \
149 bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
152 #define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs))
153 #define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v))
154 #define HME_SEB_BARRIER(sc, offs, l, f) \
155 HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
156 #define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs))
157 #define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v))
158 #define HME_ERX_BARRIER(sc, offs, l, f) \
159 HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
160 #define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs))
161 #define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v))
162 #define HME_ETX_BARRIER(sc, offs, l, f) \
163 HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
164 #define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs))
165 #define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v))
166 #define HME_MAC_BARRIER(sc, offs, l, f) \
167 HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
168 #define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs))
169 #define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v))
170 #define HME_MIF_BARRIER(sc, offs, l, f) \
171 HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
174 #define HME_WHINE(dev, ...) do { \
175 if (hme_nerr++ < HME_MAXERR) \
176 device_printf(dev, __VA_ARGS__); \
177 if (hme_nerr == HME_MAXERR) { \
178 device_printf(dev, "too many errors; not reporting " \
183 /* Support oversized VLAN frames. */
184 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
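
/*
 * For reference (illustrative arithmetic, not from the original source):
 * ETHER_MAX_LEN is 1518 and ETHER_VLAN_ENCAP_LEN is 4, so HME_MAX_FRAMESIZE
 * works out to 1522 bytes, i.e. a maximum-sized frame carrying one 802.1Q tag.
 */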
187 hme_config(struct hme_softc *sc)
190 struct mii_softc *child;
192 int error, rdesc, tdesc, i;
194 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
199 * HME common initialization.
201 * hme_softc fields that must be initialized by the front-end:
206 * the bus handles, tags and offsets (split up for SBus compatibility):
207 * sc_seb{t,h,o} (Shared Ethernet Block registers)
208 * sc_erx{t,h,o} (Receiver Unit registers)
209 * sc_etx{t,h,o} (Transmitter Unit registers)
210 * sc_mac{t,h,o} (MAC registers)
211 * sc_mif{t,h,o} (Management Interface registers)
213 * the maximum bus burst size:
218 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
220 /* Make sure the chip is stopped. */
225 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
226 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
227 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
228 NULL, NULL, &sc->sc_pdmatag);
233 * Create control, RX and TX mbuf DMA tags.
234 * Buffer descriptors must be aligned on a 2048 byte boundary;
235 * take this into account when calculating the size. Note that
236 * the maximum number of descriptors (256) occupies 2048 bytes,
237 * so we allocate that much regardless of HME_N*DESC.
240 error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
241 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
242 1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
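
/*
 * Layout sketch of the control buffer created above (illustrative; assumes
 * HME_XD_SIZE is 8 bytes, which matches the "256 descriptors occupy 2048
 * bytes" note in the comment above):
 *
 *	offset 0	HME_NTXDESC * HME_XD_SIZE bytes of TX descriptors,
 *			padded up to the next 2048 byte boundary
 *	offset 2048	HME_NRXDESC * HME_XD_SIZE bytes of RX descriptors
 *
 * hme_meminit() below carves the loaded DMA memory up in exactly this way.
 */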
246 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
247 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
248 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
252 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
253 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
254 MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
255 NULL, NULL, &sc->sc_tdmatag);
259 /* Allocate the control DMA buffer. */
260 error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
261 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
263 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
267 /* Load the control DMA buffer. */
268 sc->sc_rb.rb_dmabase = 0;
269 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
270 sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
271 sc->sc_rb.rb_dmabase == 0) {
272 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
276 CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
277 sc->sc_rb.rb_dmabase);
280 * Prepare the RX descriptors. rdesc serves as a marker for the last
281 * initialized descriptor and is reused by the error path below.
283 for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
284 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
285 error = bus_dmamap_create(sc->sc_rdmatag, 0,
286 &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
290 error = bus_dmamap_create(sc->sc_rdmatag, 0,
291 &sc->sc_rb.rb_spare_dmamap);
294 /* Same for the TX descs. */
295 for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
296 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
297 error = bus_dmamap_create(sc->sc_tdmatag, 0,
298 &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
303 sc->sc_csum_features = HME_CSUM_FEATURES;
304 /* Initialize ifnet structure. */
306 if_initname(ifp, device_get_name(sc->sc_dev),
307 device_get_unit(sc->sc_dev));
308 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
309 ifp->if_start = hme_start;
310 ifp->if_ioctl = hme_ioctl;
311 ifp->if_init = hme_init;
312 IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
313 ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
314 IFQ_SET_READY(&ifp->if_snd);
319 * The DP83840A PHYs used with HME chips don't properly advertise their
320 * media capabilities themselves, so force writing the ANAR according
321 * to the BMSR in mii_phy_setmedia().
323 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
324 hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
325 MII_OFFSET_ANY, MIIF_FORCEANEG);
326 i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
327 hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
328 MII_OFFSET_ANY, MIIF_FORCEANEG);
329 if (error != 0 && i != 0) {
331 device_printf(sc->sc_dev, "attaching PHYs failed\n");
334 sc->sc_mii = device_get_softc(sc->sc_miibus);
337 * Walk along the list of attached MII devices and
338 * establish an `MII instance' to `PHY number'
339 * mapping. We'll use this mapping to enable the MII
340 * drivers of the external transceiver according to
341 * the currently selected media.
343 sc->sc_phys[0] = sc->sc_phys[1] = -1;
344 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
346 * Note: we support just two PHYs: the built-in
347 * internal device and an external one on the MII connector.
350 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
351 child->mii_phy != HME_PHYAD_INTERNAL) ||
352 child->mii_inst > 1) {
353 device_printf(sc->sc_dev, "cannot accommodate "
354 "MII device %s at phy %d, instance %d\n",
355 device_get_name(child->mii_dev),
356 child->mii_phy, child->mii_inst);
360 sc->sc_phys[child->mii_inst] = child->mii_phy;
363 /* Attach the interface. */
364 ether_ifattach(ifp, sc->sc_enaddr);
367 * Tell the upper layer(s) we support long frames/checksum offloads.
369 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
370 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
371 ifp->if_hwassist |= sc->sc_csum_features;
372 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
376 for (i = 0; i < tdesc; i++) {
377 bus_dmamap_destroy(sc->sc_tdmatag,
378 sc->sc_rb.rb_txdesc[i].htx_dmamap);
380 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
382 for (i = 0; i < rdesc; i++) {
383 bus_dmamap_destroy(sc->sc_rdmatag,
384 sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
386 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
388 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
390 bus_dma_tag_destroy(sc->sc_tdmatag);
392 bus_dma_tag_destroy(sc->sc_rdmatag);
394 bus_dma_tag_destroy(sc->sc_cdmatag);
396 bus_dma_tag_destroy(sc->sc_pdmatag);
403 hme_detach(struct hme_softc *sc)
405 struct ifnet *ifp = sc->sc_ifp;
411 callout_drain(&sc->sc_tick_ch);
414 device_delete_child(sc->sc_dev, sc->sc_miibus);
416 for (i = 0; i < HME_NTXQ; i++) {
417 bus_dmamap_destroy(sc->sc_tdmatag,
418 sc->sc_rb.rb_txdesc[i].htx_dmamap);
420 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
421 for (i = 0; i < HME_NRXDESC; i++) {
422 bus_dmamap_destroy(sc->sc_rdmatag,
423 sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
425 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
426 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
427 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
428 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
429 bus_dma_tag_destroy(sc->sc_tdmatag);
430 bus_dma_tag_destroy(sc->sc_rdmatag);
431 bus_dma_tag_destroy(sc->sc_cdmatag);
432 bus_dma_tag_destroy(sc->sc_pdmatag);
436 hme_suspend(struct hme_softc *sc)
445 hme_resume(struct hme_softc *sc)
447 struct ifnet *ifp = sc->sc_ifp;
450 if ((ifp->if_flags & IFF_UP) != 0)
456 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
458 struct hme_softc *sc = (struct hme_softc *)xsc;
463 ("%s: too many DMA segments (%d)", __func__, nsegs));
464 sc->sc_rb.rb_dmabase = segs[0].ds_addr;
470 struct hme_softc *sc = arg;
473 HME_LOCK_ASSERT(sc, MA_OWNED);
477 * Unload collision counters
479 ifp->if_collisions +=
480 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
481 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
482 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
483 HME_MAC_READ_4(sc, HME_MACI_LTCNT);
486 * then clear the hardware counters.
488 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
489 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
490 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
491 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
493 mii_tick(sc->sc_mii);
495 if (hme_watchdog(sc) == EJUSTRETURN)
498 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
502 hme_stop(struct hme_softc *sc)
507 callout_stop(&sc->sc_tick_ch);
508 sc->sc_wdog_timer = 0;
509 sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
510 sc->sc_flags &= ~HME_LINK;
512 /* Mask all interrupts */
513 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
515 /* Reset transmitter and receiver */
516 HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
518 HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
519 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
520 for (n = 0; n < 20; n++) {
521 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
522 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
527 device_printf(sc->sc_dev, "hme_stop: reset failed\n");
531 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
532 * ring for subsequent use.
535 hme_discard_rxbuf(struct hme_softc *sc, int ix)
539 * We dropped a packet; reinitialize the descriptor and turn
540 * ownership back to the hardware.
542 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
543 ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
544 &sc->sc_rb.rb_rxdesc[ix])));
548 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
550 struct hme_rxdesc *rd;
552 bus_dma_segment_t segs[1];
557 rd = &sc->sc_rb.rb_rxdesc[ri];
558 unmap = rd->hrx_m != NULL;
559 if (unmap && keepold) {
561 * Reinitialize the descriptor flags, as they may have been
562 * altered by the hardware.
564 hme_discard_rxbuf(sc, ri);
567 if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
569 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
570 b = mtod(m, uintptr_t);
572 * Required alignment boundary. At least 16 is needed, but since
573 * the mapping must be done in a way that a burst can start on a
574 * natural boundary we might need to extend this.
576 a = imax(HME_MINRXALIGN, sc->sc_burst);
578 * Make sure the buffer is suitably aligned. The 2 byte offset is removed
579 * when the mbuf is handed up. XXX: this ensures at least 16 byte
580 * alignment of the header adjacent to the ethernet header, which
581 * should be sufficient in all cases. Nevertheless, this second-guesses
584 m_adj(m, roundup2(b, a) - b);
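/*
 * Worked example of the adjustment above (illustrative): with sc_burst == 64
 * the alignment a is 64, so a buffer whose data started at 0x...1010 would be
 * advanced by roundup2(0x1010, 64) - 0x1010 == 0x30 (48) bytes.  For
 * cluster-aligned buffers (2^11 boundaries, see the top of this file) the
 * adjustment is simply 0.
 */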
585 if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
586 m, segs, &nsegs, 0) != 0) {
590 /* If nsegs is wrong then the stack is corrupt. */
592 ("%s: too many DMA segments (%d)", __func__, nsegs));
594 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
595 BUS_DMASYNC_POSTREAD);
596 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
598 map = rd->hrx_dmamap;
599 rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
600 sc->sc_rb.rb_spare_dmamap = map;
601 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
602 HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
605 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
606 HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
611 hme_meminit(struct hme_softc *sc)
613 struct hme_ring *hr = &sc->sc_rb;
614 struct hme_txdesc *td;
621 dma = hr->rb_dmabase;
624 * Allocate transmit descriptors
628 p += HME_NTXDESC * HME_XD_SIZE;
629 dma += HME_NTXDESC * HME_XD_SIZE;
631 * We have reserved descriptor space until the next 2048 byte
634 dma = (bus_addr_t)roundup((u_long)dma, 2048);
635 p = (caddr_t)roundup((u_long)p, 2048);
638 * Allocate receive descriptors
642 p += HME_NRXDESC * HME_XD_SIZE;
643 dma += HME_NRXDESC * HME_XD_SIZE;
644 /* Again move forward to the next 2048 byte boundary. */
645 dma = (bus_addr_t)roundup((u_long)dma, 2048);
646 p = (caddr_t)roundup((u_long)p, 2048);
649 * Initialize transmit buffer descriptors
651 for (i = 0; i < HME_NTXDESC; i++) {
652 HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
653 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
656 STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
657 STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
658 for (i = 0; i < HME_NTXQ; i++) {
659 td = &sc->sc_rb.rb_txdesc[i];
660 if (td->htx_m != NULL) {
661 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
662 BUS_DMASYNC_POSTWRITE);
663 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
667 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
671 * Initialize receive buffer descriptors
673 for (i = 0; i < HME_NRXDESC; i++) {
674 error = hme_add_rxbuf(sc, i, 1);
679 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
680 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
682 hr->rb_tdhead = hr->rb_tdtail = 0;
685 CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
687 CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
689 CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
690 *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
691 CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
692 *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
697 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
698 u_int32_t clr, u_int32_t set)
704 HME_MAC_WRITE_4(sc, reg, val);
705 HME_MAC_BARRIER(sc, reg, 4,
706 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
707 if (clr == 0 && set == 0)
708 return (1); /* just write, no bits to wait for */
712 val = HME_MAC_READ_4(sc, reg);
714 /* After 3.5ms, we should have been done. */
715 device_printf(sc->sc_dev, "timeout while writing to "
716 "MAC configuration register\n");
719 } while ((val & clr) != 0 && (val & set) != set);
724 * Initialization of interface; set up initialization block
725 * and transmit/receive descriptor rings.
730 struct hme_softc *sc = (struct hme_softc *)xsc;
738 hme_init_locked(struct hme_softc *sc)
740 struct ifnet *ifp = sc->sc_ifp;
744 HME_LOCK_ASSERT(sc, MA_OWNED);
746 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
750 * Initialization sequence. The numbered steps below correspond
751 * to the sequence outlined in section 6.3.5.1 in the Ethernet
752 * Channel Engine manual (part of the PCIO manual).
753 * See also the STP2002-STQ document from Sun Microsystems.
756 /* step 1 & 2. Reset the Ethernet Channel */
759 /* Re-initialize the MIF */
763 /* Mask all MIF interrupts, just in case */
764 HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
767 /* step 3. Setup data structures in host memory */
768 if (hme_meminit(sc) != 0) {
769 device_printf(sc->sc_dev, "out of buffers; init aborted.");
773 /* step 4. TX MAC registers & counters */
774 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
775 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
776 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
777 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
778 HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
780 /* Load station MAC address */
782 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
783 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
784 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
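
/*
 * Example of the encoding above (illustrative): for the station address
 * 08:00:20:a1:b2:c3 the three 16-bit registers are loaded with
 * MACADDR0 = 0x0800, MACADDR1 = 0x20a1 and MACADDR2 = 0xb2c3.
 */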
787 * Init seed for backoff
788 * (source suggested by manual: low 10 bits of MAC address)
790 v = ((ea[4] << 8) | ea[5]) & 0x3fff;
791 HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
793 /* Note: Accepting power-on default for other MAC registers here.. */
795 /* step 5. RX MAC registers & counters */
798 /* step 6 & 7. Program Descriptor Ring Base Addresses */
799 HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
800 /* Transmit Descriptor ring size: in increments of 16 */
801 HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
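/*
 * E.g. (illustrative): with HME_NTXDESC == 256 the value written above is
 * 256 / 16 - 1 == 15.  The CTASSERT near the top of the file guarantees that
 * HME_NTXDESC is a multiple of 16, so the encoding is always exact.
 */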
803 HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
804 HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
806 /* step 8. Global Configuration & Interrupt Mask */
807 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
808 ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
809 HME_SEB_STAT_HOSTTOTX |
810 HME_SEB_STAT_RXTOHOST |
812 HME_SEB_STAT_TXPERR |
813 HME_SEB_STAT_RCNTEXP |
814 HME_SEB_STAT_ALL_ERRORS ));
816 switch (sc->sc_burst) {
821 v = HME_SEB_CFG_BURST16;
824 v = HME_SEB_CFG_BURST32;
827 v = HME_SEB_CFG_BURST64;
831 * Blindly setting 64bit transfers may hang PCI cards (Cheerio?).
832 * Allowing 64bit transfers breaks TX checksum offload as well.
833 * It is unclear whether this comes from a hardware bug or from the driver's DMA handling.
836 * if (sc->sc_flags & HME_PCI == 0)
837 * v |= HME_SEB_CFG_64BIT;
839 HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
841 /* step 9. ETX Configuration: use mostly default values */
844 v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
845 v |= HME_ETX_CFG_DMAENABLE;
846 HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
848 /* step 10. ERX Configuration */
849 v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
851 /* Encode Receive Descriptor ring size: four possible values */
852 v &= ~HME_ERX_CFG_RINGSIZEMSK;
853 switch (HME_NRXDESC) {
855 v |= HME_ERX_CFG_RINGSIZE32;
858 v |= HME_ERX_CFG_RINGSIZE64;
861 v |= HME_ERX_CFG_RINGSIZE128;
864 v |= HME_ERX_CFG_RINGSIZE256;
867 printf("hme: invalid Receive Descriptor ring size\n");
871 /* Enable DMA, fix RX first byte offset. */
872 v &= ~HME_ERX_CFG_FBO_MASK;
873 v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
874 /* RX TCP/UDP checksum offset */
875 n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
876 n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
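/*
 * Illustrative arithmetic for the checksum start value computed above:
 * ETHER_HDR_LEN (14) plus sizeof(struct ip) (20) is 34 bytes, i.e. n == 17
 * half-words, so hardware checksumming begins right after a standard IPv4
 * header without options.
 */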
878 CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
879 HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
881 /* step 11. XIF Configuration */
882 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
884 CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
885 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
887 /* step 12. RX_MAC Configuration Register */
888 v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
889 v |= HME_MAC_RXCFG_ENABLE;
890 v &= ~(HME_MAC_RXCFG_DCRCS);
891 CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
892 HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
894 /* step 13. TX_MAC Configuration Register */
895 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
896 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
897 CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
898 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
900 /* step 14. Issue Transmit Pending command */
903 /* Debug: double-check. */
904 CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
905 "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
906 HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
907 HME_ERX_READ_4(sc, HME_ERXI_RING),
908 HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
909 CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
910 HME_SEB_READ_4(sc, HME_SEBI_IMASK),
911 HME_ERX_READ_4(sc, HME_ERXI_CFG),
912 HME_ETX_READ_4(sc, HME_ETXI_CFG));
913 CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
914 HME_MAC_READ_4(sc, HME_MACI_RXCFG),
915 HME_MAC_READ_4(sc, HME_MACI_TXCFG));
918 ifp->if_drv_flags |= IFF_DRV_RUNNING;
919 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
921 /* Set the current media. */
922 hme_mediachange_locked(sc);
924 /* Start the one second timer. */
925 sc->sc_wdog_timer = 0;
926 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
930 * Routine to DMA map an mbuf chain, set up the descriptor rings
931 * accordingly and start the transmission.
932 * Returns 0 on success, -1 if there were not enough free descriptors
933 * to map the packet, or an errno otherwise.
935 * XXX: this relies on the fact that segments returned by
936 * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
937 * boundary on (i.e. potentially before ds_addr) to the first
938 * boundary beyond the end. This is usually a safe assumption to
939 * make, but is not documented.
942 hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
944 bus_dma_segment_t segs[HME_NTXSEGS];
945 struct hme_txdesc *htx;
949 int error, i, nsegs, pci, ri, si;
950 uint32_t cflags, flags;
952 if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
956 if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
957 if (M_WRITABLE(*m0) == 0) {
958 m = m_dup(*m0, M_NOWAIT);
964 i = sizeof(struct ether_header);
965 m = m_pullup(*m0, i + sizeof(struct ip));
970 ip = (struct ip *)(mtod(m, caddr_t) + i);
971 i += (ip->ip_hl << 2);
972 cflags = i << HME_XD_TXCKSUM_SSHIFT |
973 ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
978 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
979 *m0, segs, &nsegs, 0);
980 if (error == EFBIG) {
981 m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS);
988 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
989 *m0, segs, &nsegs, 0);
995 } else if (error != 0)
997 /* If nsegs is wrong then the stack is corrupt. */
998 KASSERT(nsegs <= HME_NTXSEGS,
999 ("%s: too many DMA segments (%d)", __func__, nsegs));
1005 if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
1006 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1007 /* Retry with m_collapse(9)? */
1010 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
1012 si = ri = sc->sc_rb.rb_tdhead;
1013 txd = sc->sc_rb.rb_txd;
1014 pci = sc->sc_flags & HME_PCI;
1015 CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
1016 HME_XD_GETFLAGS(pci, txd, ri));
1017 for (i = 0; i < nsegs; i++) {
1018 /* Fill the ring entry. */
1019 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
1021 flags |= HME_XD_SOP | cflags;
1023 flags |= HME_XD_OWN | cflags;
1024 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
1026 HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
1027 HME_XD_SETFLAGS(pci, txd, ri, flags);
1028 sc->sc_rb.rb_td_nbusy++;
1029 htx->htx_lastdesc = ri;
1030 ri = (ri + 1) % HME_NTXDESC;
1032 sc->sc_rb.rb_tdhead = ri;
1034 /* set EOP on the last descriptor */
1035 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
1036 flags = HME_XD_GETFLAGS(pci, txd, ri);
1037 flags |= HME_XD_EOP;
1038 CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
1040 HME_XD_SETFLAGS(pci, txd, ri, flags);
1042 /* Turn ownership of the first descriptor over to the HME. */
1043 flags = HME_XD_GETFLAGS(pci, txd, si);
1044 flags |= HME_XD_OWN;
1045 CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
1047 HME_XD_SETFLAGS(pci, txd, si, flags);
1049 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
1050 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
1053 /* start the transmission. */
1054 HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
1060 * Pass a packet to the higher levels.
1063 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
1065 struct ifnet *ifp = sc->sc_ifp;
1068 if (len <= sizeof(struct ether_header) ||
1069 len > HME_MAX_FRAMESIZE) {
1071 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
1075 hme_discard_rxbuf(sc, ix);
1079 m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
1080 CTR1(KTR_HME, "hme_read: len %d", len);
1082 if (hme_add_rxbuf(sc, ix, 0) != 0) {
1084 * hme_add_rxbuf will leave the old buffer in the ring until
1085 * it is sure that a new buffer can be mapped. If it cannot,
1086 * drop the packet, but leave the interface up.
1089 hme_discard_rxbuf(sc, ix);
1095 m->m_pkthdr.rcvif = ifp;
1096 m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
1097 m_adj(m, HME_RXOFFS);
1098 /* RX TCP/UDP checksum */
1099 if (ifp->if_capenable & IFCAP_RXCSUM)
1100 hme_rxcksum(m, flags);
1101 /* Pass the packet up. */
1103 (*ifp->if_input)(ifp, m);
1108 hme_start(struct ifnet *ifp)
1110 struct hme_softc *sc = ifp->if_softc;
1113 hme_start_locked(ifp);
1118 hme_start_locked(struct ifnet *ifp)
1120 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
1124 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1125 IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
1128 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1129 sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
1130 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1134 error = hme_load_txmbuf(sc, &m);
1138 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1139 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1147 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1148 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1149 sc->sc_wdog_timer = 5;
1154 * Transmit interrupt.
1157 hme_tint(struct hme_softc *sc)
1160 struct ifnet *ifp = sc->sc_ifp;
1161 struct hme_txdesc *htx;
1162 unsigned int ri, txflags;
1164 txd = sc->sc_rb.rb_txd;
1165 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1166 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1167 /* Fetch current position in the transmit ring */
1168 for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
1169 if (sc->sc_rb.rb_td_nbusy <= 0) {
1170 CTR0(KTR_HME, "hme_tint: not busy!");
1174 txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
1175 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
1177 if ((txflags & HME_XD_OWN) != 0)
1180 CTR0(KTR_HME, "hme_tint: not owned");
1181 --sc->sc_rb.rb_td_nbusy;
1182 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1184 /* Complete packet transmitted? */
1185 if ((txflags & HME_XD_EOP) == 0)
1188 KASSERT(htx->htx_lastdesc == ri,
1189 ("%s: ring indices skewed: %d != %d!",
1190 __func__, htx->htx_lastdesc, ri));
1191 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1192 BUS_DMASYNC_POSTWRITE);
1193 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1196 m_freem(htx->htx_m);
1198 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1199 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1200 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1202 sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
1205 sc->sc_rb.rb_tdtail = ri;
1207 hme_start_locked(ifp);
1211 * RX TCP/UDP checksum
1214 hme_rxcksum(struct mbuf *m, u_int32_t flags)
1216 struct ether_header *eh;
1219 int32_t hlen, len, pktlen;
1220 u_int16_t cksum, *opts;
1223 pktlen = m->m_pkthdr.len;
1224 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
1226 eh = mtod(m, struct ether_header *);
1227 if (eh->ether_type != htons(ETHERTYPE_IP))
1229 ip = (struct ip *)(eh + 1);
1230 if (ip->ip_v != IPVERSION)
1233 hlen = ip->ip_hl << 2;
1234 pktlen -= sizeof(struct ether_header);
1235 if (hlen < sizeof(struct ip))
1237 if (ntohs(ip->ip_len) < hlen)
1239 if (ntohs(ip->ip_len) != pktlen)
1241 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1242 return; /* can't handle fragmented packet */
1246 if (pktlen < (hlen + sizeof(struct tcphdr)))
1250 if (pktlen < (hlen + sizeof(struct udphdr)))
1252 uh = (struct udphdr *)((caddr_t)ip + hlen);
1253 if (uh->uh_sum == 0)
1254 return; /* no checksum */
1260 cksum = ~(flags & HME_XD_RXCKSUM);
1261 /* checksum fixup for IP options */
1262 len = hlen - sizeof(struct ip);
1264 opts = (u_int16_t *)(ip + 1);
1265 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
1266 temp32 = cksum - *opts;
1267 temp32 = (temp32 >> 16) + (temp32 & 65535);
1268 cksum = temp32 & 65535;
1271 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
1272 m->m_pkthdr.csum_data = cksum;
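
/*
 * Added explanation (not in the original): the hardware sum in "flags" starts
 * at the fixed offset programmed in hme_init_locked() and therefore also
 * covers any IP options.  The fixup loop above subtracts those extra 16-bit
 * words from the one's complement sum; the (temp32 >> 16) + (temp32 & 65535)
 * step folds the borrow back into the low 16 bits so that csum_data remains a
 * valid partial checksum for the stack to finish (CSUM_DATA_VALID without
 * CSUM_PSEUDO_HDR).
 */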
1276 * Receive interrupt.
1279 hme_rint(struct hme_softc *sc)
1281 caddr_t xdr = sc->sc_rb.rb_rxd;
1282 struct ifnet *ifp = sc->sc_ifp;
1283 unsigned int ri, len;
1288 * Process all buffers with valid data.
1290 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1291 for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
1292 flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
1293 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
1294 if ((flags & HME_XD_OWN) != 0)
1298 if ((flags & HME_XD_OFL) != 0) {
1299 device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
1300 "flags=0x%x\n", ri, flags);
1302 hme_discard_rxbuf(sc, ri);
1304 len = HME_XD_DECODE_RSIZE(flags);
1305 hme_read(sc, ri, len, flags);
1309 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1310 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1312 sc->sc_rb.rb_rdtail = ri;
1316 hme_eint(struct hme_softc *sc, u_int status)
1319 if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1320 device_printf(sc->sc_dev, "XXXlink status changed: "
1321 "cfg=%#x, stat=%#x, sm=%#x\n",
1322 HME_MIF_READ_4(sc, HME_MIFI_CFG),
1323 HME_MIF_READ_4(sc, HME_MIFI_STAT),
1324 HME_MIF_READ_4(sc, HME_MIFI_SM));
1328 /* Check for fatal errors that need a reset to unfreeze the DMA engine. */
1329 if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
1330 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
1331 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1332 hme_init_locked(sc);
1339 struct hme_softc *sc = (struct hme_softc *)v;
1343 status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1344 CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
1346 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1347 hme_eint(sc, status);
1349 if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1352 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1358 hme_watchdog(struct hme_softc *sc)
1360 struct ifnet *ifp = sc->sc_ifp;
1362 HME_LOCK_ASSERT(sc, MA_OWNED);
1365 CTR1(KTR_HME, "hme_watchdog: status %x",
1366 (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
1369 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1372 if ((sc->sc_flags & HME_LINK) != 0)
1373 device_printf(sc->sc_dev, "device timeout\n");
1374 else if (bootverbose)
1375 device_printf(sc->sc_dev, "device timeout (no link)\n");
1378 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1379 hme_init_locked(sc);
1380 hme_start_locked(ifp);
1381 return (EJUSTRETURN);
1385 * Initialize the MII Management Interface
1388 hme_mifinit(struct hme_softc *sc)
1393 * Configure the MIF in frame mode, with polling disabled and the internal PHY selected.
1396 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
1399 * If the currently selected media uses the external transceiver,
1400 * enable its MII drivers (which basically isolates the internal
1401 * one and vice versa). In case the current media hasn't been set
1402 * yet, we default to the internal transceiver.
1404 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
1405 if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
1406 sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
1408 v |= HME_MAC_XIF_MIIENABLE;
1410 v &= ~HME_MAC_XIF_MIIENABLE;
1411 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
1418 hme_mii_readreg(device_t dev, int phy, int reg)
1420 struct hme_softc *sc;
1424 sc = device_get_softc(dev);
1425 /* Select the desired PHY in the MIF configuration register */
1426 v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1427 if (phy == HME_PHYAD_EXTERNAL)
1428 v |= HME_MIF_CFG_PHY;
1430 v &= ~HME_MIF_CFG_PHY;
1431 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1433 /* Construct the frame command */
1434 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1436 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1437 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1438 (reg << HME_MIF_FO_REGAD_SHIFT);
1440 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1441 HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
1442 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
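/*
 * For orientation (descriptive note, not from the original source): this is a
 * standard IEEE 802.3 clause 22 management frame.  The start, opcode, PHY
 * address and register fields packed into HME_MIFI_FO above are shifted out
 * on the MDIO wire by the MIF; the loop below then polls for HME_MIF_FO_TALSB,
 * which the chip sets once the turnaround completes and the 16 data bits are
 * valid.
 */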
1443 for (n = 0; n < 100; n++) {
1445 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1446 if (v & HME_MIF_FO_TALSB)
1447 return (v & HME_MIF_FO_DATA);
1450 device_printf(sc->sc_dev, "mii_read timeout\n");
1455 hme_mii_writereg(device_t dev, int phy, int reg, int val)
1457 struct hme_softc *sc;
1461 sc = device_get_softc(dev);
1462 /* Select the desired PHY in the MIF configuration register */
1463 v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1464 if (phy == HME_PHYAD_EXTERNAL)
1465 v |= HME_MIF_CFG_PHY;
1467 v &= ~HME_MIF_CFG_PHY;
1468 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1470 /* Construct the frame command */
1471 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1473 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
1474 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1475 (reg << HME_MIF_FO_REGAD_SHIFT) |
1476 (val & HME_MIF_FO_DATA);
1478 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1479 HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
1480 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1481 for (n = 0; n < 100; n++) {
1483 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1484 if (v & HME_MIF_FO_TALSB)
1488 device_printf(sc->sc_dev, "mii_write timeout\n");
1493 hme_mii_statchg(device_t dev)
1495 struct hme_softc *sc;
1496 uint32_t rxcfg, txcfg;
1498 sc = device_get_softc(dev);
1501 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1502 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
1505 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
1506 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
1507 sc->sc_flags |= HME_LINK;
1509 sc->sc_flags &= ~HME_LINK;
1511 txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
1512 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
1513 HME_MAC_TXCFG_ENABLE, 0))
1514 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
1515 rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1516 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
1517 HME_MAC_RXCFG_ENABLE, 0))
1518 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1520 /* Set the MAC Full Duplex bit appropriately. */
1521 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1522 txcfg |= HME_MAC_TXCFG_FULLDPLX;
1524 txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
1525 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
1527 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1528 (sc->sc_flags & HME_LINK) != 0) {
1529 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
1530 HME_MAC_TXCFG_ENABLE))
1531 device_printf(sc->sc_dev, "cannot enable TX MAC\n");
1532 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
1533 HME_MAC_RXCFG_ENABLE))
1534 device_printf(sc->sc_dev, "cannot enable RX MAC\n");
1539 hme_mediachange(struct ifnet *ifp)
1541 struct hme_softc *sc = ifp->if_softc;
1545 error = hme_mediachange_locked(sc);
1551 hme_mediachange_locked(struct hme_softc *sc)
1553 struct mii_softc *child;
1555 HME_LOCK_ASSERT(sc, MA_OWNED);
1558 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1559 device_printf(sc->sc_dev, "hme_mediachange_locked");
1565 * If both PHYs are present, reset them. This is required for
1566 * unisolating the previously isolated PHY when switching PHYs.
1567 * As the above hme_mifinit() call will set the MII drivers in
1568 * the XIF configuration register according to the currently
1569 * selected media, there should be no window during which the
1570 * data paths of both transceivers are open at the same time,
1571 * even if the PHY device drivers use MIIF_NOISOLATE.
1573 if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
1574 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
1576 return (mii_mediachg(sc->sc_mii));
1580 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1582 struct hme_softc *sc = ifp->if_softc;
1585 if ((ifp->if_flags & IFF_UP) == 0) {
1590 mii_pollstat(sc->sc_mii);
1591 ifmr->ifm_active = sc->sc_mii->mii_media_active;
1592 ifmr->ifm_status = sc->sc_mii->mii_media_status;
1597 * Process an ioctl request.
1600 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1602 struct hme_softc *sc = ifp->if_softc;
1603 struct ifreq *ifr = (struct ifreq *)data;
1609 if ((ifp->if_flags & IFF_UP) != 0) {
1610 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1611 ((ifp->if_flags ^ sc->sc_ifflags) &
1612 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1613 hme_setladrf(sc, 1);
1615 hme_init_locked(sc);
1616 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1618 if ((ifp->if_flags & IFF_LINK0) != 0)
1619 sc->sc_csum_features |= CSUM_UDP;
1621 sc->sc_csum_features &= ~CSUM_UDP;
1622 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1623 ifp->if_hwassist = sc->sc_csum_features;
1624 sc->sc_ifflags = ifp->if_flags;
1631 hme_setladrf(sc, 1);
1637 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1641 ifp->if_capenable = ifr->ifr_reqcap;
1642 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1643 ifp->if_hwassist = sc->sc_csum_features;
1645 ifp->if_hwassist = 0;
1649 error = ether_ioctl(ifp, cmd, data);
1657 * Set up the logical address filter.
1660 hme_setladrf(struct hme_softc *sc, int reenable)
1662 struct ifnet *ifp = sc->sc_ifp;
1663 struct ifmultiaddr *inm;
1668 HME_LOCK_ASSERT(sc, MA_OWNED);
1669 /* Clear the hash table. */
1670 hash[3] = hash[2] = hash[1] = hash[0] = 0;
1672 /* Get the current RX configuration. */
1673 macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1676 * Turn off promiscuous mode, promiscuous group mode (all multicast),
1677 * and hash filter. Depending on the case, the right bit will be
1680 macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
1683 * Disable the receiver while changing its state, as the documentation mandates.
1685 * We then must wait until the bit clears in the register. This should
1686 * take at most 3.5ms.
1688 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1689 HME_MAC_RXCFG_ENABLE, 0))
1690 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1691 /* Disable the hash filter before writing to the filter registers. */
1692 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1693 HME_MAC_RXCFG_HENABLE, 0))
1694 device_printf(sc->sc_dev, "cannot disable hash filter\n");
1696 /* Make the RX MAC really SIMPLEX. */
1697 macc |= HME_MAC_RXCFG_ME;
1699 macc |= HME_MAC_RXCFG_ENABLE;
1701 macc &= ~HME_MAC_RXCFG_ENABLE;
1703 if ((ifp->if_flags & IFF_PROMISC) != 0) {
1704 macc |= HME_MAC_RXCFG_PMISC;
1707 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
1708 macc |= HME_MAC_RXCFG_PGRP;
1712 macc |= HME_MAC_RXCFG_HENABLE;
1715 * Set up the multicast address filter by passing all multicast addresses
1716 * through a CRC generator and then using the high order 6 bits as an
1717 * index into the 64 bit logical address filter. The two high order bits
1718 * select the word, while the remaining four bits select the bit within
1722 if_maddr_rlock(ifp);
1723 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
1724 if (inm->ifma_addr->sa_family != AF_LINK)
1726 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1727 inm->ifma_addr), ETHER_ADDR_LEN);
1729 /* Just want the 6 most significant bits. */
1732 /* Set the corresponding bit in the filter. */
1733 hash[crc >> 4] |= 1 << (crc & 0xf);
1735 if_maddr_runlock(ifp);
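
/*
 * Worked example (illustrative): a multicast address whose CRC reduces to the
 * 6-bit value 0x2a (binary 101010) lands in hash[0x2a >> 4] == hash[2], with
 * bit (0x2a & 0xf) == bit 10 set.  The four 16-bit words of this 64 bit filter
 * are then loaded into HASHTAB0..HASHTAB3 below.
 */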
1738 /* Now load the hash table into the chip */
1739 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
1740 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
1741 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
1742 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
1743 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
1744 macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
1746 device_printf(sc->sc_dev, "cannot configure RX MAC\n");