2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 1999 The NetBSD Foundation, Inc.
5 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
8 * This code is derived from software contributed to The NetBSD Foundation
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the NetBSD
22 * Foundation, Inc. and its contributors.
23 * 4. Neither the name of The NetBSD Foundation nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
39 * from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
46 * HME Ethernet module driver.
 * The HME is e.g. part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually aligned to 2^11-byte boundaries,
 * and 2 bytes are skipped so that the header following the Ethernet header is
 * aligned on a natural boundary. This ensures minimal wastage in the most
 * common case.
55 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
56 * maximum packet size (this is not verified). Buffers starting on odd
57 * boundaries must be mapped so that the burst can start on a natural boundary.
 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
 * offloading. In reality, the same technique can also be applied to UDP
 * datagrams. However, the hardware doesn't compensate the checksum for UDP
 * datagrams, so the result can be 0x0, which for UDP means "no checksum".
 * As a safeguard, UDP checksum offload is therefore disabled by default. It
 * can be reactivated by setting the special link option link0 with
 * ifconfig(8).
65 #define HME_CSUM_FEATURES (CSUM_TCP)
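/*
 * Illustrative note: hme_ioctl() adds CSUM_UDP to sc_csum_features when
 * the link0 flag is set, so e.g. "ifconfig hme0 link0" re-enables UDP
 * transmit checksum offload despite the caveat above.
 */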
69 #define KTR_HME KTR_SPARE2 /* XXX */
71 #include <sys/param.h>
72 #include <sys/systm.h>
74 #include <sys/endian.h>
75 #include <sys/kernel.h>
76 #include <sys/module.h>
79 #include <sys/malloc.h>
80 #include <sys/socket.h>
81 #include <sys/sockio.h>
84 #include <net/ethernet.h>
86 #include <net/if_var.h>
87 #include <net/if_arp.h>
88 #include <net/if_dl.h>
89 #include <net/if_media.h>
90 #include <net/if_types.h>
91 #include <net/if_vlan_var.h>
93 #include <netinet/in.h>
94 #include <netinet/in_systm.h>
95 #include <netinet/ip.h>
96 #include <netinet/tcp.h>
97 #include <netinet/udp.h>
99 #include <dev/mii/mii.h>
100 #include <dev/mii/miivar.h>
102 #include <machine/bus.h>
104 #include <dev/hme/if_hmereg.h>
105 #include <dev/hme/if_hmevar.h>
107 CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
108 CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
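/*
 * These constraints mirror the hardware: the ERX configuration register
 * only encodes four RX ring sizes (32/64/128/256, see the switch in
 * hme_init_locked()) and the ETX ring size register counts in units of
 * 16 descriptors.
 */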
110 static void hme_start(struct ifnet *);
111 static void hme_start_locked(struct ifnet *);
112 static void hme_stop(struct hme_softc *);
113 static int hme_ioctl(struct ifnet *, u_long, caddr_t);
114 static void hme_tick(void *);
115 static int hme_watchdog(struct hme_softc *);
116 static void hme_init(void *);
117 static void hme_init_locked(struct hme_softc *);
118 static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
119 static int hme_meminit(struct hme_softc *);
120 static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
121 u_int32_t, u_int32_t);
122 static void hme_mifinit(struct hme_softc *);
123 static void hme_setladrf(struct hme_softc *, int);
125 static int hme_mediachange(struct ifnet *);
126 static int hme_mediachange_locked(struct hme_softc *);
127 static void hme_mediastatus(struct ifnet *, struct ifmediareq *);
129 static int hme_load_txmbuf(struct hme_softc *, struct mbuf **);
130 static void hme_read(struct hme_softc *, int, int, u_int32_t);
131 static void hme_eint(struct hme_softc *, u_int);
132 static void hme_rint(struct hme_softc *);
133 static void hme_tint(struct hme_softc *);
134 static void hme_rxcksum(struct mbuf *, u_int32_t);
136 static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
138 devclass_t hme_devclass;
142 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
143 MODULE_DEPEND(hme, miibus, 1, 1, 1);
145 #define HME_SPC_READ_4(spc, sc, offs) \
146 bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
148 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
149 bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
151 #define HME_SPC_BARRIER(spc, sc, offs, l, f) \
152 bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
155 #define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs))
156 #define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v))
157 #define HME_SEB_BARRIER(sc, offs, l, f) \
158 HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
159 #define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs))
160 #define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v))
161 #define HME_ERX_BARRIER(sc, offs, l, f) \
162 HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
163 #define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs))
164 #define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v))
165 #define HME_ETX_BARRIER(sc, offs, l, f) \
166 HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
167 #define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs))
168 #define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v))
169 #define HME_MAC_BARRIER(sc, offs, l, f) \
170 HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
171 #define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs))
172 #define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v))
173 #define HME_MIF_BARRIER(sc, offs, l, f) \
174 HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
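/*
 * For illustration, HME_SEB_READ_4(sc, offs) expands via token pasting to
 * bus_space_read_4(sc->sc_sebt, sc->sc_sebh, ...), i.e. each functional
 * unit is accessed through its own bus tag/handle pair as set up by the
 * front-end.
 */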
177 #define HME_WHINE(dev, ...) do { \
178 if (hme_nerr++ < HME_MAXERR) \
179 device_printf(dev, __VA_ARGS__); \
180 if (hme_nerr == HME_MAXERR) { \
181 device_printf(dev, "too many errors; not reporting " \
186 /* Support oversized VLAN frames. */
187 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
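/* With ETHER_MAX_LEN == 1518 and ETHER_VLAN_ENCAP_LEN == 4 this is 1522. */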
190 hme_config(struct hme_softc *sc)
193 struct mii_softc *child;
195 int error, rdesc, tdesc, i;
197 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
202 * HME common initialization.
204 * hme_softc fields that must be initialized by the front-end:
 * the bus handles, tags and offsets (split for SBus compatibility):
210 * sc_seb{t,h,o} (Shared Ethernet Block registers)
211 * sc_erx{t,h,o} (Receiver Unit registers)
212 * sc_etx{t,h,o} (Transmitter Unit registers)
213 * sc_mac{t,h,o} (MAC registers)
214 * sc_mif{t,h,o} (Management Interface registers)
216 * the maximum bus burst size:
221 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
223 /* Make sure the chip is stopped. */
228 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
229 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
230 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
231 NULL, NULL, &sc->sc_pdmatag);
236 * Create control, RX and TX mbuf DMA tags.
237 * Buffer descriptors must be aligned on a 2048 byte boundary;
238 * take this into account when calculating the size. Note that
239 * the maximum number of descriptors (256) occupies 2048 bytes,
240 * so we allocate that much regardless of HME_N*DESC.
243 error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
244 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
245 1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
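/*
 * Illustration (assuming HME_XD_SIZE is 8 bytes per descriptor): the
 * maximum of 256 descriptors per ring occupies exactly 2048 bytes,
 * matching the alignment of the tag created above.
 */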
249 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
250 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
251 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
255 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
256 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
257 MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
258 NULL, NULL, &sc->sc_tdmatag);
262 /* Allocate the control DMA buffer. */
263 error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
264 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
266 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
270 /* Load the control DMA buffer. */
271 sc->sc_rb.rb_dmabase = 0;
272 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
273 sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
274 sc->sc_rb.rb_dmabase == 0) {
275 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
279 CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
280 sc->sc_rb.rb_dmabase);
 * Prepare the RX descriptors. rdesc serves as a marker for the last
 * processed descriptor and may be used later on.
286 for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
287 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
288 error = bus_dmamap_create(sc->sc_rdmatag, 0,
289 &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
293 error = bus_dmamap_create(sc->sc_rdmatag, 0,
294 &sc->sc_rb.rb_spare_dmamap);
297 /* Same for the TX descs. */
298 for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
299 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
300 error = bus_dmamap_create(sc->sc_tdmatag, 0,
301 &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
306 sc->sc_csum_features = HME_CSUM_FEATURES;
307 /* Initialize ifnet structure. */
309 if_initname(ifp, device_get_name(sc->sc_dev),
310 device_get_unit(sc->sc_dev));
311 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
312 ifp->if_start = hme_start;
313 ifp->if_ioctl = hme_ioctl;
314 ifp->if_init = hme_init;
315 IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
316 ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
317 IFQ_SET_READY(&ifp->if_snd);
 * The DP83840A PHYs used with HME chips don't properly advertise their
 * media capabilities themselves, so force writing the ANAR according to
 * the BMSR in mii_phy_setmedia().
326 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
327 hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
328 MII_OFFSET_ANY, MIIF_FORCEANEG);
329 i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
330 hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
331 MII_OFFSET_ANY, MIIF_FORCEANEG);
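/*
 * Note that both attach attempts are made; the attach as a whole only
 * fails if neither the external nor the internal PHY was found.
 */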
332 if (error != 0 && i != 0) {
334 device_printf(sc->sc_dev, "attaching PHYs failed\n");
337 sc->sc_mii = device_get_softc(sc->sc_miibus);
340 * Walk along the list of attached MII devices and
341 * establish an `MII instance' to `PHY number'
342 * mapping. We'll use this mapping to enable the MII
343 * drivers of the external transceiver according to
344 * the currently selected media.
346 sc->sc_phys[0] = sc->sc_phys[1] = -1;
347 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
 * Note: we support just two PHYs: the built-in internal device and
 * an external one on the MII connector.
353 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
354 child->mii_phy != HME_PHYAD_INTERNAL) ||
355 child->mii_inst > 1) {
356 device_printf(sc->sc_dev, "cannot accommodate "
357 "MII device %s at phy %d, instance %d\n",
358 device_get_name(child->mii_dev),
359 child->mii_phy, child->mii_inst);
363 sc->sc_phys[child->mii_inst] = child->mii_phy;
366 /* Attach the interface. */
367 ether_ifattach(ifp, sc->sc_enaddr);
370 * Tell the upper layer(s) we support long frames/checksum offloads.
372 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
373 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
374 ifp->if_hwassist |= sc->sc_csum_features;
375 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
379 for (i = 0; i < tdesc; i++) {
380 bus_dmamap_destroy(sc->sc_tdmatag,
381 sc->sc_rb.rb_txdesc[i].htx_dmamap);
383 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
385 for (i = 0; i < rdesc; i++) {
386 bus_dmamap_destroy(sc->sc_rdmatag,
387 sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
389 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
391 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
393 bus_dma_tag_destroy(sc->sc_tdmatag);
395 bus_dma_tag_destroy(sc->sc_rdmatag);
397 bus_dma_tag_destroy(sc->sc_cdmatag);
399 bus_dma_tag_destroy(sc->sc_pdmatag);
406 hme_detach(struct hme_softc *sc)
408 struct ifnet *ifp = sc->sc_ifp;
414 callout_drain(&sc->sc_tick_ch);
417 device_delete_child(sc->sc_dev, sc->sc_miibus);
419 for (i = 0; i < HME_NTXQ; i++) {
420 bus_dmamap_destroy(sc->sc_tdmatag,
421 sc->sc_rb.rb_txdesc[i].htx_dmamap);
423 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
424 for (i = 0; i < HME_NRXDESC; i++) {
425 bus_dmamap_destroy(sc->sc_rdmatag,
426 sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
428 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
429 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
430 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
431 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
432 bus_dma_tag_destroy(sc->sc_tdmatag);
433 bus_dma_tag_destroy(sc->sc_rdmatag);
434 bus_dma_tag_destroy(sc->sc_cdmatag);
435 bus_dma_tag_destroy(sc->sc_pdmatag);
439 hme_suspend(struct hme_softc *sc)
448 hme_resume(struct hme_softc *sc)
450 struct ifnet *ifp = sc->sc_ifp;
453 if ((ifp->if_flags & IFF_UP) != 0)
459 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
461 struct hme_softc *sc = (struct hme_softc *)xsc;
466 ("%s: too many DMA segments (%d)", __func__, nsegs));
467 sc->sc_rb.rb_dmabase = segs[0].ds_addr;
473 struct hme_softc *sc = arg;
476 HME_LOCK_ASSERT(sc, MA_OWNED);
480 * Unload collision counters
482 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
483 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
484 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
485 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
486 HME_MAC_READ_4(sc, HME_MACI_LTCNT));
489 * then clear the hardware counters.
491 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
492 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
493 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
494 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
496 mii_tick(sc->sc_mii);
498 if (hme_watchdog(sc) == EJUSTRETURN)
501 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
505 hme_stop(struct hme_softc *sc)
510 callout_stop(&sc->sc_tick_ch);
511 sc->sc_wdog_timer = 0;
512 sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
513 sc->sc_flags &= ~HME_LINK;
515 /* Mask all interrupts */
516 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
518 /* Reset transmitter and receiver */
519 HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
521 HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
522 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
523 for (n = 0; n < 20; n++) {
524 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
525 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
530 device_printf(sc->sc_dev, "hme_stop: reset failed\n");
534 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
535 * ring for subsequent use.
538 hme_discard_rxbuf(struct hme_softc *sc, int ix)
 * We dropped a packet; reinitialize the descriptor and turn the
 * ownership back over to the hardware.
545 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
546 ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
547 &sc->sc_rb.rb_rxdesc[ix])));
551 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
553 struct hme_rxdesc *rd;
555 bus_dma_segment_t segs[1];
560 rd = &sc->sc_rb.rb_rxdesc[ri];
561 unmap = rd->hrx_m != NULL;
562 if (unmap && keepold) {
564 * Reinitialize the descriptor flags, as they may have been
565 * altered by the hardware.
567 hme_discard_rxbuf(sc, ri);
570 if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
572 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
573 b = mtod(m, uintptr_t);
575 * Required alignment boundary. At least 16 is needed, but since
576 * the mapping must be done in a way that a burst can start on a
577 * natural boundary we might need to extend this.
579 a = imax(HME_MINRXALIGN, sc->sc_burst);
 * Make sure the buffer is suitably aligned. The 2 byte offset is removed
 * when the mbuf is handed up. XXX: this ensures at least 16 byte
 * alignment of the header adjacent to the ethernet header, which
 * should be sufficient in all cases. Nevertheless, this second-guesses
 * ALIGN().
587 m_adj(m, roundup2(b, a) - b);
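/*
 * Worked example (illustrative): with a == 64 (sc_burst) and b ending in
 * 0x812, roundup2(b, a) - b == 0x2e, i.e. 46 bytes are trimmed to reach
 * the next 64-byte boundary.
 */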
588 if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
589 m, segs, &nsegs, 0) != 0) {
593 /* If nsegs is wrong then the stack is corrupt. */
595 ("%s: too many DMA segments (%d)", __func__, nsegs));
597 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
598 BUS_DMASYNC_POSTREAD);
599 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
601 map = rd->hrx_dmamap;
602 rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
603 sc->sc_rb.rb_spare_dmamap = map;
604 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
605 HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
608 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
609 HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
614 hme_meminit(struct hme_softc *sc)
616 struct hme_ring *hr = &sc->sc_rb;
617 struct hme_txdesc *td;
624 dma = hr->rb_dmabase;
627 * Allocate transmit descriptors
631 p += HME_NTXDESC * HME_XD_SIZE;
632 dma += HME_NTXDESC * HME_XD_SIZE;
 * We have reserved descriptor space until the next 2048 byte
 * boundary.
637 dma = (bus_addr_t)roundup((u_long)dma, 2048);
638 p = (caddr_t)roundup((u_long)p, 2048);
641 * Allocate receive descriptors
645 p += HME_NRXDESC * HME_XD_SIZE;
646 dma += HME_NRXDESC * HME_XD_SIZE;
/* Again move forward to the next 2048 byte boundary. */
648 dma = (bus_addr_t)roundup((u_long)dma, 2048);
649 p = (caddr_t)roundup((u_long)p, 2048);
652 * Initialize transmit buffer descriptors
654 for (i = 0; i < HME_NTXDESC; i++) {
655 HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
656 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
659 STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
660 STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
661 for (i = 0; i < HME_NTXQ; i++) {
662 td = &sc->sc_rb.rb_txdesc[i];
663 if (td->htx_m != NULL) {
664 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
665 BUS_DMASYNC_POSTWRITE);
666 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
670 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
674 * Initialize receive buffer descriptors
676 for (i = 0; i < HME_NRXDESC; i++) {
677 error = hme_add_rxbuf(sc, i, 1);
682 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
683 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
685 hr->rb_tdhead = hr->rb_tdtail = 0;
688 CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
690 CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
692 CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
693 *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
694 CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
695 *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
700 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
701 u_int32_t clr, u_int32_t set)
707 HME_MAC_WRITE_4(sc, reg, val);
708 HME_MAC_BARRIER(sc, reg, 4,
709 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
710 if (clr == 0 && set == 0)
711 return (1); /* just write, no bits to wait for */
715 val = HME_MAC_READ_4(sc, reg);
717 /* After 3.5ms, we should have been done. */
718 device_printf(sc->sc_dev, "timeout while writing to "
719 "MAC configuration register\n");
722 } while ((val & clr) != 0 && (val & set) != set);
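/*
 * In other words: poll the register, giving the MAC roughly 3.5ms,
 * until the bits in `clr' read back as cleared or the bits in `set'
 * read back as set.
 */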
727 * Initialization of interface; set up initialization block
728 * and transmit/receive descriptor rings.
733 struct hme_softc *sc = (struct hme_softc *)xsc;
741 hme_init_locked(struct hme_softc *sc)
743 struct ifnet *ifp = sc->sc_ifp;
747 HME_LOCK_ASSERT(sc, MA_OWNED);
749 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
753 * Initialization sequence. The numbered steps below correspond
754 * to the sequence outlined in section 6.3.5.1 in the Ethernet
755 * Channel Engine manual (part of the PCIO manual).
756 * See also the STP2002-STQ document from Sun Microsystems.
759 /* step 1 & 2. Reset the Ethernet Channel */
762 /* Re-initialize the MIF */
766 /* Mask all MIF interrupts, just in case */
767 HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
770 /* step 3. Setup data structures in host memory */
771 if (hme_meminit(sc) != 0) {
772 device_printf(sc->sc_dev, "out of buffers; init aborted.");
776 /* step 4. TX MAC registers & counters */
777 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
778 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
779 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
780 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
781 HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
783 /* Load station MAC address */
785 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
786 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
787 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
790 * Init seed for backoff
791 * (source suggested by manual: low 10 bits of MAC address)
793 v = ((ea[4] << 8) | ea[5]) & 0x3fff;
794 HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
/* Note: accepting power-on defaults for the other MAC registers here. */
798 /* step 5. RX MAC registers & counters */
801 /* step 6 & 7. Program Descriptor Ring Base Addresses */
802 HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
803 /* Transmit Descriptor ring size: in increments of 16 */
804 HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
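/* E.g. HME_NTXDESC == 256 yields an encoded ring size of 15. */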
806 HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
807 HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
809 /* step 8. Global Configuration & Interrupt Mask */
810 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
811 ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
812 HME_SEB_STAT_HOSTTOTX |
813 HME_SEB_STAT_RXTOHOST |
815 HME_SEB_STAT_TXPERR |
816 HME_SEB_STAT_RCNTEXP |
817 HME_SEB_STAT_ALL_ERRORS ));
819 switch (sc->sc_burst) {
824 v = HME_SEB_CFG_BURST16;
827 v = HME_SEB_CFG_BURST32;
830 v = HME_SEB_CFG_BURST64;
 * Blindly setting 64-bit transfers may hang PCI cards (Cheerio?).
 * Allowing 64-bit transfers also breaks TX checksum offload. It is not
 * known whether this stems from a hardware bug or from the way the
 * driver sets up its DMA.
 * if (sc->sc_flags & HME_PCI == 0)
 *	v |= HME_SEB_CFG_64BIT;
842 HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
844 /* step 9. ETX Configuration: use mostly default values */
847 v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
848 v |= HME_ETX_CFG_DMAENABLE;
849 HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
851 /* step 10. ERX Configuration */
852 v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
854 /* Encode Receive Descriptor ring size: four possible values */
855 v &= ~HME_ERX_CFG_RINGSIZEMSK;
856 switch (HME_NRXDESC) {
858 v |= HME_ERX_CFG_RINGSIZE32;
861 v |= HME_ERX_CFG_RINGSIZE64;
864 v |= HME_ERX_CFG_RINGSIZE128;
867 v |= HME_ERX_CFG_RINGSIZE256;
870 printf("hme: invalid Receive Descriptor ring size\n");
874 /* Enable DMA, fix RX first byte offset. */
875 v &= ~HME_ERX_CFG_FBO_MASK;
876 v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
877 /* RX TCP/UDP checksum offset */
878 n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
879 n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
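/*
 * I.e. the checksum start offset is counted in half-words; for a
 * 14 byte Ethernet header plus a 20 byte IP header without options
 * this is (14 + 20) / 2 == 17.
 */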
881 CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
882 HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
884 /* step 11. XIF Configuration */
885 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
887 CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
888 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
890 /* step 12. RX_MAC Configuration Register */
891 v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
892 v |= HME_MAC_RXCFG_ENABLE;
893 v &= ~(HME_MAC_RXCFG_DCRCS);
894 CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
895 HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
897 /* step 13. TX_MAC Configuration Register */
898 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
899 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
900 CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
901 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
903 /* step 14. Issue Transmit Pending command */
906 /* Debug: double-check. */
907 CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
908 "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
909 HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
910 HME_ERX_READ_4(sc, HME_ERXI_RING),
911 HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
912 CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
913 HME_SEB_READ_4(sc, HME_SEBI_IMASK),
914 HME_ERX_READ_4(sc, HME_ERXI_CFG),
915 HME_ETX_READ_4(sc, HME_ETXI_CFG));
916 CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
917 HME_MAC_READ_4(sc, HME_MACI_RXCFG),
918 HME_MAC_READ_4(sc, HME_MACI_TXCFG));
921 ifp->if_drv_flags |= IFF_DRV_RUNNING;
922 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
924 /* Set the current media. */
925 hme_mediachange_locked(sc);
927 /* Start the one second timer. */
928 sc->sc_wdog_timer = 0;
929 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
933 * Routine to DMA map an mbuf chain, set up the descriptor rings
934 * accordingly and start the transmission.
935 * Returns 0 on success, -1 if there were not enough free descriptors
936 * to map the packet, or an errno otherwise.
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
 * boundary onward (i.e. potentially before ds_addr) up to the first
 * boundary beyond the end. This is usually a safe assumption to
 * make, but is not documented.
945 hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
947 bus_dma_segment_t segs[HME_NTXSEGS];
948 struct hme_txdesc *htx;
952 int error, i, nsegs, pci, ri, si;
953 uint32_t cflags, flags;
955 if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
959 if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
960 if (M_WRITABLE(*m0) == 0) {
961 m = m_dup(*m0, M_NOWAIT);
967 i = sizeof(struct ether_header);
968 m = m_pullup(*m0, i + sizeof(struct ip));
973 ip = (struct ip *)(mtod(m, caddr_t) + i);
974 i += (ip->ip_hl << 2);
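/*
 * i now covers the Ethernet and IP headers and thus is the TX checksum
 * start offset; the stuff offset below adds csum_data, the offset of
 * the checksum field within the TCP/UDP header.
 */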
975 cflags = i << HME_XD_TXCKSUM_SSHIFT |
976 ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
981 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
982 *m0, segs, &nsegs, 0);
983 if (error == EFBIG) {
984 m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS);
991 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
992 *m0, segs, &nsegs, 0);
998 } else if (error != 0)
1000 /* If nsegs is wrong then the stack is corrupt. */
1001 KASSERT(nsegs <= HME_NTXSEGS,
1002 ("%s: too many DMA segments (%d)", __func__, nsegs));
1008 if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
1009 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1010 /* Retry with m_collapse(9)? */
1013 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
1015 si = ri = sc->sc_rb.rb_tdhead;
1016 txd = sc->sc_rb.rb_txd;
1017 pci = sc->sc_flags & HME_PCI;
1018 CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
1019 HME_XD_GETFLAGS(pci, txd, ri));
1020 for (i = 0; i < nsegs; i++) {
1021 /* Fill the ring entry. */
1022 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
1024 flags |= HME_XD_SOP | cflags;
1026 flags |= HME_XD_OWN | cflags;
1027 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
1029 HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
1030 HME_XD_SETFLAGS(pci, txd, ri, flags);
1031 sc->sc_rb.rb_td_nbusy++;
1032 htx->htx_lastdesc = ri;
1033 ri = (ri + 1) % HME_NTXDESC;
1035 sc->sc_rb.rb_tdhead = ri;
1037 /* set EOP on the last descriptor */
1038 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
1039 flags = HME_XD_GETFLAGS(pci, txd, ri);
1040 flags |= HME_XD_EOP;
1041 CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
1043 HME_XD_SETFLAGS(pci, txd, ri, flags);
/* Turn ownership of the first descriptor over to the HME. */
1046 flags = HME_XD_GETFLAGS(pci, txd, si);
1047 flags |= HME_XD_OWN;
1048 CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
1050 HME_XD_SETFLAGS(pci, txd, si, flags);
1052 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
1053 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
1056 /* start the transmission. */
1057 HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
1063 * Pass a packet to the higher levels.
1066 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
1068 struct ifnet *ifp = sc->sc_ifp;
1071 if (len <= sizeof(struct ether_header) ||
1072 len > HME_MAX_FRAMESIZE) {
1074 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
1077 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1078 hme_discard_rxbuf(sc, ix);
1082 m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
1083 CTR1(KTR_HME, "hme_read: len %d", len);
1085 if (hme_add_rxbuf(sc, ix, 0) != 0) {
 * hme_add_rxbuf will leave the old buffer in the ring until
 * it is sure that a new buffer can be mapped. If it cannot,
 * drop the packet, but leave the interface up.
1091 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1092 hme_discard_rxbuf(sc, ix);
1096 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1098 m->m_pkthdr.rcvif = ifp;
1099 m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
1100 m_adj(m, HME_RXOFFS);
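/*
 * Stripping HME_RXOFFS removes the 2 byte pad the chip wrote in front
 * of the frame, leaving the IP header naturally aligned (see the
 * comment at the top of this file).
 */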
1101 /* RX TCP/UDP checksum */
1102 if (ifp->if_capenable & IFCAP_RXCSUM)
1103 hme_rxcksum(m, flags);
1104 /* Pass the packet up. */
1106 (*ifp->if_input)(ifp, m);
1111 hme_start(struct ifnet *ifp)
1113 struct hme_softc *sc = ifp->if_softc;
1116 hme_start_locked(ifp);
1121 hme_start_locked(struct ifnet *ifp)
1123 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
1127 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1128 IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
1131 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1132 sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
1133 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1137 error = hme_load_txmbuf(sc, &m);
1141 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1142 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1150 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1151 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1152 sc->sc_wdog_timer = 5;
1157 * Transmit interrupt.
1160 hme_tint(struct hme_softc *sc)
1163 struct ifnet *ifp = sc->sc_ifp;
1164 struct hme_txdesc *htx;
1165 unsigned int ri, txflags;
1167 txd = sc->sc_rb.rb_txd;
1168 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1169 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1170 /* Fetch current position in the transmit ring */
1171 for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
1172 if (sc->sc_rb.rb_td_nbusy <= 0) {
1173 CTR0(KTR_HME, "hme_tint: not busy!");
1177 txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
1178 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
1180 if ((txflags & HME_XD_OWN) != 0)
1183 CTR0(KTR_HME, "hme_tint: not owned");
1184 --sc->sc_rb.rb_td_nbusy;
1185 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1187 /* Complete packet transmitted? */
1188 if ((txflags & HME_XD_EOP) == 0)
1191 KASSERT(htx->htx_lastdesc == ri,
1192 ("%s: ring indices skewed: %d != %d!",
1193 __func__, htx->htx_lastdesc, ri));
1194 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1195 BUS_DMASYNC_POSTWRITE);
1196 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1198 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1199 m_freem(htx->htx_m);
1201 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1202 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1203 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1205 sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
1208 sc->sc_rb.rb_tdtail = ri;
1210 hme_start_locked(ifp);
1214 * RX TCP/UDP checksum
1217 hme_rxcksum(struct mbuf *m, u_int32_t flags)
1219 struct ether_header *eh;
1222 int32_t hlen, len, pktlen;
1223 u_int16_t cksum, *opts;
1226 pktlen = m->m_pkthdr.len;
1227 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
1229 eh = mtod(m, struct ether_header *);
1230 if (eh->ether_type != htons(ETHERTYPE_IP))
1232 ip = (struct ip *)(eh + 1);
1233 if (ip->ip_v != IPVERSION)
1236 hlen = ip->ip_hl << 2;
1237 pktlen -= sizeof(struct ether_header);
1238 if (hlen < sizeof(struct ip))
1240 if (ntohs(ip->ip_len) < hlen)
1242 if (ntohs(ip->ip_len) != pktlen)
1244 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1245 return; /* can't handle fragmented packet */
1249 if (pktlen < (hlen + sizeof(struct tcphdr)))
1253 if (pktlen < (hlen + sizeof(struct udphdr)))
1255 uh = (struct udphdr *)((caddr_t)ip + hlen);
1256 if (uh->uh_sum == 0)
1257 return; /* no checksum */
1263 cksum = ~(flags & HME_XD_RXCKSUM);
1264 /* checksum fixup for IP options */
1265 len = hlen - sizeof(struct ip);
1267 opts = (u_int16_t *)(ip + 1);
1268 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
1269 temp32 = cksum - *opts;
1270 temp32 = (temp32 >> 16) + (temp32 & 65535);
1271 cksum = temp32 & 65535;
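/*
 * The hardware summed everything from the fixed checksum start offset
 * (the end of a plain IP header) onward, so each IP option half-word
 * is subtracted out with the borrow folded back in (end-around carry),
 * leaving the ones-complement sum of just the TCP/UDP segment.
 */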
1274 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
1275 m->m_pkthdr.csum_data = cksum;
1279 * Receive interrupt.
1282 hme_rint(struct hme_softc *sc)
1284 caddr_t xdr = sc->sc_rb.rb_rxd;
1285 struct ifnet *ifp = sc->sc_ifp;
1286 unsigned int ri, len;
1291 * Process all buffers with valid data.
1293 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1294 for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
1295 flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
1296 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
1297 if ((flags & HME_XD_OWN) != 0)
1301 if ((flags & HME_XD_OFL) != 0) {
1302 device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
1303 "flags=0x%x\n", ri, flags);
1304 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1305 hme_discard_rxbuf(sc, ri);
1307 len = HME_XD_DECODE_RSIZE(flags);
1308 hme_read(sc, ri, len, flags);
1312 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1313 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1315 sc->sc_rb.rb_rdtail = ri;
1319 hme_eint(struct hme_softc *sc, u_int status)
1322 if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1323 device_printf(sc->sc_dev, "XXXlink status changed: "
1324 "cfg=%#x, stat=%#x, sm=%#x\n",
1325 HME_MIF_READ_4(sc, HME_MIFI_CFG),
1326 HME_MIF_READ_4(sc, HME_MIFI_STAT),
1327 HME_MIF_READ_4(sc, HME_MIFI_SM));
/* Check for fatal errors that need a reset to unfreeze the DMA engine. */
1332 if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
1333 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
1334 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1335 hme_init_locked(sc);
1342 struct hme_softc *sc = (struct hme_softc *)v;
1346 status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1347 CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
1349 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1350 hme_eint(sc, status);
1352 if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1355 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1361 hme_watchdog(struct hme_softc *sc)
1363 struct ifnet *ifp = sc->sc_ifp;
1365 HME_LOCK_ASSERT(sc, MA_OWNED);
1368 CTR1(KTR_HME, "hme_watchdog: status %x",
1369 (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
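/* A timer value of 0 means disarmed; otherwise count down to zero. */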
1372 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1375 if ((sc->sc_flags & HME_LINK) != 0)
1376 device_printf(sc->sc_dev, "device timeout\n");
1377 else if (bootverbose)
1378 device_printf(sc->sc_dev, "device timeout (no link)\n");
1379 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1381 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1382 hme_init_locked(sc);
1383 hme_start_locked(ifp);
1384 return (EJUSTRETURN);
1388 * Initialize the MII Management Interface
1391 hme_mifinit(struct hme_softc *sc)
 * Configure the MIF in frame mode, polling disabled, internal PHY
 * selected.
1399 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
1402 * If the currently selected media uses the external transceiver,
1403 * enable its MII drivers (which basically isolates the internal
 * one and vice versa). In case the current media hasn't been set yet,
 * we default to the internal transceiver.
1407 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
1408 if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
1409 sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
1411 v |= HME_MAC_XIF_MIIENABLE;
1413 v &= ~HME_MAC_XIF_MIIENABLE;
1414 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
1421 hme_mii_readreg(device_t dev, int phy, int reg)
1423 struct hme_softc *sc;
1427 sc = device_get_softc(dev);
1428 /* Select the desired PHY in the MIF configuration register */
1429 v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1430 if (phy == HME_PHYAD_EXTERNAL)
1431 v |= HME_MIF_CFG_PHY;
1433 v &= ~HME_MIF_CFG_PHY;
1434 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1436 /* Construct the frame command */
1437 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1439 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1440 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1441 (reg << HME_MIF_FO_REGAD_SHIFT);
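/*
 * This builds an IEEE 802.3 clause 22 management frame: start bits,
 * the read opcode and the PHY and register addresses; completion is
 * then signaled by the turnaround LSB (HME_MIF_FO_TALSB).
 */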
1443 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1444 HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
1445 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1446 for (n = 0; n < 100; n++) {
1448 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1449 if (v & HME_MIF_FO_TALSB)
1450 return (v & HME_MIF_FO_DATA);
1453 device_printf(sc->sc_dev, "mii_read timeout\n");
1458 hme_mii_writereg(device_t dev, int phy, int reg, int val)
1460 struct hme_softc *sc;
1464 sc = device_get_softc(dev);
1465 /* Select the desired PHY in the MIF configuration register */
1466 v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1467 if (phy == HME_PHYAD_EXTERNAL)
1468 v |= HME_MIF_CFG_PHY;
1470 v &= ~HME_MIF_CFG_PHY;
1471 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1473 /* Construct the frame command */
1474 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1476 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
1477 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1478 (reg << HME_MIF_FO_REGAD_SHIFT) |
1479 (val & HME_MIF_FO_DATA);
1481 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1482 HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
1483 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1484 for (n = 0; n < 100; n++) {
1486 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1487 if (v & HME_MIF_FO_TALSB)
1491 device_printf(sc->sc_dev, "mii_write timeout\n");
1496 hme_mii_statchg(device_t dev)
1498 struct hme_softc *sc;
1499 uint32_t rxcfg, txcfg;
1501 sc = device_get_softc(dev);
1504 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1505 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
1508 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
1509 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
1510 sc->sc_flags |= HME_LINK;
1512 sc->sc_flags &= ~HME_LINK;
1514 txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
1515 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
1516 HME_MAC_TXCFG_ENABLE, 0))
1517 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
1518 rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1519 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
1520 HME_MAC_RXCFG_ENABLE, 0))
1521 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1523 /* Set the MAC Full Duplex bit appropriately. */
1524 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1525 txcfg |= HME_MAC_TXCFG_FULLDPLX;
1527 txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
1528 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
1530 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1531 (sc->sc_flags & HME_LINK) != 0) {
1532 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
1533 HME_MAC_TXCFG_ENABLE))
1534 device_printf(sc->sc_dev, "cannot enable TX MAC\n");
1535 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
1536 HME_MAC_RXCFG_ENABLE))
1537 device_printf(sc->sc_dev, "cannot enable RX MAC\n");
1542 hme_mediachange(struct ifnet *ifp)
1544 struct hme_softc *sc = ifp->if_softc;
1548 error = hme_mediachange_locked(sc);
1554 hme_mediachange_locked(struct hme_softc *sc)
1556 struct mii_softc *child;
1558 HME_LOCK_ASSERT(sc, MA_OWNED);
1561 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1562 device_printf(sc->sc_dev, "hme_mediachange_locked");
1568 * If both PHYs are present reset them. This is required for
1569 * unisolating the previously isolated PHY when switching PHYs.
1570 * As the above hme_mifinit() call will set the MII drivers in
1571 * the XIF configuration register according to the currently
1572 * selected media, there should be no window during which the
1573 * data paths of both transceivers are open at the same time,
1574 * even if the PHY device drivers use MIIF_NOISOLATE.
1576 if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
1577 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
1579 return (mii_mediachg(sc->sc_mii));
1583 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1585 struct hme_softc *sc = ifp->if_softc;
1588 if ((ifp->if_flags & IFF_UP) == 0) {
1593 mii_pollstat(sc->sc_mii);
1594 ifmr->ifm_active = sc->sc_mii->mii_media_active;
1595 ifmr->ifm_status = sc->sc_mii->mii_media_status;
1600 * Process an ioctl request.
1603 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1605 struct hme_softc *sc = ifp->if_softc;
1606 struct ifreq *ifr = (struct ifreq *)data;
1612 if ((ifp->if_flags & IFF_UP) != 0) {
1613 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1614 ((ifp->if_flags ^ sc->sc_ifflags) &
1615 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1616 hme_setladrf(sc, 1);
1618 hme_init_locked(sc);
1619 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1621 if ((ifp->if_flags & IFF_LINK0) != 0)
1622 sc->sc_csum_features |= CSUM_UDP;
1624 sc->sc_csum_features &= ~CSUM_UDP;
1625 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1626 ifp->if_hwassist = sc->sc_csum_features;
1627 sc->sc_ifflags = ifp->if_flags;
1634 hme_setladrf(sc, 1);
1640 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1644 ifp->if_capenable = ifr->ifr_reqcap;
1645 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1646 ifp->if_hwassist = sc->sc_csum_features;
1648 ifp->if_hwassist = 0;
1652 error = ether_ioctl(ifp, cmd, data);
1660 hme_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
1662 uint32_t crc, *hash = arg;
1664 crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
/* Just want the 6 most significant bits. */
crc >>= 26;
/* Set the corresponding bit in the filter. */
hash[crc >> 4] |= 1 << (crc & 0xf);
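/*
 * I.e. of the remaining 6 bits, the upper two (crc >> 4) select one of
 * the four 16-bit hash table registers and the lower four select the
 * bit within it.
 */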
1674 * Set up the logical address filter.
1677 hme_setladrf(struct hme_softc *sc, int reenable)
1679 struct ifnet *ifp = sc->sc_ifp;
1683 HME_LOCK_ASSERT(sc, MA_OWNED);
1684 /* Clear the hash table. */
1685 hash[3] = hash[2] = hash[1] = hash[0] = 0;
1687 /* Get the current RX configuration. */
1688 macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1691 * Turn off promiscuous mode, promiscuous group mode (all multicast),
 * and hash filter. Depending on the case, the right bit will be
 * enabled.
1695 macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
 * Disable the receiver while changing its state, as the documentation
 * mandates.
 * We then must wait until the bit clears in the register. This should
 * take at most 3.5ms.
1703 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1704 HME_MAC_RXCFG_ENABLE, 0))
1705 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1706 /* Disable the hash filter before writing to the filter registers. */
1707 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1708 HME_MAC_RXCFG_HENABLE, 0))
1709 device_printf(sc->sc_dev, "cannot disable hash filter\n");
1711 /* Make the RX MAC really SIMPLEX. */
1712 macc |= HME_MAC_RXCFG_ME;
1714 macc |= HME_MAC_RXCFG_ENABLE;
1716 macc &= ~HME_MAC_RXCFG_ENABLE;
1718 if ((ifp->if_flags & IFF_PROMISC) != 0) {
1719 macc |= HME_MAC_RXCFG_PMISC;
1722 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
1723 macc |= HME_MAC_RXCFG_PGRP;
1727 macc |= HME_MAC_RXCFG_HENABLE;
 * Set up the multicast address filter by passing all multicast addresses
 * through a CRC generator and then using the high order 6 bits as an
 * index into the 64 bit logical address filter. The high order bits
 * select the word, while the rest of the bits select the bit within
 * the word.
1736 if_foreach_llmaddr(ifp, hme_hash_maddr, &hash);
1739 /* Now load the hash table into the chip */
1740 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
1741 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
1742 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
1743 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
1744 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
1745 macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
1747 device_printf(sc->sc_dev, "cannot configure RX MAC\n");