/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
 * HME Ethernet module driver.
 *
 * The HME is e.g. part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually aligned to 2^11 boundaries, and
 * 2 bytes are skipped to make sure the header after the ethernet header is
 * aligned on a natural boundary, so this ensures minimal wastage in the most
 * common case.
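 * For example, with this 2 byte offset (HME_RXOFFS) a 14 byte ethernet
 * header ends at offset 2 + 14 = 16 within the cluster, so the following
 * IP header starts on a 16 byte (and hence natural) boundary.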
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
 * maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural boundary.
 *
 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
 * offloading. In reality, the same technique can be used for UDP datagrams,
 * too. However, the hardware doesn't compensate the checksum for UDP
 * datagrams, which can yield 0x0. As a safeguard, UDP checksum offload is
 * disabled by default. It can be reactivated by setting the special link
 * option link0 with ifconfig(8).
 */
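/*
 * For example (assuming the interface attached as hme0):
 *     ifconfig hme0 link0     # enable UDP checksum offload
 *     ifconfig hme0 -link0    # disable it again
 */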
#define HME_CSUM_FEATURES (CSUM_TCP)

#define KTR_HME KTR_CT2 /* XXX */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/hme/if_hmereg.h>
#include <dev/hme/if_hmevar.h>
CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
static void hme_start(struct ifnet *);
static void hme_start_locked(struct ifnet *);
static void hme_stop(struct hme_softc *);
static int hme_ioctl(struct ifnet *, u_long, caddr_t);
static void hme_tick(void *);
static int hme_watchdog(struct hme_softc *);
static void hme_init(void *);
static void hme_init_locked(struct hme_softc *);
static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int hme_meminit(struct hme_softc *);
static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void hme_mifinit(struct hme_softc *);
static void hme_setladrf(struct hme_softc *, int);

static int hme_mediachange(struct ifnet *);
static int hme_mediachange_locked(struct hme_softc *);
static void hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int hme_load_txmbuf(struct hme_softc *, struct mbuf **);
static void hme_read(struct hme_softc *, int, int, u_int32_t);
static void hme_eint(struct hme_softc *, u_int);
static void hme_rint(struct hme_softc *);
static void hme_tint(struct hme_softc *);
static void hme_rxcksum(struct mbuf *, u_int32_t);

static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
devclass_t hme_devclass;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);
#define HME_SPC_READ_4(spc, sc, offs) \
    bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
        (sc)->sc_ ## spc ## o + (offs))
#define HME_SPC_WRITE_4(spc, sc, offs, v) \
    bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
        (sc)->sc_ ## spc ## o + (offs), (v))
#define HME_SPC_BARRIER(spc, sc, offs, l, f) \
    bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
        (sc)->sc_ ## spc ## o + (offs), (l), (f))

#define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs))
#define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define HME_SEB_BARRIER(sc, offs, l, f) \
    HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
#define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs))
#define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define HME_ERX_BARRIER(sc, offs, l, f) \
    HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
#define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs))
#define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define HME_ETX_BARRIER(sc, offs, l, f) \
    HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
#define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs))
#define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define HME_MAC_BARRIER(sc, offs, l, f) \
    HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
#define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs))
#define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v))
#define HME_MIF_BARRIER(sc, offs, l, f) \
    HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
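/*
 * For instance, HME_SEB_READ_4(sc, offs) expands via HME_SPC_READ_4 to
 * bus_space_read_4((sc)->sc_sebt, (sc)->sc_sebh, (sc)->sc_sebo + (offs)),
 * i.e. a 4-byte read of a Shared Ethernet Block register through the
 * tag/handle/offset triple that the front-end initialized.
 */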
#define HME_WHINE(dev, ...) do { \
    if (hme_nerr++ < HME_MAXERR) \
        device_printf(dev, __VA_ARGS__); \
    if (hme_nerr == HME_MAXERR) { \
        device_printf(dev, "too many errors; not reporting " \
/* Support oversized VLAN frames. */
#define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
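/*
 * With the standard values ETHER_MAX_LEN = 1518 and ETHER_VLAN_ENCAP_LEN = 4
 * this works out to 1522 bytes, i.e. a maximum-sized ethernet frame plus one
 * 802.1Q VLAN tag.
 */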
hme_config(struct hme_softc *sc)
    struct mii_softc *child;
    int error, rdesc, tdesc, i;

    ifp = sc->sc_ifp = if_alloc(IFT_ETHER);

    /*
     * HME common initialization.
     *
     * hme_softc fields that must be initialized by the front-end:
     *
     * the bus handles, tags and offsets (split for SBus compatibility):
     *     sc_seb{t,h,o} (Shared Ethernet Block registers)
     *     sc_erx{t,h,o} (Receiver Unit registers)
     *     sc_etx{t,h,o} (Transmitter Unit registers)
     *     sc_mac{t,h,o} (MAC registers)
     *     sc_mif{t,h,o} (Management Interface registers)
     *
     * the maximum bus burst size:
     *     sc_burst
     */

    callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
    /* Make sure the chip is stopped. */

    error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
        NULL, NULL, &sc->sc_pdmatag);
    /*
     * Create control, RX and TX mbuf DMA tags.
     * Buffer descriptors must be aligned on a 2048 byte boundary;
     * take this into account when calculating the size. Note that
     * the maximum number of descriptors (256) occupies 2048 bytes,
     * so we allocate that much regardless of HME_N*DESC.
     */
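    /*
     * (256 descriptors at 8 bytes per descriptor make up exactly 2048
     * bytes, so one maximum-sized ring fills one aligned block exactly.)
     */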
    error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
        1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
    error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
        1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);

    error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
        BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
        MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
        NULL, NULL, &sc->sc_tdmatag);
    /* Allocate the control DMA buffer. */
    error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
        BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
        device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);

    /* Load the control DMA buffer. */
    sc->sc_rb.rb_dmabase = 0;
    if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
        sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
        sc->sc_rb.rb_dmabase == 0) {
        device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
            error);

    CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
        sc->sc_rb.rb_dmabase);
    /*
     * Prepare the RX descriptors. rdesc serves as marker for the last
     * processed descriptor and may be used later on.
     */
    for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
        sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
        error = bus_dmamap_create(sc->sc_rdmatag, 0,
            &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
    }
    error = bus_dmamap_create(sc->sc_rdmatag, 0,
        &sc->sc_rb.rb_spare_dmamap);
    /* Same for the TX descs. */
    for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
        sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
        error = bus_dmamap_create(sc->sc_tdmatag, 0,
            &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
    }
    sc->sc_csum_features = HME_CSUM_FEATURES;
    /* Initialize ifnet structure. */
    if_initname(ifp, device_get_name(sc->sc_dev),
        device_get_unit(sc->sc_dev));
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_start = hme_start;
    ifp->if_ioctl = hme_ioctl;
    ifp->if_init = hme_init;
    IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
    ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
    IFQ_SET_READY(&ifp->if_snd);
    if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
        hme_mediastatus)) != 0) {
        device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
    }
    sc->sc_mii = device_get_softc(sc->sc_miibus);
    /*
     * Walk along the list of attached MII devices and
     * establish an `MII instance' to `PHY number'
     * mapping. We'll use this mapping to enable the MII
     * drivers of the external transceiver according to
     * the currently selected media.
     */
    sc->sc_phys[0] = sc->sc_phys[1] = -1;
    LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
        /*
         * Note: we support just two PHYs: the built-in
         * internal device and an external on the MII
         */
        if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
            child->mii_phy != HME_PHYAD_INTERNAL) ||
            child->mii_inst > 1) {
            device_printf(sc->sc_dev, "cannot accommodate "
                "MII device %s at phy %d, instance %d\n",
                device_get_name(child->mii_dev),
                child->mii_phy, child->mii_inst);
        }
        sc->sc_phys[child->mii_inst] = child->mii_phy;
    }
    /* Attach the interface. */
    ether_ifattach(ifp, sc->sc_enaddr);

    /*
     * Tell the upper layer(s) we support long frames/checksum offloads.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
    ifp->if_hwassist |= sc->sc_csum_features;
    ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
    for (i = 0; i < tdesc; i++) {
        bus_dmamap_destroy(sc->sc_tdmatag,
            sc->sc_rb.rb_txdesc[i].htx_dmamap);
    }
    bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
    for (i = 0; i < rdesc; i++) {
        bus_dmamap_destroy(sc->sc_rdmatag,
            sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
    }
    bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
    bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
    bus_dma_tag_destroy(sc->sc_tdmatag);
    bus_dma_tag_destroy(sc->sc_rdmatag);
    bus_dma_tag_destroy(sc->sc_cdmatag);
    bus_dma_tag_destroy(sc->sc_pdmatag);
hme_detach(struct hme_softc *sc)
    struct ifnet *ifp = sc->sc_ifp;

    callout_drain(&sc->sc_tick_ch);
    device_delete_child(sc->sc_dev, sc->sc_miibus);

    for (i = 0; i < HME_NTXQ; i++) {
        bus_dmamap_destroy(sc->sc_tdmatag,
            sc->sc_rb.rb_txdesc[i].htx_dmamap);
    }
    bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
    for (i = 0; i < HME_NRXDESC; i++) {
        bus_dmamap_destroy(sc->sc_rdmatag,
            sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
    }
    bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
    bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
    bus_dma_tag_destroy(sc->sc_tdmatag);
    bus_dma_tag_destroy(sc->sc_rdmatag);
    bus_dma_tag_destroy(sc->sc_cdmatag);
    bus_dma_tag_destroy(sc->sc_pdmatag);
hme_suspend(struct hme_softc *sc)

hme_resume(struct hme_softc *sc)
    struct ifnet *ifp = sc->sc_ifp;

    if ((ifp->if_flags & IFF_UP) != 0)
        hme_init_locked(sc);
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
    struct hme_softc *sc = (struct hme_softc *)xsc;

    KASSERT(nsegs == 1,
        ("%s: too many DMA segments (%d)", __func__, nsegs));
    sc->sc_rb.rb_dmabase = segs[0].ds_addr;
    struct hme_softc *sc = arg;

    HME_LOCK_ASSERT(sc, MA_OWNED);

    /*
     * Unload collision counters
     */
    ifp->if_collisions +=
        HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
        HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
        HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
        HME_MAC_READ_4(sc, HME_MACI_LTCNT);

    /*
     * then clear the hardware counters.
     */
    HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
    HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
    HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
    HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);

    mii_tick(sc->sc_mii);

    if (hme_watchdog(sc) == EJUSTRETURN)
        return;

    callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
hme_stop(struct hme_softc *sc)

    callout_stop(&sc->sc_tick_ch);
    sc->sc_wdog_timer = 0;
    sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
    sc->sc_flags &= ~HME_LINK;

    /* Mask all interrupts */
    HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);

    /* Reset transmitter and receiver */
    HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
        HME_SEB_RESET_ERX);
    HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    for (n = 0; n < 20; n++) {
        v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
        if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
            return;
    }

    device_printf(sc->sc_dev, "hme_stop: reset failed\n");
/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
hme_discard_rxbuf(struct hme_softc *sc, int ix)

    /*
     * Dropped a packet, reinitialize the descriptor and turn the
     * ownership back to the hardware.
     */
    HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
        ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
        &sc->sc_rb.rb_rxdesc[ix])));
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
    struct hme_rxdesc *rd;
    bus_dma_segment_t segs[1];

    rd = &sc->sc_rb.rb_rxdesc[ri];
    unmap = rd->hrx_m != NULL;
    if (unmap && keepold) {
        /*
         * Reinitialize the descriptor flags, as they may have been
         * altered by the hardware.
         */
        hme_discard_rxbuf(sc, ri);
        return (0);
    }
    if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
        return (ENOBUFS);
    m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
    b = mtod(m, uintptr_t);
    /*
     * Required alignment boundary. At least 16 is needed, but since
     * the mapping must be done in a way that a burst can start on a
     * natural boundary we might need to extend this.
     */
    a = imax(HME_MINRXALIGN, sc->sc_burst);
    /*
     * Make sure the buffer is suitably aligned. The 2 byte offset is
     * removed when the mbuf is handed up. XXX: this ensures at least
     * 16 byte alignment of the header adjacent to the ethernet header,
     * which should be sufficient in all cases. Nevertheless, this
     * second-guesses
     */
    m_adj(m, roundup2(b, a) - b);
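    /*
     * Example: with sc_burst = 64 the boundary a is 64; a cluster
     * starting at b = 0x...1c12 is advanced by roundup2(b, 64) - b =
     * 0x...1c40 - 0x...1c12 = 46 bytes so that the mapped buffer
     * starts on a 64 byte boundary.
     */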
    if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
        m, segs, &nsegs, 0) != 0) {
        m_freem(m);
        return (ENOBUFS);
    }
    /* If nsegs is wrong then the stack is corrupt. */
    KASSERT(nsegs == 1,
        ("%s: too many DMA segments (%d)", __func__, nsegs));
    if (unmap) {
        bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
    }
    map = rd->hrx_dmamap;
    rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
    sc->sc_rb.rb_spare_dmamap = map;
    bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
    HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
        segs[0].ds_addr);
    rd->hrx_m = m;
    HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
        HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
hme_meminit(struct hme_softc *sc)
    struct hme_ring *hr = &sc->sc_rb;
    struct hme_txdesc *td;

    dma = hr->rb_dmabase;

    /*
     * Allocate transmit descriptors
     */
    p += HME_NTXDESC * HME_XD_SIZE;
    dma += HME_NTXDESC * HME_XD_SIZE;

    /*
     * We have reserved descriptor space until the next 2048 byte
     * boundary.
     */
    dma = (bus_addr_t)roundup((u_long)dma, 2048);
    p = (caddr_t)roundup((u_long)p, 2048);

    /*
     * Allocate receive descriptors
     */
    p += HME_NRXDESC * HME_XD_SIZE;
    dma += HME_NRXDESC * HME_XD_SIZE;

    /* Again move forward to the next 2048 byte boundary. */
    dma = (bus_addr_t)roundup((u_long)dma, 2048);
    p = (caddr_t)roundup((u_long)p, 2048);
    /*
     * Initialize transmit buffer descriptors
     */
    for (i = 0; i < HME_NTXDESC; i++) {
        HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
        HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
    }

    STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
    STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
    for (i = 0; i < HME_NTXQ; i++) {
        td = &sc->sc_rb.rb_txdesc[i];
        if (td->htx_m != NULL) {
            bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
                BUS_DMASYNC_POSTWRITE);
            bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
        }
        STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
    }

    /*
     * Initialize receive buffer descriptors
     */
    for (i = 0; i < HME_NRXDESC; i++) {
        error = hme_add_rxbuf(sc, i, 1);
    }

    bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    hr->rb_tdhead = hr->rb_tdtail = 0;

    CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
        hr->rb_txddma);
    CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
        hr->rb_rxddma);
    CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
        *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
    CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
        *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)

    HME_MAC_WRITE_4(sc, reg, val);
    HME_MAC_BARRIER(sc, reg, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    if (clr == 0 && set == 0)
        return (1); /* just write, no bits to wait for */
    do {
        val = HME_MAC_READ_4(sc, reg);
        /* After 3.5ms, we should have been done. */
        device_printf(sc->sc_dev, "timeout while writing to "
            "MAC configuration register\n");
    } while ((val & clr) != 0 && (val & set) != set);
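/*
 * Usage example: hme_mac_bitflip(sc, HME_MACI_RXCFG, v,
 * HME_MAC_RXCFG_ENABLE, 0) writes v and then waits for the enable bit to
 * read back as clear, i.e. for the RX MAC to actually stop; a zero return
 * indicates that the wait timed out.
 */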
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
hme_init(void *xsc)
    struct hme_softc *sc = (struct hme_softc *)xsc;

hme_init_locked(struct hme_softc *sc)
    struct ifnet *ifp = sc->sc_ifp;

    HME_LOCK_ASSERT(sc, MA_OWNED);

    /*
     * Initialization sequence. The numbered steps below correspond
     * to the sequence outlined in section 6.3.5.1 in the Ethernet
     * Channel Engine manual (part of the PCIO manual).
     * See also the STP2002-STQ document from Sun Microsystems.
     */

    /* step 1 & 2. Reset the Ethernet Channel */

    /* Re-initialize the MIF */

    /* Mask all MIF interrupts, just in case */
    HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
    /* step 3. Setup data structures in host memory */
    if (hme_meminit(sc) != 0) {
        device_printf(sc->sc_dev, "out of buffers; init aborted.");
    }

    /* step 4. TX MAC registers & counters */
    HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
    HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
    HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
    HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
    HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);

    /* Load station MAC address */
    HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
    HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
    HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
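    /*
     * Example: for the station address 08:00:20:12:34:56 these three
     * 16-bit writes are 0x0800, 0x2012 and 0x3456, most significant
     * word first.
     */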
    /*
     * Init seed for backoff
     * (source suggested by manual: low 10 bits of MAC address)
     */
    v = ((ea[4] << 8) | ea[5]) & 0x3fff;
    HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);

    /* Note: Accepting power-on default for other MAC registers here.. */
    /* step 5. RX MAC registers & counters */

    /* step 6 & 7. Program Descriptor Ring Base Addresses */
    HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
    /* Transmit Descriptor ring size: in increments of 16 */
    HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
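    /* (e.g. with HME_NTXDESC = 256 this writes 256 / 16 - 1 = 15.) */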
    HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
    HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);

    /* step 8. Global Configuration & Interrupt Mask */
    HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
        ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
        HME_SEB_STAT_HOSTTOTX |
        HME_SEB_STAT_RXTOHOST |
        HME_SEB_STAT_TXALL |
        HME_SEB_STAT_TXPERR |
        HME_SEB_STAT_RCNTEXP |
        HME_SEB_STAT_ALL_ERRORS ));
    switch (sc->sc_burst) {
    case 16:
        v = HME_SEB_CFG_BURST16;
        break;
    case 32:
        v = HME_SEB_CFG_BURST32;
        break;
    case 64:
        v = HME_SEB_CFG_BURST64;
        break;
    }
    /*
     * Blindly setting 64bit transfers may hang PCI cards (Cheerio?).
     * Allowing 64bit transfers breaks TX checksum offload as well.
     * It is not known whether this is due to a hardware bug or to a bug
     * in the driver's DMA handling.
     *
     * if (sc->sc_flags & HME_PCI == 0)
     *     v |= HME_SEB_CFG_64BIT;
     */
    HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
    /* step 9. ETX Configuration: use mostly default values */

    v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
    v |= HME_ETX_CFG_DMAENABLE;
    HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);

    /* step 10. ERX Configuration */
    v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

    /* Encode Receive Descriptor ring size: four possible values */
    v &= ~HME_ERX_CFG_RINGSIZEMSK;
    switch (HME_NRXDESC) {
    case 32:
        v |= HME_ERX_CFG_RINGSIZE32;
        break;
    case 64:
        v |= HME_ERX_CFG_RINGSIZE64;
        break;
    case 128:
        v |= HME_ERX_CFG_RINGSIZE128;
        break;
    case 256:
        v |= HME_ERX_CFG_RINGSIZE256;
        break;
    default:
        printf("hme: invalid Receive Descriptor ring size\n");
        break;
    }

    /* Enable DMA, fix RX first byte offset. */
    v &= ~HME_ERX_CFG_FBO_MASK;
    v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
    /* RX TCP/UDP checksum offset */
    n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
    n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
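    /*
     * For a frame without IP options this is (ETHER_HDR_LEN +
     * sizeof(struct ip)) / 2 = (14 + 20) / 2 = 17 half-words, i.e.
     * hardware checksumming starts right after a standard IP header.
     */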
    CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
    HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);

    /* step 11. XIF Configuration */
    v = HME_MAC_READ_4(sc, HME_MACI_XIF);
    CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
    HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);

    /* step 12. RX_MAC Configuration Register */
    v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
    v |= HME_MAC_RXCFG_ENABLE;
    v &= ~(HME_MAC_RXCFG_DCRCS);
    CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
    HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

    /* step 13. TX_MAC Configuration Register */
    v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
    v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
    CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
    HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);

    /* step 14. Issue Transmit Pending command */

    /* Debug: double-check. */
    CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
        "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
        HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
        HME_ERX_READ_4(sc, HME_ERXI_RING),
        HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
    CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
        HME_SEB_READ_4(sc, HME_SEBI_IMASK),
        HME_ERX_READ_4(sc, HME_ERXI_CFG),
        HME_ETX_READ_4(sc, HME_ETXI_CFG));
    CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
        HME_MAC_READ_4(sc, HME_MACI_RXCFG),
        HME_MAC_READ_4(sc, HME_MACI_TXCFG));

    ifp->if_drv_flags |= IFF_DRV_RUNNING;
    ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

    /* Set the current media. */
    hme_mediachange_locked(sc);

    /* Start the one second timer. */
    sc->sc_wdog_timer = 0;
    callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
/*
 * Routine to DMA map an mbuf chain, set up the descriptor rings
 * accordingly and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors
 * to map the packet, or an errno otherwise.
 *
 * XXX: this relies on the fact that segments returned by
 * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
 * boundary on (i.e. potentially before ds_addr) to the first
 * boundary beyond the end. This is usually a safe assumption to
 * make, but is not documented.
 */
hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
    bus_dma_segment_t segs[HME_NTXSEGS];
    struct hme_txdesc *htx;
    int error, i, nsegs, pci, ri, si;
    uint32_t cflags, flags;

    if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
        return (-1);

    if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
        if (M_WRITABLE(*m0) == 0) {
            m = m_dup(*m0, M_DONTWAIT);
        }
        i = sizeof(struct ether_header);
        m = m_pullup(*m0, i + sizeof(struct ip));
        ip = (struct ip *)(mtod(m, caddr_t) + i);
        i += (ip->ip_hl << 2);
        cflags = i << HME_XD_TXCKSUM_SSHIFT |
            ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
            HME_XD_TXCKSUM_ENABLE;
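        /*
         * Example: for TCP over IPv4 without IP options, i is first 14
         * (the ethernet header) and becomes 14 + 20 = 34 after adding
         * the IP header length; csum_data for TCP is the offset of
         * th_sum within the TCP header (16), so the hardware stores
         * the checksum at byte 34 + 16 = 50 from the frame start.
         */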
    error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
        *m0, segs, &nsegs, 0);
    if (error == EFBIG) {
        m = m_collapse(*m0, M_DONTWAIT, HME_NTXSEGS);

        error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
            *m0, segs, &nsegs, 0);
    } else if (error != 0)
        return (error);
    /* If nsegs is wrong then the stack is corrupt. */
    KASSERT(nsegs <= HME_NTXSEGS,
        ("%s: too many DMA segments (%d)", __func__, nsegs));

    if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
        bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
        /* Retry with m_collapse(9)? */
        return (-1);
    }
    bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
    si = ri = sc->sc_rb.rb_tdhead;
    txd = sc->sc_rb.rb_txd;
    pci = sc->sc_flags & HME_PCI;
    CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
        HME_XD_GETFLAGS(pci, txd, ri));
    for (i = 0; i < nsegs; i++) {
        /* Fill the ring entry. */
        flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
        if (i == 0)
            flags |= HME_XD_SOP | cflags;
        else
            flags |= HME_XD_OWN | cflags;
        CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
            ri, si, flags);
        HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
        HME_XD_SETFLAGS(pci, txd, ri, flags);
        sc->sc_rb.rb_td_nbusy++;
        htx->htx_lastdesc = ri;
        ri = (ri + 1) % HME_NTXDESC;
    }
    sc->sc_rb.rb_tdhead = ri;
    /* set EOP on the last descriptor */
    ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
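    /*
     * (ri points one past the last filled entry here; adding
     * HME_NTXDESC - 1 modulo the ring size steps back one slot and
     * handles wrap-around, e.g. ri = 0 yields (0 + 255) % 256 = 255
     * for a 256-entry ring.)
     */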
    flags = HME_XD_GETFLAGS(pci, txd, ri);
    flags |= HME_XD_EOP;
    CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
        flags);
    HME_XD_SETFLAGS(pci, txd, ri, flags);

    /* Turn the first descriptor ownership to the hme */
    flags = HME_XD_GETFLAGS(pci, txd, si);
    flags |= HME_XD_OWN;
    CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
        ri, flags);
    HME_XD_SETFLAGS(pci, txd, si, flags);
    STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
    STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);

    /* start the transmission. */
    HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
/*
 * Pass a packet to the higher levels.
 */
hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
    struct ifnet *ifp = sc->sc_ifp;

    if (len <= sizeof(struct ether_header) ||
        len > HME_MAX_FRAMESIZE) {
        HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
            len);
        hme_discard_rxbuf(sc, ix);
        return;
    }

    m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
    CTR1(KTR_HME, "hme_read: len %d", len);

    if (hme_add_rxbuf(sc, ix, 0) != 0) {
        /*
         * hme_add_rxbuf will leave the old buffer in the ring until
         * it is sure that a new buffer can be mapped. If it cannot,
         * drop the packet, but leave the interface up.
         */
        hme_discard_rxbuf(sc, ix);
        return;
    }

    m->m_pkthdr.rcvif = ifp;
    m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
    m_adj(m, HME_RXOFFS);
    /* RX TCP/UDP checksum */
    if (ifp->if_capenable & IFCAP_RXCSUM)
        hme_rxcksum(m, flags);
    /* Pass the packet up. */
    (*ifp->if_input)(ifp, m);
hme_start(struct ifnet *ifp)
    struct hme_softc *sc = ifp->if_softc;

    hme_start_locked(ifp);

hme_start_locked(struct ifnet *ifp)
    struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
        return;

    for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
        sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
        IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
        error = hme_load_txmbuf(sc, &m);
            ifp->if_drv_flags |= IFF_DRV_OACTIVE;
            IFQ_DRV_PREPEND(&ifp->if_snd, m);
    }

    bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    sc->sc_wdog_timer = 5;
/*
 * Transmit interrupt.
 */
hme_tint(struct hme_softc *sc)
    struct ifnet *ifp = sc->sc_ifp;
    struct hme_txdesc *htx;
    unsigned int ri, txflags;

    txd = sc->sc_rb.rb_txd;
    htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
    bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
    /* Fetch current position in the transmit ring */
    for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
        if (sc->sc_rb.rb_td_nbusy <= 0) {
            CTR0(KTR_HME, "hme_tint: not busy!");
            break;
        }
        txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
        CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
        if ((txflags & HME_XD_OWN) != 0)
            break;
        CTR0(KTR_HME, "hme_tint: not owned");
        --sc->sc_rb.rb_td_nbusy;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        /* Complete packet transmitted? */
        if ((txflags & HME_XD_EOP) == 0)
            continue;

        KASSERT(htx->htx_lastdesc == ri,
            ("%s: ring indices skewed: %d != %d!",
            __func__, htx->htx_lastdesc, ri));
        bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
        m_freem(htx->htx_m);
        STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
        STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
        htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
    }
    sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;

    sc->sc_rb.rb_tdtail = ri;

    hme_start_locked(ifp);
/*
 * RX TCP/UDP checksum
 */
hme_rxcksum(struct mbuf *m, u_int32_t flags)
    struct ether_header *eh;
    int32_t hlen, len, pktlen;
    u_int16_t cksum, *opts;

    pktlen = m->m_pkthdr.len;
    if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
        return;
    eh = mtod(m, struct ether_header *);
    if (eh->ether_type != htons(ETHERTYPE_IP))
        return;
    ip = (struct ip *)(eh + 1);
    if (ip->ip_v != IPVERSION)
        return;

    hlen = ip->ip_hl << 2;
    pktlen -= sizeof(struct ether_header);
    if (hlen < sizeof(struct ip))
        return;
    if (ntohs(ip->ip_len) < hlen)
        return;
    if (ntohs(ip->ip_len) != pktlen)
        return;
    if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
        return; /* can't handle fragmented packet */

        if (pktlen < (hlen + sizeof(struct tcphdr)))
            return;
        if (pktlen < (hlen + sizeof(struct udphdr)))
            return;
        uh = (struct udphdr *)((caddr_t)ip + hlen);
        if (uh->uh_sum == 0)
            return; /* no checksum */
    cksum = ~(flags & HME_XD_RXCKSUM);
    /* checksum fixup for IP options */
    len = hlen - sizeof(struct ip);
        opts = (u_int16_t *)(ip + 1);
        for (; len > 0; len -= sizeof(u_int16_t), opts++) {
            temp32 = cksum - *opts;
            temp32 = (temp32 >> 16) + (temp32 & 65535);
            cksum = temp32 & 65535;
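            /*
             * This is a one's complement subtraction of each option
             * word with end-around borrow: e.g. cksum = 0x0000 minus
             * *opts = 0x0001 gives temp32 = 0xffffffff, which folds to
             * 0xffff + 0xffff = 0x1fffe and is masked to 0xfffe, the
             * one's complement representation of -1.
             */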
        }
    m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
    m->m_pkthdr.csum_data = cksum;
/*
 * Receive interrupt.
 */
hme_rint(struct hme_softc *sc)
    caddr_t xdr = sc->sc_rb.rb_rxd;
    struct ifnet *ifp = sc->sc_ifp;
    unsigned int ri, len;

    /*
     * Process all buffers with valid data.
     */
    bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
    for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
        flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
        CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
        if ((flags & HME_XD_OWN) != 0)
            break;

        if ((flags & HME_XD_OFL) != 0) {
            device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
                "flags=0x%x\n", ri, flags);
            hme_discard_rxbuf(sc, ri);
        } else {
            len = HME_XD_DECODE_RSIZE(flags);
            hme_read(sc, ri, len, flags);
        }
    }

    bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    sc->sc_rb.rb_rdtail = ri;
hme_eint(struct hme_softc *sc, u_int status)

    if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
        device_printf(sc->sc_dev, "XXXlink status changed: "
            "cfg=%#x, stat=%#x, sm=%#x\n",
            HME_MIF_READ_4(sc, HME_MIFI_CFG),
            HME_MIF_READ_4(sc, HME_MIFI_STAT),
            HME_MIF_READ_4(sc, HME_MIFI_SM));
        return;
    }

    /* check for fatal errors that need a reset to unfreeze the DMA engine */
    if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
        HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
        hme_init_locked(sc);
    }
    struct hme_softc *sc = (struct hme_softc *)v;

    status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
    CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);

    if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
        hme_eint(sc, status);

    if ((status & HME_SEB_STAT_RXTOHOST) != 0)
        hme_rint(sc);

    if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
        hme_tint(sc);
hme_watchdog(struct hme_softc *sc)
    struct ifnet *ifp = sc->sc_ifp;

    HME_LOCK_ASSERT(sc, MA_OWNED);

    CTR1(KTR_HME, "hme_watchdog: status %x",
        (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));

    if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
        return (0);

    if ((sc->sc_flags & HME_LINK) != 0)
        device_printf(sc->sc_dev, "device timeout\n");
    else if (bootverbose)
        device_printf(sc->sc_dev, "device timeout (no link)\n");

    hme_init_locked(sc);
    hme_start_locked(ifp);
    return (EJUSTRETURN);
/*
 * Initialize the MII Management Interface
 */
hme_mifinit(struct hme_softc *sc)

    /*
     * Configure the MIF in frame mode, polling disabled, internal PHY
     */
    HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);

    /*
     * If the currently selected media uses the external transceiver,
     * enable its MII drivers (which basically isolates the internal
     * one and vice versa). In case the current media hasn't been set
     * yet, we default to the internal transceiver.
     */
    v = HME_MAC_READ_4(sc, HME_MACI_XIF);
    if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
        sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
        HME_PHYAD_EXTERNAL)
        v |= HME_MAC_XIF_MIIENABLE;
    else
        v &= ~HME_MAC_XIF_MIIENABLE;
    HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
hme_mii_readreg(device_t dev, int phy, int reg)
    struct hme_softc *sc;

    /* We can at most have two PHYs. */
    if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
        return (0);

    sc = device_get_softc(dev);
    /* Select the desired PHY in the MIF configuration register */
    v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
    if (phy == HME_PHYAD_EXTERNAL)
        v |= HME_MIF_CFG_PHY;
    else
        v &= ~HME_MIF_CFG_PHY;
    HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

    /* Construct the frame command */
    v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
        (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
        (phy << HME_MIF_FO_PHYAD_SHIFT) |
        (reg << HME_MIF_FO_REGAD_SHIFT);

    HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
    HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    for (n = 0; n < 100; n++) {
        v = HME_MIF_READ_4(sc, HME_MIFI_FO);
        if (v & HME_MIF_FO_TALSB)
            return (v & HME_MIF_FO_DATA);
    }

    device_printf(sc->sc_dev, "mii_read timeout\n");
hme_mii_writereg(device_t dev, int phy, int reg, int val)
    struct hme_softc *sc;

    /* We can at most have two PHYs. */
    if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
        return (0);

    sc = device_get_softc(dev);
    /* Select the desired PHY in the MIF configuration register */
    v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
    if (phy == HME_PHYAD_EXTERNAL)
        v |= HME_MIF_CFG_PHY;
    else
        v &= ~HME_MIF_CFG_PHY;
    HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

    /* Construct the frame command */
    v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
        (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
        (phy << HME_MIF_FO_PHYAD_SHIFT) |
        (reg << HME_MIF_FO_REGAD_SHIFT) |
        (val & HME_MIF_FO_DATA);

    HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
    HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
        BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
    for (n = 0; n < 100; n++) {
        v = HME_MIF_READ_4(sc, HME_MIFI_FO);
        if (v & HME_MIF_FO_TALSB)
            return (1);
    }

    device_printf(sc->sc_dev, "mii_write timeout\n");
hme_mii_statchg(device_t dev)
    struct hme_softc *sc;
    uint32_t rxcfg, txcfg;

    sc = device_get_softc(dev);

    if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
        device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");

    if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
        IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
        sc->sc_flags |= HME_LINK;
    else
        sc->sc_flags &= ~HME_LINK;

    txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
    if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
        HME_MAC_TXCFG_ENABLE, 0))
        device_printf(sc->sc_dev, "cannot disable TX MAC\n");
    rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
    if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
        HME_MAC_RXCFG_ENABLE, 0))
        device_printf(sc->sc_dev, "cannot disable RX MAC\n");

    /* Set the MAC Full Duplex bit appropriately. */
    if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
        txcfg |= HME_MAC_TXCFG_FULLDPLX;
    else
        txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
    HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);

    if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
        (sc->sc_flags & HME_LINK) != 0) {
        if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
            HME_MAC_TXCFG_ENABLE))
            device_printf(sc->sc_dev, "cannot enable TX MAC\n");
        if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
            HME_MAC_RXCFG_ENABLE))
            device_printf(sc->sc_dev, "cannot enable RX MAC\n");
    }
hme_mediachange(struct ifnet *ifp)
    struct hme_softc *sc = ifp->if_softc;

    error = hme_mediachange_locked(sc);

hme_mediachange_locked(struct hme_softc *sc)
    struct mii_softc *child;

    HME_LOCK_ASSERT(sc, MA_OWNED);

    if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
        device_printf(sc->sc_dev, "hme_mediachange_locked");

    hme_mifinit(sc);

    /*
     * If both PHYs are present reset them. This is required for
     * unisolating the previously isolated PHY when switching PHYs.
     * As the above hme_mifinit() call will set the MII drivers in
     * the XIF configuration register according to the currently
     * selected media, there should be no window during which the
     * data paths of both transceivers are open at the same time,
     * even if the PHY device drivers use MIIF_NOISOLATE.
     */
    if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
        LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
            mii_phy_reset(child);
    return (mii_mediachg(sc->sc_mii));
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
    struct hme_softc *sc = ifp->if_softc;

    if ((ifp->if_flags & IFF_UP) == 0) {
        return;
    }

    mii_pollstat(sc->sc_mii);
    ifmr->ifm_active = sc->sc_mii->mii_media_active;
    ifmr->ifm_status = sc->sc_mii->mii_media_status;
/*
 * Process an ioctl request.
 */
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
    struct hme_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;

        if ((ifp->if_flags & IFF_UP) != 0) {
            if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
                ((ifp->if_flags ^ sc->sc_ifflags) &
                (IFF_ALLMULTI | IFF_PROMISC)) != 0)
                hme_setladrf(sc, 1);
            else
                hme_init_locked(sc);
        } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
            hme_stop(sc);
        if ((ifp->if_flags & IFF_LINK0) != 0)
            sc->sc_csum_features |= CSUM_UDP;
        else
            sc->sc_csum_features &= ~CSUM_UDP;
        if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
            ifp->if_hwassist = sc->sc_csum_features;
        sc->sc_ifflags = ifp->if_flags;

        hme_setladrf(sc, 1);

        error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);

        ifp->if_capenable = ifr->ifr_reqcap;
        if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
            ifp->if_hwassist = sc->sc_csum_features;
        else
            ifp->if_hwassist = 0;

        error = ether_ioctl(ifp, cmd, data);
/*
 * Set up the logical address filter.
 */
hme_setladrf(struct hme_softc *sc, int reenable)
    struct ifnet *ifp = sc->sc_ifp;
    struct ifmultiaddr *inm;

    HME_LOCK_ASSERT(sc, MA_OWNED);
    /* Clear the hash table. */
    hash[3] = hash[2] = hash[1] = hash[0] = 0;

    /* Get the current RX configuration. */
    macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);

    /*
     * Turn off promiscuous mode, promiscuous group mode (all multicast),
     * and hash filter. Depending on the case, the right bits will be
     * set again below.
     */
    macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);

    /*
     * Disable the receiver while changing its state as the documentation
     * requires.
     * We then must wait until the bit clears in the register. This should
     * take at most 3.5ms.
     */
    if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
        HME_MAC_RXCFG_ENABLE, 0))
        device_printf(sc->sc_dev, "cannot disable RX MAC\n");
    /* Disable the hash filter before writing to the filter registers. */
    if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
        HME_MAC_RXCFG_HENABLE, 0))
        device_printf(sc->sc_dev, "cannot disable hash filter\n");

    /* Make the RX MAC really SIMPLEX. */
    macc |= HME_MAC_RXCFG_ME;
    if (reenable)
        macc |= HME_MAC_RXCFG_ENABLE;
    else
        macc &= ~HME_MAC_RXCFG_ENABLE;

    if ((ifp->if_flags & IFF_PROMISC) != 0) {
        macc |= HME_MAC_RXCFG_PMISC;
        goto chipit;
    }
    if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
        macc |= HME_MAC_RXCFG_PGRP;
        goto chipit;
    }

    macc |= HME_MAC_RXCFG_HENABLE;
    /*
     * Set up multicast address filter by passing all multicast addresses
     * through a crc generator, and then using the high order 6 bits as an
     * index into the 64 bit logical address filter. The high order bits
     * select the word, while the rest of the bits select the bit within
     * the word.
     */

    IF_ADDR_LOCK(ifp);
    TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
        if (inm->ifma_addr->sa_family != AF_LINK)
            continue;
        crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
            inm->ifma_addr), ETHER_ADDR_LEN);

        /* Just want the 6 most significant bits. */
        crc >>= 26;

        /* Set the corresponding bit in the filter. */
        hash[crc >> 4] |= 1 << (crc & 0xf);
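        /*
         * (Example: a 6-bit value of 0x2a, binary 101010, selects
         * hash[0x2a >> 4] = hash[2] and bit 0x2a & 0xf = 10 within
         * that word.)
         */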
    }
    IF_ADDR_UNLOCK(ifp);
chipit:
    /* Now load the hash table into the chip */
    HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
    HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
    HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
    HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
    if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
        macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
        HME_MAC_RXCFG_PMISC | HME_MAC_RXCFG_PGRP)))
        device_printf(sc->sc_dev, "cannot configure RX MAC\n");