2 * Copyright (c) 1999 The NetBSD Foundation, Inc.
3 * Copyright (c) 2001-2003 Thomas Moestl <tmm@FreeBSD.org>.
6 * This code is derived from software contributed to The NetBSD Foundation
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the NetBSD
20 * Foundation, Inc. and its contributors.
21 * 4. Neither the name of The NetBSD Foundation nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
37 * from: NetBSD: hme.c,v 1.45 2005/02/18 00:22:11 heas Exp
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
44 * HME Ethernet module driver.
46 * The HME is, for example, part of the PCIO PCI multi-function device.
47 * It supports TX gathering as well as TX and RX checksum offloading.
48 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
49 * for this offset: mbuf clusters usually sit on 2^11 byte boundaries, and 2
50 * bytes are skipped so that the header following the ethernet header is
51 * aligned on a natural boundary, ensuring minimal wastage in the common case.
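* For example, with a cluster on a 2^11 byte boundary, skipping 2 bytes puts
* the 14-byte ethernet header at offset 2, so the IP header that follows it
* starts at offset 16 and is naturally aligned.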
53 * Also, apparently, the buffers must extend to a DMA burst boundary beyond the
54 * maximum packet size (this is not verified). Buffers starting on odd
55 * boundaries must be mapped so that the burst can start on a natural boundary.
57 * STP2002QFP-UG says that the Ethernet hardware supports TCP checksum
58 * offloading. In reality, the same technique can be used for UDP datagrams
59 * too; however, the hardware doesn't compensate the checksum for UDP
60 * datagrams, which can yield a checksum of 0x0. As a safeguard, UDP checksum
61 * offload is disabled by default and can be enabled with the link0 option of ifconfig(8).
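/*
 * For example (device name assumed; see ifconfig(8)):
 *
 *	ifconfig hme0 link0	# enable UDP checksum offload
 *	ifconfig hme0 -link0	# disable it again
 */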
63 #define HME_CSUM_FEATURES (CSUM_TCP)
67 #define KTR_HME KTR_SPARE2 /* XXX */
69 #include <sys/param.h>
70 #include <sys/systm.h>
72 #include <sys/endian.h>
73 #include <sys/kernel.h>
74 #include <sys/module.h>
77 #include <sys/malloc.h>
78 #include <sys/socket.h>
79 #include <sys/sockio.h>
82 #include <net/ethernet.h>
84 #include <net/if_var.h>
85 #include <net/if_arp.h>
86 #include <net/if_dl.h>
87 #include <net/if_media.h>
88 #include <net/if_types.h>
89 #include <net/if_vlan_var.h>
91 #include <netinet/in.h>
92 #include <netinet/in_systm.h>
93 #include <netinet/ip.h>
94 #include <netinet/tcp.h>
95 #include <netinet/udp.h>
97 #include <dev/mii/mii.h>
98 #include <dev/mii/miivar.h>
100 #include <machine/bus.h>
102 #include <dev/hme/if_hmereg.h>
103 #include <dev/hme/if_hmevar.h>
105 CTASSERT(powerof2(HME_NRXDESC) && HME_NRXDESC >= 32 && HME_NRXDESC <= 256);
106 CTASSERT(HME_NTXDESC % 16 == 0 && HME_NTXDESC >= 16 && HME_NTXDESC <= 256);
108 static void hme_start(struct ifnet *);
109 static void hme_start_locked(struct ifnet *);
110 static void hme_stop(struct hme_softc *);
111 static int hme_ioctl(struct ifnet *, u_long, caddr_t);
112 static void hme_tick(void *);
113 static int hme_watchdog(struct hme_softc *);
114 static void hme_init(void *);
115 static void hme_init_locked(struct hme_softc *);
116 static int hme_add_rxbuf(struct hme_softc *, unsigned int, int);
117 static int hme_meminit(struct hme_softc *);
118 static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
119 u_int32_t, u_int32_t);
120 static void hme_mifinit(struct hme_softc *);
121 static void hme_setladrf(struct hme_softc *, int);
123 static int hme_mediachange(struct ifnet *);
124 static int hme_mediachange_locked(struct hme_softc *);
125 static void hme_mediastatus(struct ifnet *, struct ifmediareq *);
127 static int hme_load_txmbuf(struct hme_softc *, struct mbuf **);
128 static void hme_read(struct hme_softc *, int, int, u_int32_t);
129 static void hme_eint(struct hme_softc *, u_int);
130 static void hme_rint(struct hme_softc *);
131 static void hme_tint(struct hme_softc *);
132 static void hme_rxcksum(struct mbuf *, u_int32_t);
134 static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
136 devclass_t hme_devclass;
140 DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
141 MODULE_DEPEND(hme, miibus, 1, 1, 1);
143 #define HME_SPC_READ_4(spc, sc, offs) \
144 bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
146 #define HME_SPC_WRITE_4(spc, sc, offs, v) \
147 bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
149 #define HME_SPC_BARRIER(spc, sc, offs, l, f) \
150 bus_space_barrier((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
153 #define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs))
154 #define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v))
155 #define HME_SEB_BARRIER(sc, offs, l, f) \
156 HME_SPC_BARRIER(seb, (sc), (offs), (l), (f))
157 #define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs))
158 #define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v))
159 #define HME_ERX_BARRIER(sc, offs, l, f) \
160 HME_SPC_BARRIER(erx, (sc), (offs), (l), (f))
161 #define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs))
162 #define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v))
163 #define HME_ETX_BARRIER(sc, offs, l, f) \
164 HME_SPC_BARRIER(etx, (sc), (offs), (l), (f))
165 #define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs))
166 #define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v))
167 #define HME_MAC_BARRIER(sc, offs, l, f) \
168 HME_SPC_BARRIER(mac, (sc), (offs), (l), (f))
169 #define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs))
170 #define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v))
171 #define HME_MIF_BARRIER(sc, offs, l, f) \
172 HME_SPC_BARRIER(mif, (sc), (offs), (l), (f))
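/*
 * A sketch of how these accessors expand, assuming the elided macro bodies
 * add the per-bank register offset sc_<spc>o supplied by the front-end:
 *
 *	HME_SEB_READ_4(sc, HME_SEBI_STAT)
 *	    -> bus_space_read_4(sc->sc_sebt, sc->sc_sebh,
 *	           sc->sc_sebo + HME_SEBI_STAT)
 */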
175 #define HME_WHINE(dev, ...) do { \
176 if (hme_nerr++ < HME_MAXERR) \
177 device_printf(dev, __VA_ARGS__); \
178 if (hme_nerr == HME_MAXERR) { \
179 device_printf(dev, "too many errors; not reporting " \
180 "any more\n"); \
181 } \
182 } while (0)
184 /* Support oversized VLAN frames. */
185 #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN)
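/* With the standard values this amounts to 1518 + 4 = 1522 bytes. */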
188 hme_config(struct hme_softc *sc)
191 struct mii_softc *child;
193 int error, rdesc, tdesc, i;
195 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
200 * HME common initialization.
202 * hme_softc fields that must be initialized by the front-end:
207 * the bus handles, tags and offsets (split for SBus compatibility):
208 * sc_seb{t,h,o} (Shared Ethernet Block registers)
209 * sc_erx{t,h,o} (Receiver Unit registers)
210 * sc_etx{t,h,o} (Transmitter Unit registers)
211 * sc_mac{t,h,o} (MAC registers)
212 * sc_mif{t,h,o} (Management Interface registers)
214 * the maximum bus burst size:
219 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_lock, 0);
221 /* Make sure the chip is stopped. */
226 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
227 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
228 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
229 NULL, NULL, &sc->sc_pdmatag);
234 * Create control, RX and TX mbuf DMA tags.
235 * Buffer descriptors must be aligned on a 2048 byte boundary;
236 * take this into account when calculating the size. Note that
237 * the maximum number of descriptors (256) occupies 2048 bytes,
238 * so we allocate that much regardless of HME_N*DESC.
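/*
 * A sketch of the elided size calculation: each of the two descriptor areas
 * takes at most 2048 bytes (256 descriptors of 8 bytes), so presumably
 *
 *	size = 4096;
 */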
241 error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
242 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
243 1, size, 0, busdma_lock_mutex, &sc->sc_lock, &sc->sc_cdmatag);
247 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
248 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
249 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag);
253 error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
254 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
255 MCLBYTES * HME_NTXSEGS, HME_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW,
256 NULL, NULL, &sc->sc_tdmatag);
260 /* Allocate the control DMA buffer. */
261 error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
262 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdmamap);
264 device_printf(sc->sc_dev, "DMA buffer alloc error %d\n", error);
268 /* Load the control DMA buffer. */
269 sc->sc_rb.rb_dmabase = 0;
270 if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
271 sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
272 sc->sc_rb.rb_dmabase == 0) {
273 device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
277 CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
278 sc->sc_rb.rb_dmabase);
281 * Prepare the RX descriptors. rdesc serves as marker for the last
282 * processed descriptor and may be used later on.
284 for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
285 sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
286 error = bus_dmamap_create(sc->sc_rdmatag, 0,
287 &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
291 error = bus_dmamap_create(sc->sc_rdmatag, 0,
292 &sc->sc_rb.rb_spare_dmamap);
295 /* Same for the TX descs. */
296 for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) {
297 sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
298 error = bus_dmamap_create(sc->sc_tdmatag, 0,
299 &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
304 sc->sc_csum_features = HME_CSUM_FEATURES;
305 /* Initialize ifnet structure. */
307 if_initname(ifp, device_get_name(sc->sc_dev),
308 device_get_unit(sc->sc_dev));
309 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
310 ifp->if_start = hme_start;
311 ifp->if_ioctl = hme_ioctl;
312 ifp->if_init = hme_init;
313 IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ);
314 ifp->if_snd.ifq_drv_maxlen = HME_NTXQ;
315 IFQ_SET_READY(&ifp->if_snd);
320 * The DP83840A PHYs used with HME chips don't properly advertise their
321 * media capabilities themselves, so force writing the ANAR according
322 * to the BMSR in mii_phy_setmedia().
324 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
325 hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_EXTERNAL,
326 MII_OFFSET_ANY, MIIF_FORCEANEG);
327 i = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, hme_mediachange,
328 hme_mediastatus, BMSR_DEFCAPMASK, HME_PHYAD_INTERNAL,
329 MII_OFFSET_ANY, MIIF_FORCEANEG);
330 if (error != 0 && i != 0) {
332 device_printf(sc->sc_dev, "attaching PHYs failed\n");
335 sc->sc_mii = device_get_softc(sc->sc_miibus);
338 * Walk along the list of attached MII devices and
339 * establish an `MII instance' to `PHY number'
340 * mapping. We'll use this mapping to enable the MII
341 * drivers of the external transceiver according to
342 * the currently selected media.
344 sc->sc_phys[0] = sc->sc_phys[1] = -1;
345 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list) {
347 * Note: we support just two PHYs: the built-in
348 * internal device and an external on the MII
351 if ((child->mii_phy != HME_PHYAD_EXTERNAL &&
352 child->mii_phy != HME_PHYAD_INTERNAL) ||
353 child->mii_inst > 1) {
354 device_printf(sc->sc_dev, "cannot accommodate "
355 "MII device %s at phy %d, instance %d\n",
356 device_get_name(child->mii_dev),
357 child->mii_phy, child->mii_inst);
361 sc->sc_phys[child->mii_inst] = child->mii_phy;
364 /* Attach the interface. */
365 ether_ifattach(ifp, sc->sc_enaddr);
368 * Tell the upper layer(s) we support long frames/checksum offloads.
370 ifp->if_hdrlen = sizeof(struct ether_vlan_header);
371 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
372 ifp->if_hwassist |= sc->sc_csum_features;
373 ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM;
377 for (i = 0; i < tdesc; i++) {
378 bus_dmamap_destroy(sc->sc_tdmatag,
379 sc->sc_rb.rb_txdesc[i].htx_dmamap);
381 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
383 for (i = 0; i < rdesc; i++) {
384 bus_dmamap_destroy(sc->sc_rdmatag,
385 sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
387 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
389 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
391 bus_dma_tag_destroy(sc->sc_tdmatag);
393 bus_dma_tag_destroy(sc->sc_rdmatag);
395 bus_dma_tag_destroy(sc->sc_cdmatag);
397 bus_dma_tag_destroy(sc->sc_pdmatag);
404 hme_detach(struct hme_softc *sc)
406 struct ifnet *ifp = sc->sc_ifp;
412 callout_drain(&sc->sc_tick_ch);
415 device_delete_child(sc->sc_dev, sc->sc_miibus);
417 for (i = 0; i < HME_NTXQ; i++) {
418 bus_dmamap_destroy(sc->sc_tdmatag,
419 sc->sc_rb.rb_txdesc[i].htx_dmamap);
421 bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
422 for (i = 0; i < HME_NRXDESC; i++) {
423 bus_dmamap_destroy(sc->sc_rdmatag,
424 sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
426 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
427 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
428 bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
429 bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
430 bus_dma_tag_destroy(sc->sc_tdmatag);
431 bus_dma_tag_destroy(sc->sc_rdmatag);
432 bus_dma_tag_destroy(sc->sc_cdmatag);
433 bus_dma_tag_destroy(sc->sc_pdmatag);
437 hme_suspend(struct hme_softc *sc)
446 hme_resume(struct hme_softc *sc)
448 struct ifnet *ifp = sc->sc_ifp;
451 if ((ifp->if_flags & IFF_UP) != 0)
457 hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
459 struct hme_softc *sc = (struct hme_softc *)xsc;
464 ("%s: too many DMA segments (%d)", __func__, nsegs));
465 sc->sc_rb.rb_dmabase = segs[0].ds_addr;
471 struct hme_softc *sc = arg;
474 HME_LOCK_ASSERT(sc, MA_OWNED);
478 * Unload collision counters
480 if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
481 HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
482 HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
483 HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
484 HME_MAC_READ_4(sc, HME_MACI_LTCNT));
487 * then clear the hardware counters.
489 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
490 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
491 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
492 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
494 mii_tick(sc->sc_mii);
496 if (hme_watchdog(sc) == EJUSTRETURN)
499 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
503 hme_stop(struct hme_softc *sc)
508 callout_stop(&sc->sc_tick_ch);
509 sc->sc_wdog_timer = 0;
510 sc->sc_ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
511 sc->sc_flags &= ~HME_LINK;
513 /* Mask all interrupts */
514 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, 0xffffffff);
516 /* Reset transmitter and receiver */
517 HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
518 HME_SEB_RESET_ERX);
519 HME_SEB_BARRIER(sc, HME_SEBI_RESET, 4,
520 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
521 for (n = 0; n < 20; n++) {
522 v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
523 if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
528 device_printf(sc->sc_dev, "hme_stop: reset failed\n");
532 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
533 * ring for subsequent use.
536 hme_discard_rxbuf(struct hme_softc *sc, int ix)
540 * Dropped a packet, reinitialize the descriptor and turn the
541 * ownership back to the hardware.
543 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd,
544 ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc,
545 &sc->sc_rb.rb_rxdesc[ix])));
549 hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
551 struct hme_rxdesc *rd;
553 bus_dma_segment_t segs[1];
558 rd = &sc->sc_rb.rb_rxdesc[ri];
559 unmap = rd->hrx_m != NULL;
560 if (unmap && keepold) {
562 * Reinitialize the descriptor flags, as they may have been
563 * altered by the hardware.
565 hme_discard_rxbuf(sc, ri);
568 if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
570 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
571 b = mtod(m, uintptr_t);
573 * Required alignment boundary. At least 16 is needed, but since
574 * the mapping must be done in a way that a burst can start on a
575 * natural boundary we might need to extend this.
577 a = imax(HME_MINRXALIGN, sc->sc_burst);
579 * Make sure the buffer is suitably aligned. The 2 byte offset is removed
580 * when the mbuf is handed up. XXX: this ensures at least 16 byte
581 * alignment of the header adjacent to the ethernet header, which
582 * should be sufficient in all cases. Nevertheless, this second-guesses
583 * ALIGN().
585 m_adj(m, roundup2(b, a) - b);
586 if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
587 m, segs, &nsegs, 0) != 0) {
591 /* If nsegs is wrong then the stack is corrupt. */
593 ("%s: too many DMA segments (%d)", __func__, nsegs));
595 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
596 BUS_DMASYNC_POSTREAD);
597 bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
599 map = rd->hrx_dmamap;
600 rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
601 sc->sc_rb.rb_spare_dmamap = map;
602 bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
603 HME_XD_SETADDR(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
606 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, sc->sc_rb.rb_rxd, ri,
607 HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd)));
612 hme_meminit(struct hme_softc *sc)
614 struct hme_ring *hr = &sc->sc_rb;
615 struct hme_txdesc *td;
622 dma = hr->rb_dmabase;
625 * Allocate transmit descriptors
629 p += HME_NTXDESC * HME_XD_SIZE;
630 dma += HME_NTXDESC * HME_XD_SIZE;
632 * We have reserved descriptor space until the next 2048 byte
633 * boundary.
635 dma = (bus_addr_t)roundup((u_long)dma, 2048);
636 p = (caddr_t)roundup((u_long)p, 2048);
639 * Allocate receive descriptors
643 p += HME_NRXDESC * HME_XD_SIZE;
644 dma += HME_NRXDESC * HME_XD_SIZE;
645 /* Again move forward to the next 2048 byte boundary.*/
646 dma = (bus_addr_t)roundup((u_long)dma, 2048);
647 p = (caddr_t)roundup((u_long)p, 2048);
650 * Initialize transmit buffer descriptors
652 for (i = 0; i < HME_NTXDESC; i++) {
653 HME_XD_SETADDR(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
654 HME_XD_SETFLAGS(sc->sc_flags & HME_PCI, hr->rb_txd, i, 0);
657 STAILQ_INIT(&sc->sc_rb.rb_txfreeq);
658 STAILQ_INIT(&sc->sc_rb.rb_txbusyq);
659 for (i = 0; i < HME_NTXQ; i++) {
660 td = &sc->sc_rb.rb_txdesc[i];
661 if (td->htx_m != NULL) {
662 bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
663 BUS_DMASYNC_POSTWRITE);
664 bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
668 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q);
672 * Initialize receive buffer descriptors
674 for (i = 0; i < HME_NRXDESC; i++) {
675 error = hme_add_rxbuf(sc, i, 1);
680 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
681 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
683 hr->rb_tdhead = hr->rb_tdtail = 0;
686 CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
688 CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
690 CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
691 *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
692 CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
693 *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));
698 hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
699 u_int32_t clr, u_int32_t set)
705 HME_MAC_WRITE_4(sc, reg, val);
706 HME_MAC_BARRIER(sc, reg, 4,
707 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
708 if (clr == 0 && set == 0)
709 return (1); /* just write, no bits to wait for */
713 val = HME_MAC_READ_4(sc, reg);
715 /* After 3.5ms, we should have been done. */
716 device_printf(sc->sc_dev, "timeout while writing to "
717 "MAC configuration register\n");
720 } while ((val & clr) != 0 && (val & set) != set);
725 * Initialization of interface; set up initialization block
726 * and transmit/receive descriptor rings.
731 struct hme_softc *sc = (struct hme_softc *)xsc;
739 hme_init_locked(struct hme_softc *sc)
741 struct ifnet *ifp = sc->sc_ifp;
745 HME_LOCK_ASSERT(sc, MA_OWNED);
747 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
751 * Initialization sequence. The numbered steps below correspond
752 * to the sequence outlined in section 6.3.5.1 in the Ethernet
753 * Channel Engine manual (part of the PCIO manual).
754 * See also the STP2002-STQ document from Sun Microsystems.
757 /* step 1 & 2. Reset the Ethernet Channel */
760 /* Re-initialize the MIF */
764 /* Mask all MIF interrupts, just in case */
765 HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
768 /* step 3. Setup data structures in host memory */
769 if (hme_meminit(sc) != 0) {
770 device_printf(sc->sc_dev, "out of buffers; init aborted.");
774 /* step 4. TX MAC registers & counters */
775 HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
776 HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
777 HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
778 HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
779 HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE);
781 /* Load station MAC address */
783 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
784 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
785 HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);
788 * Init seed for backoff
789 * (source suggested by manual: low 10 bits of MAC address)
791 v = ((ea[4] << 8) | ea[5]) & 0x3fff;
792 HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
794 /* Note: Accepting power-on default for other MAC registers here.. */
796 /* step 5. RX MAC registers & counters */
799 /* step 6 & 7. Program Descriptor Ring Base Addresses */
800 HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
801 /* Transmit Descriptor ring size: in increments of 16 */
802 HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
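/* E.g. 256 / 16 - 1 = 15 for a maximum-size TX ring. */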
804 HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
805 HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE);
807 /* step 8. Global Configuration & Interrupt Mask */
808 HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
809 ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
810 HME_SEB_STAT_HOSTTOTX |
811 HME_SEB_STAT_RXTOHOST |
813 HME_SEB_STAT_TXPERR |
814 HME_SEB_STAT_RCNTEXP |
815 HME_SEB_STAT_ALL_ERRORS ));
817 switch (sc->sc_burst) {
822 v = HME_SEB_CFG_BURST16;
825 v = HME_SEB_CFG_BURST32;
828 v = HME_SEB_CFG_BURST64;
832 * Blindly enabling 64-bit transfers may hang PCI cards (Cheerio?).
833 * Allowing 64-bit transfers breaks TX checksum offload as well. It is
834 * unknown whether this stems from a hardware bug or from the driver's
835 * DMA handling.
837 * if ((sc->sc_flags & HME_PCI) == 0)
838 * v |= HME_SEB_CFG_64BIT;
840 HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
842 /* step 9. ETX Configuration: use mostly default values */
845 v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
846 v |= HME_ETX_CFG_DMAENABLE;
847 HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
849 /* step 10. ERX Configuration */
850 v = HME_ERX_READ_4(sc, HME_ERXI_CFG);
852 /* Encode Receive Descriptor ring size: four possible values */
853 v &= ~HME_ERX_CFG_RINGSIZEMSK;
854 switch (HME_NRXDESC) {
856 v |= HME_ERX_CFG_RINGSIZE32;
859 v |= HME_ERX_CFG_RINGSIZE64;
862 v |= HME_ERX_CFG_RINGSIZE128;
865 v |= HME_ERX_CFG_RINGSIZE256;
868 printf("hme: invalid Receive Descriptor ring size\n");
872 /* Enable DMA, fix RX first byte offset. */
873 v &= ~HME_ERX_CFG_FBO_MASK;
874 v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT);
875 /* RX TCP/UDP checksum offset */
876 n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2;
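/* I.e. (14 + 20) / 2 = 17 half-words for an option-less IP header. */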
877 n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK;
879 CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
880 HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
882 /* step 11. XIF Configuration */
883 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
885 CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
886 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
888 /* step 12. RX_MAC Configuration Register */
889 v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
890 v |= HME_MAC_RXCFG_ENABLE;
891 v &= ~(HME_MAC_RXCFG_DCRCS);
892 CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
893 HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);
895 /* step 13. TX_MAC Configuration Register */
896 v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
897 v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
898 CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
899 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
901 /* step 14. Issue Transmit Pending command */
904 /* Debug: double-check. */
905 CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
906 "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
907 HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
908 HME_ERX_READ_4(sc, HME_ERXI_RING),
909 HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
910 CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
911 HME_SEB_READ_4(sc, HME_SEBI_IMASK),
912 HME_ERX_READ_4(sc, HME_ERXI_CFG),
913 HME_ETX_READ_4(sc, HME_ETXI_CFG));
914 CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
915 HME_MAC_READ_4(sc, HME_MACI_RXCFG),
916 HME_MAC_READ_4(sc, HME_MACI_TXCFG));
919 ifp->if_drv_flags |= IFF_DRV_RUNNING;
920 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
922 /* Set the current media. */
923 hme_mediachange_locked(sc);
925 /* Start the one second timer. */
926 sc->sc_wdog_timer = 0;
927 callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
931 * Routine to DMA map an mbuf chain, set up the descriptor rings
932 * accordingly and start the transmission.
933 * Returns 0 on success, -1 if there were not enough free descriptors
934 * to map the packet, or an errno otherwise.
936 * XXX: this relies on the fact that segments returned by
937 * bus_dmamap_load_mbuf_sg() are readable from the nearest burst
938 * boundary at or below ds_addr (i.e. potentially before it) up to the
939 * first boundary beyond the end. This is usually a safe assumption to
940 * make, but is not documented.
943 hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
945 bus_dma_segment_t segs[HME_NTXSEGS];
946 struct hme_txdesc *htx;
950 int error, i, nsegs, pci, ri, si;
951 uint32_t cflags, flags;
953 if ((htx = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL)
957 if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
958 if (M_WRITABLE(*m0) == 0) {
959 m = m_dup(*m0, M_NOWAIT);
965 i = sizeof(struct ether_header);
966 m = m_pullup(*m0, i + sizeof(struct ip));
971 ip = (struct ip *)(mtod(m, caddr_t) + i);
972 i += (ip->ip_hl << 2);
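/*
 * For instance, for TCP over an option-less IP header the checksum start i
 * is 14 + 20 = 34 bytes, and csum_data (the offset of th_sum within the TCP
 * header, 16) yields a stuff offset of 34 + 16 = 50 bytes.
 */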
973 cflags = i << HME_XD_TXCKSUM_SSHIFT |
974 ((i + m->m_pkthdr.csum_data) << HME_XD_TXCKSUM_OSHIFT) |
979 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
980 *m0, segs, &nsegs, 0);
981 if (error == EFBIG) {
982 m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS);
989 error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
990 *m0, segs, &nsegs, 0);
996 } else if (error != 0)
998 /* If nsegs is wrong then the stack is corrupt. */
999 KASSERT(nsegs <= HME_NTXSEGS,
1000 ("%s: too many DMA segments (%d)", __func__, nsegs));
1006 if (sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) {
1007 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1008 /* Retry with m_collapse(9)? */
1011 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_PREWRITE);
1013 si = ri = sc->sc_rb.rb_tdhead;
1014 txd = sc->sc_rb.rb_txd;
1015 pci = sc->sc_flags & HME_PCI;
1016 CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri,
1017 HME_XD_GETFLAGS(pci, txd, ri));
1018 for (i = 0; i < nsegs; i++) {
1019 /* Fill the ring entry. */
1020 flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len);
1021 if (i == 0)
1022 flags |= HME_XD_SOP | cflags;
1023 else
1024 flags |= HME_XD_OWN | cflags;
1025 CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
1027 HME_XD_SETADDR(pci, txd, ri, segs[i].ds_addr);
1028 HME_XD_SETFLAGS(pci, txd, ri, flags);
1029 sc->sc_rb.rb_td_nbusy++;
1030 htx->htx_lastdesc = ri;
1031 ri = (ri + 1) % HME_NTXDESC;
1033 sc->sc_rb.rb_tdhead = ri;
1035 /* set EOP on the last descriptor */
1036 ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
1037 flags = HME_XD_GETFLAGS(pci, txd, ri);
1038 flags |= HME_XD_EOP;
1039 CTR3(KTR_HME, "hme_load_mbuf: setting EOP ri %d, si %d (%#x)", ri, si,
1041 HME_XD_SETFLAGS(pci, txd, ri, flags);
1043 /* Turn the first descriptor ownership to the hme */
1044 flags = HME_XD_GETFLAGS(pci, txd, si);
1045 flags |= HME_XD_OWN;
1046 CTR2(KTR_HME, "hme_load_mbuf: setting OWN for 1st desc ri %d, (%#x)",
1048 HME_XD_SETFLAGS(pci, txd, si, flags);
1050 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q);
1051 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, htx, htx_q);
1054 /* start the transmission. */
1055 HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
1061 * Pass a packet to the higher levels.
1064 hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags)
1066 struct ifnet *ifp = sc->sc_ifp;
1069 if (len <= sizeof(struct ether_header) ||
1070 len > HME_MAX_FRAMESIZE) {
1072 HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
1075 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1076 hme_discard_rxbuf(sc, ix);
1080 m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
1081 CTR1(KTR_HME, "hme_read: len %d", len);
1083 if (hme_add_rxbuf(sc, ix, 0) != 0) {
1085 * hme_add_rxbuf will leave the old buffer in the ring until
1086 * it is sure that a new buffer can be mapped. If it can not,
1087 * drop the packet, but leave the interface up.
1089 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
1090 hme_discard_rxbuf(sc, ix);
1094 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
1096 m->m_pkthdr.rcvif = ifp;
1097 m->m_pkthdr.len = m->m_len = len + HME_RXOFFS;
1098 m_adj(m, HME_RXOFFS);
1099 /* RX TCP/UDP checksum */
1100 if (ifp->if_capenable & IFCAP_RXCSUM)
1101 hme_rxcksum(m, flags);
1102 /* Pass the packet up. */
1104 (*ifp->if_input)(ifp, m);
1109 hme_start(struct ifnet *ifp)
1111 struct hme_softc *sc = ifp->if_softc;
1114 hme_start_locked(ifp);
1119 hme_start_locked(struct ifnet *ifp)
1121 struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
1125 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1126 IFF_DRV_RUNNING || (sc->sc_flags & HME_LINK) == 0)
1129 for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1130 sc->sc_rb.rb_td_nbusy < HME_NTXDESC - 1;) {
1131 IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
1135 error = hme_load_txmbuf(sc, &m);
1139 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1140 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1148 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1149 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1150 sc->sc_wdog_timer = 5;
1155 * Transmit interrupt.
1158 hme_tint(struct hme_softc *sc)
1161 struct ifnet *ifp = sc->sc_ifp;
1162 struct hme_txdesc *htx;
1163 unsigned int ri, txflags;
1165 txd = sc->sc_rb.rb_txd;
1166 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1167 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1168 /* Fetch current position in the transmit ring */
1169 for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
1170 if (sc->sc_rb.rb_td_nbusy <= 0) {
1171 CTR0(KTR_HME, "hme_tint: not busy!");
1175 txflags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, txd, ri);
1176 CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);
1178 if ((txflags & HME_XD_OWN) != 0)
1179 break;
1181 CTR0(KTR_HME, "hme_tint: not owned");
1182 --sc->sc_rb.rb_td_nbusy;
1183 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1185 /* Complete packet transmitted? */
1186 if ((txflags & HME_XD_EOP) == 0)
1189 KASSERT(htx->htx_lastdesc == ri,
1190 ("%s: ring indices skewed: %d != %d!",
1191 __func__, htx->htx_lastdesc, ri));
1192 bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap,
1193 BUS_DMASYNC_POSTWRITE);
1194 bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap);
1196 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
1197 m_freem(htx->htx_m);
1199 STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q);
1200 STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q);
1201 htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq);
1203 sc->sc_wdog_timer = sc->sc_rb.rb_td_nbusy > 0 ? 5 : 0;
1206 sc->sc_rb.rb_tdtail = ri;
1208 hme_start_locked(ifp);
1212 * RX TCP/UDP checksum
1215 hme_rxcksum(struct mbuf *m, u_int32_t flags)
1217 struct ether_header *eh;
1220 int32_t hlen, len, pktlen;
1221 u_int16_t cksum, *opts;
1224 pktlen = m->m_pkthdr.len;
1225 if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
1227 eh = mtod(m, struct ether_header *);
1228 if (eh->ether_type != htons(ETHERTYPE_IP))
1230 ip = (struct ip *)(eh + 1);
1231 if (ip->ip_v != IPVERSION)
1234 hlen = ip->ip_hl << 2;
1235 pktlen -= sizeof(struct ether_header);
1236 if (hlen < sizeof(struct ip))
1238 if (ntohs(ip->ip_len) < hlen)
1240 if (ntohs(ip->ip_len) != pktlen)
1242 if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1243 return; /* can't handle fragmented packet */
1247 if (pktlen < (hlen + sizeof(struct tcphdr)))
1251 if (pktlen < (hlen + sizeof(struct udphdr)))
1253 uh = (struct udphdr *)((caddr_t)ip + hlen);
1254 if (uh->uh_sum == 0)
1255 return; /* no checksum */
1261 cksum = ~(flags & HME_XD_RXCKSUM);
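/*
 * The hardware delivers the one's complement sum of the frame data starting
 * at the half-word offset programmed into ERX_CFG. Only CSUM_DATA_VALID (and
 * not CSUM_PSEUDO_HDR) is set below, so the stack still folds in the pseudo
 * header; IP options, however, have to be subtracted here.
 */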
1262 /* checksum fixup for IP options */
1263 len = hlen - sizeof(struct ip);
1265 opts = (u_int16_t *)(ip + 1);
1266 for (; len > 0; len -= sizeof(u_int16_t), opts++) {
1267 temp32 = cksum - *opts;
1268 temp32 = (temp32 >> 16) + (temp32 & 65535);
1269 cksum = temp32 & 65535;
1272 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
1273 m->m_pkthdr.csum_data = cksum;
1277 * Receive interrupt.
1280 hme_rint(struct hme_softc *sc)
1282 caddr_t xdr = sc->sc_rb.rb_rxd;
1283 struct ifnet *ifp = sc->sc_ifp;
1284 unsigned int ri, len;
1289 * Process all buffers with valid data.
1291 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD);
1292 for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
1293 flags = HME_XD_GETFLAGS(sc->sc_flags & HME_PCI, xdr, ri);
1294 CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
1295 if ((flags & HME_XD_OWN) != 0)
1296 break;
1299 if ((flags & HME_XD_OFL) != 0) {
1300 device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
1301 "flags=0x%x\n", ri, flags);
1302 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
1303 hme_discard_rxbuf(sc, ri);
1305 len = HME_XD_DECODE_RSIZE(flags);
1306 hme_read(sc, ri, len, flags);
1310 bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
1311 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1313 sc->sc_rb.rb_rdtail = ri;
1317 hme_eint(struct hme_softc *sc, u_int status)
1320 if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
1321 device_printf(sc->sc_dev, "XXXlink status changed: "
1322 "cfg=%#x, stat=%#x, sm=%#x\n",
1323 HME_MIF_READ_4(sc, HME_MIFI_CFG),
1324 HME_MIF_READ_4(sc, HME_MIFI_STAT),
1325 HME_MIF_READ_4(sc, HME_MIFI_SM));
1329 /* check for fatal errors that need a reset to unfreeze the DMA engine */
1330 if ((status & HME_SEB_STAT_FATAL_ERRORS) != 0) {
1331 HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
1332 sc->sc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1333 hme_init_locked(sc);
1340 struct hme_softc *sc = (struct hme_softc *)v;
1344 status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
1345 CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);
1347 if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
1348 hme_eint(sc, status);
1350 if ((status & HME_SEB_STAT_RXTOHOST) != 0)
1353 if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
1359 hme_watchdog(struct hme_softc *sc)
1361 struct ifnet *ifp = sc->sc_ifp;
1363 HME_LOCK_ASSERT(sc, MA_OWNED);
1366 CTR1(KTR_HME, "hme_watchdog: status %x",
1367 (u_int)HME_SEB_READ_4(sc, HME_SEBI_STAT));
1370 if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0)
1373 if ((sc->sc_flags & HME_LINK) != 0)
1374 device_printf(sc->sc_dev, "device timeout\n");
1375 else if (bootverbose)
1376 device_printf(sc->sc_dev, "device timeout (no link)\n");
1377 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
1379 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1380 hme_init_locked(sc);
1381 hme_start_locked(ifp);
1382 return (EJUSTRETURN);
1386 * Initialize the MII Management Interface
1389 hme_mifinit(struct hme_softc *sc)
1394 * Configure the MIF in frame mode, polling disabled, internal PHY
1395 * selected.
1397 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, 0);
1400 * If the currently selected media uses the external transceiver,
1401 * enable its MII drivers (which basically isolates the internal
1402 * one and vice versa). In case the current media hasn't been set
1403 * yet, we default to the internal transceiver.
1405 v = HME_MAC_READ_4(sc, HME_MACI_XIF);
1406 if (sc->sc_mii != NULL && sc->sc_mii->mii_media.ifm_cur != NULL &&
1407 sc->sc_phys[IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media)] ==
1409 v |= HME_MAC_XIF_MIIENABLE;
1411 v &= ~HME_MAC_XIF_MIIENABLE;
1412 HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
1419 hme_mii_readreg(device_t dev, int phy, int reg)
1421 struct hme_softc *sc;
1425 sc = device_get_softc(dev);
1426 /* Select the desired PHY in the MIF configuration register */
1427 v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1428 if (phy == HME_PHYAD_EXTERNAL)
1429 v |= HME_MIF_CFG_PHY;
1431 v &= ~HME_MIF_CFG_PHY;
1432 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1434 /* Construct the frame command */
1435 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1437 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
1438 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1439 (reg << HME_MIF_FO_REGAD_SHIFT);
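/*
 * This forms a standard MII management frame: start and opcode fields,
 * PHY and register addresses, followed by turnaround and data bits; the
 * chip sets TALSB once the turnaround completes and the data is valid.
 */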
1441 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1442 HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
1443 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1444 for (n = 0; n < 100; n++) {
1446 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1447 if (v & HME_MIF_FO_TALSB)
1448 return (v & HME_MIF_FO_DATA);
1451 device_printf(sc->sc_dev, "mii_read timeout\n");
1456 hme_mii_writereg(device_t dev, int phy, int reg, int val)
1458 struct hme_softc *sc;
1462 sc = device_get_softc(dev);
1463 /* Select the desired PHY in the MIF configuration register */
1464 v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
1465 if (phy == HME_PHYAD_EXTERNAL)
1466 v |= HME_MIF_CFG_PHY;
1468 v &= ~HME_MIF_CFG_PHY;
1469 HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
1471 /* Construct the frame command */
1472 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
1474 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
1475 (phy << HME_MIF_FO_PHYAD_SHIFT) |
1476 (reg << HME_MIF_FO_REGAD_SHIFT) |
1477 (val & HME_MIF_FO_DATA);
1479 HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
1480 HME_MIF_BARRIER(sc, HME_MIFI_FO, 4,
1481 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
1482 for (n = 0; n < 100; n++) {
1484 v = HME_MIF_READ_4(sc, HME_MIFI_FO);
1485 if (v & HME_MIF_FO_TALSB)
1489 device_printf(sc->sc_dev, "mii_write timeout\n");
1494 hme_mii_statchg(device_t dev)
1496 struct hme_softc *sc;
1497 uint32_t rxcfg, txcfg;
1499 sc = device_get_softc(dev);
1502 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1503 device_printf(sc->sc_dev, "hme_mii_statchg: status change\n");
1506 if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 &&
1507 IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE)
1508 sc->sc_flags |= HME_LINK;
1510 sc->sc_flags &= ~HME_LINK;
1512 txcfg = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
1513 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg,
1514 HME_MAC_TXCFG_ENABLE, 0))
1515 device_printf(sc->sc_dev, "cannot disable TX MAC\n");
1516 rxcfg = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1517 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg,
1518 HME_MAC_RXCFG_ENABLE, 0))
1519 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1521 /* Set the MAC Full Duplex bit appropriately. */
1522 if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
1523 txcfg |= HME_MAC_TXCFG_FULLDPLX;
1525 txcfg &= ~HME_MAC_TXCFG_FULLDPLX;
1526 HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, txcfg);
1528 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1529 (sc->sc_flags & HME_LINK) != 0) {
1530 if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, txcfg, 0,
1531 HME_MAC_TXCFG_ENABLE))
1532 device_printf(sc->sc_dev, "cannot enable TX MAC\n");
1533 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, rxcfg, 0,
1534 HME_MAC_RXCFG_ENABLE))
1535 device_printf(sc->sc_dev, "cannot enable RX MAC\n");
1540 hme_mediachange(struct ifnet *ifp)
1542 struct hme_softc *sc = ifp->if_softc;
1546 error = hme_mediachange_locked(sc);
1552 hme_mediachange_locked(struct hme_softc *sc)
1554 struct mii_softc *child;
1556 HME_LOCK_ASSERT(sc, MA_OWNED);
1559 if ((sc->sc_ifp->if_flags & IFF_DEBUG) != 0)
1560 device_printf(sc->sc_dev, "hme_mediachange_locked");
1566 * If both PHYs are present reset them. This is required for
1567 * unisolating the previously isolated PHY when switching PHYs.
1568 * As the above hme_mifinit() call will set the MII drivers in
1569 * the XIF configuration register according to the currently
1570 * selected media, there should be no window during which the
1571 * data paths of both transceivers are open at the same time,
1572 * even if the PHY device drivers use MIIF_NOISOLATE.
1574 if (sc->sc_phys[0] != -1 && sc->sc_phys[1] != -1)
1575 LIST_FOREACH(child, &sc->sc_mii->mii_phys, mii_list)
1577 return (mii_mediachg(sc->sc_mii));
1581 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1583 struct hme_softc *sc = ifp->if_softc;
1586 if ((ifp->if_flags & IFF_UP) == 0) {
1591 mii_pollstat(sc->sc_mii);
1592 ifmr->ifm_active = sc->sc_mii->mii_media_active;
1593 ifmr->ifm_status = sc->sc_mii->mii_media_status;
1598 * Process an ioctl request.
1601 hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1603 struct hme_softc *sc = ifp->if_softc;
1604 struct ifreq *ifr = (struct ifreq *)data;
1610 if ((ifp->if_flags & IFF_UP) != 0) {
1611 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
1612 ((ifp->if_flags ^ sc->sc_ifflags) &
1613 (IFF_ALLMULTI | IFF_PROMISC)) != 0)
1614 hme_setladrf(sc, 1);
1616 hme_init_locked(sc);
1617 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
1618 hme_stop(sc);
1619 if ((ifp->if_flags & IFF_LINK0) != 0)
1620 sc->sc_csum_features |= CSUM_UDP;
1622 sc->sc_csum_features &= ~CSUM_UDP;
1623 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1624 ifp->if_hwassist = sc->sc_csum_features;
1625 sc->sc_ifflags = ifp->if_flags;
1632 hme_setladrf(sc, 1);
1638 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
1642 ifp->if_capenable = ifr->ifr_reqcap;
1643 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
1644 ifp->if_hwassist = sc->sc_csum_features;
1646 ifp->if_hwassist = 0;
1650 error = ether_ioctl(ifp, cmd, data);
1658 * Set up the logical address filter.
1661 hme_setladrf(struct hme_softc *sc, int reenable)
1663 struct ifnet *ifp = sc->sc_ifp;
1664 struct ifmultiaddr *inm;
1669 HME_LOCK_ASSERT(sc, MA_OWNED);
1670 /* Clear the hash table. */
1671 hash[3] = hash[2] = hash[1] = hash[0] = 0;
1673 /* Get the current RX configuration. */
1674 macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
1677 * Turn off promiscuous mode, promiscuous group mode (all multicast),
1678 * and hash filter. Depending on the case, the right bit will be
1679 * enabled below.
1681 macc &= ~(HME_MAC_RXCFG_PGRP | HME_MAC_RXCFG_PMISC);
1684 * Disable the receiver while changing its state, as the documentation
1685 * mandates.
1686 * We then must wait until the bit clears in the register. This should
1687 * take at most 3.5ms.
1689 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1690 HME_MAC_RXCFG_ENABLE, 0))
1691 device_printf(sc->sc_dev, "cannot disable RX MAC\n");
1692 /* Disable the hash filter before writing to the filter registers. */
1693 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
1694 HME_MAC_RXCFG_HENABLE, 0))
1695 device_printf(sc->sc_dev, "cannot disable hash filter\n");
1697 /* Make the RX MAC really SIMPLEX. */
1698 macc |= HME_MAC_RXCFG_ME;
1700 macc |= HME_MAC_RXCFG_ENABLE;
1702 macc &= ~HME_MAC_RXCFG_ENABLE;
1704 if ((ifp->if_flags & IFF_PROMISC) != 0) {
1705 macc |= HME_MAC_RXCFG_PMISC;
1708 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
1709 macc |= HME_MAC_RXCFG_PGRP;
1713 macc |= HME_MAC_RXCFG_HENABLE;
1716 * Set up multicast address filter by passing all multicast addresses
1717 * through a crc generator, and then using the high order 6 bits as an
1718 * index into the 64 bit logical address filter. The two high order
1719 * bits select the word, while the remaining four bits select the bit
1720 * within the word.
1723 if_maddr_rlock(ifp);
1724 TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) {
1725 if (inm->ifma_addr->sa_family != AF_LINK)
1727 crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
1728 inm->ifma_addr), ETHER_ADDR_LEN);
1730 /* Just want the 6 most significant bits. */
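/* Keep only the 6 most significant bits (assumed elided statement). */
crc >>= 26;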
1733 /* Set the corresponding bit in the filter. */
1734 hash[crc >> 4] |= 1 << (crc & 0xf);
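/* Example: crc = 0x2a (101010b) selects hash[2] and sets bit 10 in it. */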
1736 if_maddr_runlock(ifp);
1739 /* Now load the hash table into the chip */
1740 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
1741 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
1742 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
1743 HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
1744 if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
1745 macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE |
1747 device_printf(sc->sc_dev, "cannot configure RX MAC\n");