 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * Copyright (c) 2001 Thomas Moestl <tmm@FreeBSD.org>.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * from: NetBSD: hme.c,v 1.20 2000/12/14 06:27:25 thorpej Exp
 */

/*
 * HME Ethernet module driver.
 *
 * The HME is, e.g., part of the PCIO PCI multi-function device.
 * It supports TX gathering and TX and RX checksum offloading.
 * RX buffers must be aligned at a programmable offset modulo 16. We choose 2
 * for this offset: mbuf clusters are usually aligned on 2^11 byte boundaries,
 * and skipping 2 bytes aligns the header following the Ethernet header on a
 * natural boundary, so this ensures minimal waste in the most common case.
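 * (A worked example, assuming 2^11-aligned clusters: the buffer data gets
 * offset 2, the 14 byte Ethernet header then ends 16 bytes into the cluster,
 * and the IP header that follows starts on a 16 byte boundary.)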
 *
 * Also, apparently, the buffers must extend to a DMA burst boundary beyond
 * the maximum packet size (this is not verified). Buffers starting on odd
 * boundaries must be mapped so that the burst can start on a natural
 * boundary.
 *
 * Checksumming is not yet supported.
 */

#define KTR_HME		KTR_CT2		/* XXX */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <hme/if_hmereg.h>
#include <hme/if_hmevar.h>

static void	hme_start(struct ifnet *);
static void	hme_stop(struct hme_softc *);
static int	hme_ioctl(struct ifnet *, u_long, caddr_t);
static void	hme_tick(void *);
static void	hme_watchdog(struct ifnet *);
static void	hme_shutdown(void *);
static void	hme_init(void *);
static int	hme_add_rxbuf(struct hme_softc *, unsigned int, int);
static int	hme_meminit(struct hme_softc *);
static int	hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t,
    u_int32_t, u_int32_t);
static void	hme_mifinit(struct hme_softc *);
static void	hme_reset(struct hme_softc *);
static void	hme_setladrf(struct hme_softc *, int);

static int	hme_mediachange(struct ifnet *);
static void	hme_mediastatus(struct ifnet *, struct ifmediareq *);

static int	hme_load_mbuf(struct hme_softc *, struct mbuf *);
static void	hme_read(struct hme_softc *, int, int);
static void	hme_eint(struct hme_softc *, u_int);
static void	hme_rint(struct hme_softc *);
static void	hme_tint(struct hme_softc *);

static void	hme_cdma_callback(void *, bus_dma_segment_t *, int, int);
static void	hme_rxdma_callback(void *, bus_dma_segment_t *, int, int);
static void	hme_txdma_callback(void *, bus_dma_segment_t *, int, int);

devclass_t hme_devclass;

DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(hme, miibus, 1, 1, 1);

#define HME_SPC_READ_4(spc, sc, offs) \
	bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs))
#define HME_SPC_WRITE_4(spc, sc, offs, v) \
	bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \
	    (sc)->sc_ ## spc ## o + (offs), (v))

#define HME_SEB_READ_4(sc, offs)	HME_SPC_READ_4(seb, (sc), (offs))
#define HME_SEB_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(seb, (sc), (offs), (v))
#define HME_ERX_READ_4(sc, offs)	HME_SPC_READ_4(erx, (sc), (offs))
#define HME_ERX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(erx, (sc), (offs), (v))
#define HME_ETX_READ_4(sc, offs)	HME_SPC_READ_4(etx, (sc), (offs))
#define HME_ETX_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(etx, (sc), (offs), (v))
#define HME_MAC_READ_4(sc, offs)	HME_SPC_READ_4(mac, (sc), (offs))
#define HME_MAC_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mac, (sc), (offs), (v))
#define HME_MIF_READ_4(sc, offs)	HME_SPC_READ_4(mif, (sc), (offs))
#define HME_MIF_WRITE_4(sc, offs, v)	HME_SPC_WRITE_4(mif, (sc), (offs), (v))
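
/*
 * For illustration, HME_SEB_READ_4(sc, offs) expands to
 * bus_space_read_4((sc)->sc_sebt, (sc)->sc_sebh, (sc)->sc_sebo + (offs)):
 * each register group is accessed through its own tag/handle/offset triple
 * provided by the bus front-end.
 */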

#define HME_WHINE(dev, ...) do {					\
	if (hme_nerr++ < HME_MAXERR)					\
		device_printf(dev, __VA_ARGS__);			\
	if (hme_nerr == HME_MAXERR) {					\
		device_printf(dev, "too many errors; not reporting "	\
		    "any more\n");					\
	}								\
} while (0)

int
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_softc *child;
	bus_size_t size;
	int error, rdesc, tdesc, i;
	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus handles, tags and offsets (split for SBus compatibility):
	 *	sc_seb{t,h,o}	(Shared Ethernet Block registers)
	 *	sc_erx{t,h,o}	(Receiver Unit registers)
	 *	sc_etx{t,h,o}	(Transmitter Unit registers)
	 *	sc_mac{t,h,o}	(MAC registers)
	 *	sc_mif{t,h,o}	(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc);
	/*
	 * Allocate DMA capable memory.
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of HME_N*DESC.
	 */
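	/*
	 * (That is 256 descriptors of HME_XD_SIZE == 8 bytes each: a flags
	 * word at offset 0 and an address word at offset 4, which is what the
	 * debug CTR2 dumps in hme_meminit() also assume.)
	 */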
	error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1,
	    BUS_SPACE_MAXSIZE_32BIT, 0, &sc->sc_pdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
	    1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, &sc->sc_cdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_rdmatag);

	error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
	    HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW,
	    &sc->sc_tdmatag);
	/* Allocate the control/TX DMA buffer. */
	error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase,
	    0, &sc->sc_cdmamap);
	if (error != 0) {
		device_printf(sc->sc_dev, "DMA buffer alloc error %d\n",
		    error);
	}

	/* Load the buffer. */
	sc->sc_rb.rb_dmabase = 0;
	if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap,
	    sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 ||
	    sc->sc_rb.rb_dmabase == 0) {
		device_printf(sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
	}

	CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase,
	    sc->sc_rb.rb_dmabase);
	/*
	 * Prepare the RX descriptors. rdesc serves as a marker for the last
	 * processed descriptor and may be used later on.
	 */
	for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) {
		sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL;
		error = bus_dmamap_create(sc->sc_rdmatag, 0,
		    &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap);
	}
	error = bus_dmamap_create(sc->sc_rdmatag, 0,
	    &sc->sc_rb.rb_spare_dmamap);
	/* Same for the TX descs. */
	for (tdesc = 0; tdesc < HME_NTXDESC; tdesc++) {
		sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL;
		sc->sc_rb.rb_txdesc[tdesc].htx_flags = 0;
		error = bus_dmamap_create(sc->sc_tdmatag, 0,
		    &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap);
	}
	device_printf(sc->sc_dev, "Ethernet address:");
	for (i = 0; i < 6; i++)
		printf("%c%02x", i > 0 ? ':' : ' ', sc->sc_arpcom.ac_enaddr[i]);
	printf("\n");
	/* Initialize ifnet structure. */
	ifp->if_softc = sc;
	ifp->if_unit = device_get_unit(sc->sc_dev);
	ifp->if_name = "hme";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_output = ether_output;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_snd.ifq_maxlen = HME_NTXDESC;

	if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange,
	    hme_mediastatus)) != 0) {
		device_printf(sc->sc_dev, "phy probe failed: %d\n", error);
	}
	sc->sc_mii = device_get_softc(sc->sc_miibus);
	/*
	 * Walk along the list of attached MII devices and
	 * establish an `MII instance' to `phy number'
	 * mapping. We'll use this mapping in media change
	 * requests to determine which phy to use to program
	 * the MIF configuration register.
	 */
	for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL;
	    child = LIST_NEXT(child, mii_list)) {
		/*
		 * Note: we support just two PHYs: the built-in
		 * internal device and an external on the MII
		 * connector.
		 */
		if (child->mii_phy > 1 || child->mii_inst > 1) {
			device_printf(sc->sc_dev, "cannot accommodate "
			    "MII device %s at phy %d, instance %d\n",
			    device_get_name(child->mii_dev),
			    child->mii_phy, child->mii_inst);
			continue;
		}

		sc->sc_phys[child->mii_inst] = child->mii_phy;
	}
	/* Attach the interface. */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);

	callout_init(&sc->sc_tick_ch, 0);
	return (0);

	/* Error path: tear down whatever has been allocated so far. */
	for (i = 0; i < tdesc; i++) {
		bus_dmamap_destroy(sc->sc_tdmatag,
		    sc->sc_rb.rb_txdesc[i].htx_dmamap);
	}
	bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap);
	for (i = 0; i < rdesc; i++) {
		bus_dmamap_destroy(sc->sc_rdmatag,
		    sc->sc_rb.rb_rxdesc[i].hrx_dmamap);
	}
	bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap);
	bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap);
	bus_dma_tag_destroy(sc->sc_tdmatag);
	bus_dma_tag_destroy(sc->sc_rdmatag);
	bus_dma_tag_destroy(sc->sc_cdmatag);
	bus_dma_tag_destroy(sc->sc_pdmatag);
	return (error);
}

static void
hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count"));
	sc->sc_rb.rb_dmabase = segs[0].ds_addr;
}

static void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;

	mii_tick(sc->sc_mii);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

static void
hme_reset(struct hme_softc *sc)
{

	hme_init(sc);
}

static void
hme_stop(struct hme_softc *sc)
{
	u_int32_t v;
	int n;

	callout_stop(&sc->sc_tick_ch);

	/* Reset transmitter and receiver. */
	HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX |
	    HME_SEB_RESET_ERX);

	for (n = 0; n < 20; n++) {
		v = HME_SEB_READ_4(sc, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	device_printf(sc->sc_dev, "hme_stop: reset failed\n");
}

static void
hme_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *a = xsc;

	/* XXX: A cluster should not contain more than one segment, correct? */
	if (error != 0 || nsegs != 1)
		return;
	*a = segs[0].ds_addr;
}

/*
 * Discard the contents of an mbuf in the RX ring, freeing the buffer in the
 * ring for subsequent use.
 */
static void
hme_discard_rxbuf(struct hme_softc *sc, int ix, int sync)
{
	/*
	 * Dropped a packet, reinitialize the descriptor and turn the
	 * ownership back to the hardware.
	 */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ,
	    sc->sc_rb.rb_rxdesc[ix].hrx_len)));
	if (sync) {
		bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

static int
hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
{
	struct hme_rxdesc *rd;
	struct mbuf *m;
	bus_size_t len, offs, a;
	bus_dmamap_t map;
	bus_addr_t ba;
	caddr_t b;
	int unmap;

	rd = &sc->sc_rb.rb_rxdesc[ri];
	unmap = rd->hrx_m != NULL;
	if (unmap && keepold) {
		/*
		 * Reinitialize the descriptor flags, as they may have been
		 * altered by the hardware.
		 */
		hme_discard_rxbuf(sc, ri, 0);
		return (0);
	}

	if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
		return (ENOBUFS);
	m_clget(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	len = m->m_ext.ext_size;
	b = mtod(m, caddr_t);
	/*
	 * Required alignment boundary. At least 16 is needed, but since
	 * the mapping must be done in a way that a burst can start on a
	 * natural boundary we might need to extend this.
	 */
	a = max(0x10, sc->sc_burst);
	/*
	 * Make sure the buffer is suitably aligned: we need an offset of
	 * 2 modulo a. XXX: this ensures at least 16 byte alignment of the
	 * header adjacent to the ethernet header, which should be sufficient
	 * in all cases. Nevertheless, this second-guesses ALIGN().
	 */
	offs = (a - (((uintptr_t)b - 2) & (a - 1))) % a;
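	/*
	 * Example: with a == 16 and a cluster-aligned buffer (b % 16 == 0),
	 * ((uintptr_t)b - 2) & 15 == 14, so offs == (16 - 14) % 16 == 2 and
	 * b + offs is congruent 2 modulo 16, as required above.
	 */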
	/* Align the buffer on the boundary for mapping. */
	b += offs;
	ba = 0;
	if (bus_dmamap_load(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap,
	    b, len + 2, hme_rxdma_callback, &ba, 0) != 0 || ba == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	if (unmap) {
		bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap);
	}

	/* Swap the loaded spare map with the descriptor's map. */
	map = rd->hrx_dmamap;
	rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap;
	sc->sc_rb.rb_spare_dmamap = map;
	rd->hrx_len = len - sc->sc_burst;
	bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD);
	HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, ba);
	/* Lazily leave at least one burst size grace space. */
	HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN |
	    HME_XD_ENCODE_RSIZE(ulmin(HME_BUFSZ, rd->hrx_len)));
	rd->hrx_m = m;
	rd->hrx_offs = offs;
	return (0);
}

static int
hme_meminit(struct hme_softc *sc)
{
	struct hme_ring *hr = &sc->sc_rb;
	struct hme_txdesc *td;
	bus_addr_t dma;
	caddr_t p;
	int i, error;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;
	/*
	 * Allocate transmit descriptors.
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_NTXDESC * HME_XD_SIZE;
	dma += HME_NTXDESC * HME_XD_SIZE;
	/*
	 * We have reserved descriptor space until the next 2048 byte
	 * boundary.
	 */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);
	/*
	 * Allocate receive descriptors.
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_NRXDESC * HME_XD_SIZE;
	dma += HME_NRXDESC * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);
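	/*
	 * The control buffer is now laid out as follows (assuming both rings
	 * use the maximum of 256 descriptors; smaller rings just leave pad up
	 * to the next 2048 byte boundary):
	 *
	 *	rb_membase + 0:		256 * 8 bytes of TX descriptors
	 *	rb_membase + 2048:	256 * 8 bytes of RX descriptors
	 */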
	/*
	 * Initialize transmit buffer descriptors.
	 */
	for (i = 0; i < HME_NTXDESC; i++) {
		td = &sc->sc_rb.rb_txdesc[i];
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		if (td->htx_m != NULL) {
			m_freem(td->htx_m);
			td->htx_m = NULL;
		}
		if ((td->htx_flags & HTXF_MAPPED) != 0)
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
		td->htx_flags = 0;
	}

	/*
	 * Initialize receive buffer descriptors.
	 */
	for (i = 0; i < HME_NRXDESC; i++) {
		error = hme_add_rxbuf(sc, i, 1);
		if (error != 0)
			return (error);
	}

	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;

	CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd,
	    hr->rb_txddma);
	CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd,
	    hr->rb_rxddma);
	CTR2(KTR_HME, "rx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4));
	CTR2(KTR_HME, "tx entry 1: flags %x, address %x",
	    *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4));

	return (0);
}

static int
hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val,
    u_int32_t clr, u_int32_t set)
{
	int i = 0;

	val &= ~clr;
	val |= set;
	HME_MAC_WRITE_4(sc, reg, val);
	if (clr == 0 && set == 0)
		return (1);	/* just write, no bits to wait for */
	do {
		DELAY(100);
		i++;
		val = HME_MAC_READ_4(sc, reg);
		if (i > 35) {
			/* After 3.5ms, we should have been done. */
			device_printf(sc->sc_dev, "timeout while writing to "
			    "MAC configuration register\n");
			return (0);
		}
	} while ((val & clr) != 0 && (val & set) != set);
	return (1);
}

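/*
 * Usage example: hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
 * HME_MAC_RXCFG_ENABLE, 0) writes macc with the enable bit cleared and then
 * polls RXCFG until the hardware acknowledges the receiver as disabled; see
 * hme_setladrf() and hme_mii_statchg() below.
 */
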
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
static void
hme_init(void *xsc)
{
	struct hme_softc *sc = (struct hme_softc *)xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */
	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

	/* Mask all MIF interrupts, just in case */
	HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff);
	/* step 3. Setup data structures in host memory */
	if (hme_meminit(sc) != 0) {
		device_printf(sc->sc_dev, "out of buffers; init aborted.\n");
		return;
	}

	/* step 4. TX MAC registers & counters */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, ETHER_MAX_LEN);
	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3ff;
	HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v);
	/* Note: Accepting power-on default for other MAC registers here. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc, 0);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	/* Transmit Descriptor ring size: in increments of 16 */
	HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1);
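	/*
	 * (E.g., with HME_NTXDESC == 256 this writes 256 / 16 - 1 == 15;
	 * the register counts groups of 16 descriptors.)
	 */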
	HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, ETHER_MAX_LEN);
	/* step 8. Global Configuration & Interrupt Mask */
	HME_SEB_WRITE_4(sc, HME_SEBI_IMASK,
	    ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	    HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_RXTOHOST |
	    HME_SEB_STAT_TXALL |
	    HME_SEB_STAT_TXPERR |
	    HME_SEB_STAT_RCNTEXP |
	    HME_SEB_STAT_ALL_ERRORS));
	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v);
	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = HME_ETX_READ_4(sc, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v);
	/* step 10. ERX Configuration */
	v = HME_ERX_READ_4(sc, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	v &= ~HME_ERX_CFG_RINGSIZEMSK;
	switch (HME_NRXDESC) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}
	/* Enable DMA, fix RX first byte offset to 2. */
	v &= ~HME_ERX_CFG_FBO_MASK;
	v |= HME_ERX_CFG_DMAENABLE | (2 << HME_ERX_CFG_FBO_SHIFT);
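	/*
	 * The first byte offset of 2 matches the "offset 2 modulo 16"
	 * placement the RX buffers get in hme_add_rxbuf(): the chip deposits
	 * each packet that many bytes into its buffer.
	 */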
740 CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v);
741 HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v);
	/* step 11. XIF Configuration */
	v = HME_MAC_READ_4(sc, HME_MACI_XIF);

	/* If an external transceiver is connected, enable its MII drivers */
	if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0)
		v |= HME_MAC_XIF_MIIENABLE;
	CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_XIF, v);
	/* step 12. RX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	v &= ~(HME_MAC_RXCFG_DCRCS);
	CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v);
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);
	/* Debug: double-check. */
	CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, "
	    "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING),
	    HME_ETX_READ_4(sc, HME_ETXI_RSIZE),
	    HME_ERX_READ_4(sc, HME_ERXI_RING),
	    HME_MAC_READ_4(sc, HME_MACI_RXSIZE));
	CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x",
	    HME_SEB_READ_4(sc, HME_SEBI_IMASK),
	    HME_ERX_READ_4(sc, HME_ERXI_CFG),
	    HME_ETX_READ_4(sc, HME_ETXI_CFG));
	CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x",
	    HME_MAC_READ_4(sc, HME_MACI_RXCFG),
	    HME_MAC_READ_4(sc, HME_MACI_TXCFG));

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	hme_start(ifp);
}

struct hme_txdma_arg {
	struct hme_softc	*hta_sc;
	struct mbuf		*hta_m;
	bus_size_t		hta_offs;
	bus_size_t		hta_pad;
	int			hta_flags;
	int			hta_err;
};

/* Values for hta_flags */
#define HTAF_SOP	1	/* Start of packet (first mbuf in chain) */
#define HTAF_EOP	2	/* End of packet (last mbuf in chain) */

static void
hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct hme_txdma_arg *ta = xsc;
	struct hme_txdesc *td;
	bus_addr_t addr;
	bus_size_t sz;
	caddr_t txd;
	u_int32_t flags;
	int i, *tdhead, pci;

	ta->hta_err = error;
	if (error != 0)
		return;

	tdhead = &ta->hta_sc->sc_rb.rb_tdhead;
	pci = ta->hta_sc->sc_pci;
	txd = ta->hta_sc->sc_rb.rb_txd;
	for (i = 0; i < nsegs; i++) {
		if (ta->hta_sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
			ta->hta_err = -1;
			return;
		}

		td = &ta->hta_sc->sc_rb.rb_txdesc[*tdhead];
		addr = segs[i].ds_addr;
		sz = segs[i].ds_len;
		/* Adjust the offsets. */
		addr += ta->hta_offs;
		sz -= ta->hta_offs;
		td->htx_flags = HTXF_MAPPED;

		if (i == nsegs - 1) {
			/* Subtract the pad. */
			if (sz < ta->hta_pad) {
				/*
				 * Ooops. This should not have happened; it
				 * means that we got a zero-size segment or
				 * segment sizes were unnatural.
				 */
				device_printf(ta->hta_sc->sc_dev,
				    "hme_txdma_callback: alignment glitch\n");
				ta->hta_err = EINVAL;
				return;
			}
			/* If sz is 0 now, this does not matter. */
			sz -= ta->hta_pad;
		}
		/* Fill the ring entry. */
		flags = HME_XD_ENCODE_TSIZE(sz);
		if ((ta->hta_flags & HTAF_SOP) != 0 && i == 0)
			flags |= HME_XD_SOP;
		if ((ta->hta_flags & HTAF_EOP) != 0 && i == nsegs - 1) {
			flags |= HME_XD_EOP;
			td->htx_m = ta->hta_m;
		}
		CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, "
		    "flags %#x, addr %#x", i + 1, nsegs, *tdhead, (u_int)flags,
		    (u_int)addr);
		HME_XD_SETFLAGS(pci, txd, *tdhead, flags);
		HME_XD_SETADDR(pci, txd, *tdhead, addr);

		ta->hta_sc->sc_rb.rb_td_nbusy++;
		*tdhead = ((*tdhead) + 1) % HME_NTXDESC;
	}
}

/*
 * Routine to DMA map an mbuf chain, set up the descriptor rings accordingly
 * and start the transmission.
 * Returns 0 on success, -1 if there were not enough free descriptors to map
 * the packet, or an errno otherwise.
 */
static int
hme_load_mbuf(struct hme_softc *sc, struct mbuf *m0)
{
885 hme_load_mbuf(struct hme_softc *sc, struct mbuf *m0)
887 struct hme_txdma_arg cba;
888 struct mbuf *m = m0, *n;
889 struct hme_txdesc *td;
891 int error, len, si, ri, totlen, sum;
894 if ((m->m_flags & M_PKTHDR) == 0)
895 panic("hme_dmamap_load_mbuf: no packet header");
896 totlen = m->m_pkthdr.len;
898 si = sc->sc_rb.rb_tdhead;
901 cba.hta_flags = HTAF_SOP;
903 for (; m != NULL && sum < totlen; m = n) {
904 if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC) {
913 td = &sc->sc_rb.rb_txdesc[sc->sc_rb.rb_tdhead];
914 if (n == NULL || sum >= totlen)
915 cba.hta_flags |= HTAF_EOP;
917 * This is slightly evil: we must map the buffer in a way that
918 * allows dma transfers to start on a natural burst boundary.
919 * This is done by rounding down the mapping address, and
920 * recording the required offset for the callback. With this,
921 * we cannot cross a page boundary because the burst size
922 * is a small power of two.
924 cba.hta_offs = (sc->sc_burst -
925 (mtod(m, uintptr_t) & (sc->sc_burst - 1))) % sc->sc_burst;
926 start = mtod(m, char *) - cba.hta_offs;
929 * Similarly, the end of the mapping should be on a natural
930 * burst boundary. XXX: Let's hope that any segment ends
931 * generated by the busdma code are also on such boundaries.
933 cba.hta_pad = (sc->sc_burst - (((uintptr_t)start + len) &
934 (sc->sc_burst - 1))) % sc->sc_burst;
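		/*
		 * Example: with a burst size of 16 and start + len ending 10
		 * bytes past a burst boundary, hta_pad == (16 - 10) % 16 == 6;
		 * hme_txdma_callback() subtracts those 6 pad bytes from the
		 * final segment's length again.
		 */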
		/* Most of the work is done in the callback. */
		if ((error = bus_dmamap_load(sc->sc_tdmatag, td->htx_dmamap,
		    start, len, hme_txdma_callback, &cba, 0)) != 0 ||
		    cba.hta_err != 0)
			goto fail;
		bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
		    BUS_DMASYNC_PREWRITE);

		cba.hta_flags &= ~HTAF_SOP;
	}
	/* Turn descriptor ownership over to the hme, back to front. */
	ri = sc->sc_rb.rb_tdhead;
	CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)",
	    ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri));
	do {
		ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC;
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) |
		    HME_XD_OWN;
		CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)",
		    ri, si, flags);
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags);
	} while (ri != si);
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Start the transmission. */
	HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP);
	return (0);

fail:
	for (ri = si; ri != sc->sc_rb.rb_tdhead; ri = (ri + 1) % HME_NTXDESC) {
		td = &sc->sc_rb.rb_txdesc[ri];
		if ((td->htx_flags & HTXF_MAPPED) != 0)
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
		td->htx_flags = 0;
		td->htx_m = NULL;
		sc->sc_rb.rb_td_nbusy--;
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, 0);
	}
	sc->sc_rb.rb_tdhead = si;
	error = cba.hta_err != 0 ? cba.hta_err : error;
	if (error != -1)
		device_printf(sc->sc_dev, "could not load mbuf: %d\n", error);
	return (error);
}

/*
 * Pass a packet to the higher levels.
 */
static void
hme_read(struct hme_softc *sc, int ix, int len)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_header *eh;
	struct mbuf *m;
	int offs;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {
		HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n",
		    len);
		ifp->if_ierrors++;
		hme_discard_rxbuf(sc, ix, 1);
		return;
	}
	m = sc->sc_rb.rb_rxdesc[ix].hrx_m;
	offs = sc->sc_rb.rb_rxdesc[ix].hrx_offs;
	CTR2(KTR_HME, "hme_read: offs %d, len %d", offs, len);

	if (hme_add_rxbuf(sc, ix, 0) != 0) {
		/*
		 * hme_add_rxbuf will leave the old buffer in the ring until
		 * it is sure that a new buffer can be mapped. If it can not,
		 * drop the packet, but leave the interface up.
		 */
		ifp->if_ierrors++;
		hme_discard_rxbuf(sc, ix, 1);
		return;
	}

	/* Changed the rings; sync. */
	bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ifp->if_ipackets++;

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = len + offs;
	m_adj(m, offs);
	eh = mtod(m, struct ether_header *);
	m_adj(m, sizeof(struct ether_header));

	/* Pass the packet up. */
	ether_input(ifp, eh, m);
}

static void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct mbuf *m;
	int error = 0, enq = 0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		error = hme_load_mbuf(sc, m);
		if (error != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			IF_PREPEND(&ifp->if_snd, m);
			break;
		}
		enq = 1;
	}

	if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1)
		ifp->if_flags |= IFF_OACTIVE;
	/* Set watchdog timer if a packet was queued */
	if (enq)
		ifp->if_timer = 5;
}

/*
 * Transmit interrupt.
 */
static void
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct hme_txdesc *td;
	unsigned int ri, txflags;
	/*
	 * Unload collision counters...
	 */
	ifp->if_collisions +=
	    HME_MAC_READ_4(sc, HME_MACI_NCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_FCCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_EXCNT) +
	    HME_MAC_READ_4(sc, HME_MACI_LTCNT);

	/*
	 * ...then clear the hardware counters.
	 */
	HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0);
	HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0);
	/* Fetch current position in the transmit ring */
	for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) {
		if (sc->sc_rb.rb_td_nbusy <= 0) {
			CTR0(KTR_HME, "hme_tint: not busy!");
			break;
		}

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);
		CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags);

		if ((txflags & HME_XD_OWN) != 0)
			break;

		td = &sc->sc_rb.rb_txdesc[ri];
		CTR1(KTR_HME, "hme_tint: not owned, dflags %#x", td->htx_flags);
		if ((td->htx_flags & HTXF_MAPPED) != 0) {
			bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap);
		}
		td->htx_flags = 0;

		--sc->sc_rb.rb_td_nbusy;
		ifp->if_flags &= ~IFF_OACTIVE;

		/* Complete packet transmitted? */
		if ((txflags & HME_XD_EOP) == 0)
			continue;

		ifp->if_opackets++;
		m_freem(td->htx_m);
		td->htx_m = NULL;
	}
	/* Turn off watchdog */
	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	sc->sc_rb.rb_tdtail = ri;

	hme_start(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;
}

/*
 * Receive interrupt.
 */
static void
hme_rint(struct hme_softc *sc)
{
	caddr_t xdr = sc->sc_rb.rb_rxd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, len;
	u_int32_t flags;

	/*
	 * Process all buffers with valid data.
	 */
	for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags);
		if ((flags & HME_XD_OWN) != 0)
			break;

		if ((flags & HME_XD_OFL) != 0) {
			device_printf(sc->sc_dev, "buffer overflow, ri=%d; "
			    "flags=0x%x\n", ri, flags);
			ifp->if_ierrors++;
			hme_discard_rxbuf(sc, ri, 1);
		} else {
			len = HME_XD_DECODE_RSIZE(flags);
			hme_read(sc, ri, len);
		}
	}

	sc->sc_rb.rb_rdtail = ri;
}

static void
hme_eint(struct hme_softc *sc, u_int status)
{

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		device_printf(sc->sc_dev, "XXXlink status changed\n");
		return;
	}

	HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status);
}

void
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	u_int32_t status;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		hme_rint(sc);
}

static void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	u_int32_t status;

	status = HME_SEB_READ_4(sc, HME_SEBI_STAT);
	CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status);

	device_printf(sc->sc_dev, "device timeout\n");
	++ifp->if_oerrors;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface.
 */
static void
hme_mifinit(struct hme_softc *sc)
{
	u_int32_t v;

	/* Configure the MIF in frame mode */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_BBMODE;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);
}

int
hme_mii_readreg(device_t dev, int phy, int reg)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);
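	/*
	 * (This builds the standard MII management frame: e.g. a read of
	 * register 1 on PHY 0 shifts out ST == 01, OP == 10, PHYAD == 00000
	 * and REGAD == 00001; the chip handles the turnaround bits and
	 * shifts in the 16 data bits.)
	 */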
	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (v & HME_MIF_FO_DATA);
	}

	device_printf(sc->sc_dev, "mii_read timeout\n");
	return (0);
}

int
hme_mii_writereg(device_t dev, int phy, int reg, int val)
{
	struct hme_softc *sc = device_get_softc(dev);
	int n;
	u_int32_t v;

	/* Select the desired PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	/* Clear PHY select bit */
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		/* Set PHY select bit to get at external device */
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	HME_MIF_WRITE_4(sc, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = HME_MIF_READ_4(sc, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			return (1);
	}

	device_printf(sc->sc_dev, "mii_write timeout\n");
	return (0);
}

void
hme_mii_statchg(device_t dev)
{
	struct hme_softc *sc = device_get_softc(dev);
	int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	u_int32_t v;

	if (sc->sc_debug)
		printf("hme_mii_statchg: status change: phy = %d\n", phy);

	/* Select the current PHY in the MIF configuration register */
	v = HME_MIF_READ_4(sc, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v);

	/* Set the MAC Full Duplex bit appropriately */
	v = HME_MAC_READ_4(sc, HME_MACI_TXCFG);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0))
		return;
	if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0)
		v |= HME_MAC_TXCFG_FULLDPLX;
	else
		v &= ~HME_MAC_TXCFG_FULLDPLX;
	HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v);
	if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE))
		return;
}

static int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	return (mii_mediachg(sc->sc_mii));
}

static void
hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hme_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return;

	mii_pollstat(sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii->mii_media_active;
	ifmr->ifm_status = sc->sc_mii->mii_media_status;
}

/*
 * Process an ioctl request.
 */
static int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			hme_init(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			hme_init(sc);
		}
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Update the multicast filter. */
		hme_setladrf(sc, 1);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}

static void
hme_shutdown(void *arg)
{

	hme_stop((struct hme_softc *)arg);
}

/*
 * Set up the logical address filter.
 */
static void
hme_setladrf(struct hme_softc *sc, int reenable)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ifmultiaddr *inm;
	struct sockaddr_dl *sdl;
	u_int32_t macc, crc;
	u_int32_t hash[4];
	u_int8_t octet, *cp;
	int i, len;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;
	/* Get current RX configuration */
	macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG);

	/*
	 * Disable the receiver while changing its state as the documentation
	 * mandates.
	 * We then must wait until the bit clears in the register. This should
	 * take at most 3.5ms.
	 */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0))
		return;
	/* Disable the hash filter before writing to the filter registers. */
	if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc,
	    HME_MAC_RXCFG_HENABLE, 0))
		return;

	if (reenable)
		macc |= HME_MAC_RXCFG_ENABLE;
	else
		macc &= ~HME_MAC_RXCFG_ENABLE;
1475 if ((ifp->if_flags & IFF_PROMISC) != 0) {
1476 /* Turn on promiscuous mode; turn off the hash filter */
1477 macc |= HME_MAC_RXCFG_PMISC;
1478 macc &= ~HME_MAC_RXCFG_HENABLE;
1479 ifp->if_flags |= IFF_ALLMULTI;
1483 /* Turn off promiscuous mode; turn on the hash filter */
1484 macc &= ~HME_MAC_RXCFG_PMISC;
1485 macc |= HME_MAC_RXCFG_HENABLE;
	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter. The high order bits
	 * select the word, while the rest of the bits select the bit within
	 * the word.
	 */
	TAILQ_FOREACH(inm, &sc->sc_arpcom.ac_if.if_multiaddrs, ifma_link) {
		if (inm->ifma_addr->sa_family != AF_LINK)
			continue;
		sdl = (struct sockaddr_dl *)inm->ifma_addr;
		cp = (u_int8_t *)LLADDR(sdl);
		crc = 0xffffffff;

		for (len = sdl->sdl_alen; --len >= 0;) {
			octet = *cp++;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);
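		/*
		 * E.g., if the six MSBs of the CRC are 100101 (0x25), this
		 * sets bit 5 (0x25 & 0xf) of hash word 2 (0x25 >> 4).
		 */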
1523 ifp->if_flags &= ~IFF_ALLMULTI;
chipit:
	/* Now load the hash table into the chip */
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]);
	HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]);
	hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0,
	    macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE));
}