2 * Copyright (c) 1992, 1993, University of Vermont and State
3 * Agricultural College.
4 * Copyright (c) 1992, 1993, Garrett A. Wollman.
7 * Copyright (c) 1990, 1991, William F. Jolitz
8 * Copyright (c) 1990, The Regents of the University of California
11 * Copyright (c) 1993, 1994, Charles M. Hannum
13 * EtherExpress 16 support:
14 * Copyright (c) 1993, 1994, 1995, Rodney W. Grimes
15 * Copyright (c) 1997, Aaron C. Smith
17 * All rights reserved.
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
22 * 1. Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * 2. Redistributions in binary form must reproduce the above copyright
25 * notice, this list of conditions and the following disclaimer in the
26 * documentation and/or other materials provided with the distribution.
27 * 3. All advertising materials mentioning features or use of this software
28 * must display the following acknowledgement:
29 * This product includes software developed by the University of
30 * Vermont and State Agricultural College and Garrett A. Wollman, by
31 * William F. Jolitz, by the University of California, Berkeley,
32 * Lawrence Berkeley Laboratory, and their contributors, by
33 * Charles M. Hannum, by Rodney W. Grimes, and by Aaron C. Smith.
34 * 4. Neither the names of the Universities nor the names of the authors
35 * may be used to endorse or promote products derived from this software
36 * without specific prior written permission.
38 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE
42 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * MAINTAINER: Matthew N. Dodd <winter@jurai.net>
53 #include <sys/cdefs.h>
54 __FBSDID("$FreeBSD$");
57 * Intel 82586 Ethernet chip
58 * Register, bit, and structure definitions.
60 * Written by GAW with reference to the Clarkson Packet Driver code for this
61 * chip written by Russ Nelson and others.
63 * Intel EtherExpress 16 support from if_ix.c, written by Rodney W. Grimes.
67 * The i82586 is a very versatile chip, found in many implementations.
68 * Programming this chip is mostly the same, but certain details differ
69 * from card to card. This driver is written so that different cards
70 * can be automatically detected at run-time.
76 * We run the 82586 in a standard Ethernet mode. We keep NFRAMES
77 * received frame descriptors around for the receiver to use, and
78 * NRXBUFS associated receive buffer descriptors, both in a circular
79 * list. Whenever a frame is received, we rotate both lists as
80 * necessary. (The 586 treats both lists as a simple queue.) We also
81 * keep a transmit command around so that packets can be sent off
84 * We configure the adapter in AL-LOC = 1 mode, which means that the
85 * Ethernet/802.3 MAC header is placed at the beginning of the receive
86 * buffer rather than being split off into various fields in the RFD.
87 * This also means that we must include this header in the transmit
90 * By convention, all transmit commands, and only transmit commands,
91 * shall have the I (IE_CMD_INTR) bit set in the command. This way,
92 * when an interrupt arrives at ieintr(), it is immediately possible
93 * to tell what precisely caused it. ANY OTHER command-sending routines
94 * should run at splimp(), and should post an acknowledgement to every
95 * interrupt they generate.
97 * The 82586 has a 24-bit address space internally, and the adaptor's
98 * memory is located at the top of this region. However, the value
99 * we are given in configuration is normally the *bottom* of the adaptor
100 * RAM. So, we must go through a few gyrations to come up with a
101 * kernel virtual address which represents the actual beginning of the
102 * 586 address space. First, we autosize the RAM by running through
103 * several possible sizes and trying to initialize the adapter under
104 * the assumption that the selected size is correct. Then, knowing
105 * the correct RAM size, we set up our pointers in the softc `iomem'
106 * represents the computed base of the 586 address space. `iomembot'
107 * represents the actual configured base of adapter RAM. Finally,
108 * `iosize' represents the calculated size of 586 RAM. Then, when
109 * laying out commands, we use the interval [iomembot, iomembot +
110 * iosize); to make 24-pointers, we subtract iomem, and to make
111 * 16-pointers, we subtract iomem and AND with 0xffff.
114 #include <sys/param.h>
115 #include <sys/systm.h>
116 #include <sys/eventhandler.h>
117 #include <sys/kernel.h>
118 #include <sys/malloc.h>
119 #include <sys/mbuf.h>
120 #include <sys/socket.h>
121 #include <sys/sockio.h>
122 #include <sys/syslog.h>
124 #include <sys/module.h>
127 #include <machine/bus.h>
128 #include <machine/resource.h>
129 #include <sys/rman.h>
131 #include <net/ethernet.h>
133 #include <net/if_var.h>
134 #include <net/if_types.h>
135 #include <net/if_dl.h>
137 #include <netinet/in.h>
138 #include <netinet/if_ether.h>
140 #include <dev/ic/i82586.h>
141 #include <dev/ie/if_ievar.h>
142 #include <dev/ie/if_iereg.h>
143 #include <dev/ie/if_ie507.h>
144 #include <dev/ie/if_iee16.h>
145 #include <i386/isa/elink.h>
/*
 * Debug-category bits tested against ie_debug throughout the driver;
 * ie_debug selects which diagnostic printfs fire.
 * NOTE(review): IED_RNR and IED_CNA are referenced below but their
 * #defines are not visible in this view -- confirm they exist nearby.
 */
150 #define IED_RINT 0x01
151 #define IED_TINT 0x02
154 #define IED_READFRAME 0x10
155 static int ie_debug = IED_RNR;
159 #define IE_BUF_LEN ETHER_MAX_LEN /* length of transmit buffer */
161 /* XXX this driver uses `volatile' and `caddr_t' to a fault. */
162 typedef volatile char *v_caddr_t; /* core address, pointer to volatile */
164 /* Forward declaration */
/*
 * Prototypes for the driver entry points (init/stop/ioctl/start) and
 * internal helpers defined later in this file.
 */
167 static void ieinit (void *);
168 static void ieinit_locked (struct ie_softc *);
169 static void ie_stop (struct ie_softc *);
170 static int ieioctl (struct ifnet *, u_long, caddr_t);
171 static void iestart (struct ifnet *);
172 static void iestart_locked (struct ifnet *);
175 ee16_interrupt_enable (struct ie_softc *);
178 ie_ack (struct ie_softc *, u_int);
179 static void iereset (struct ie_softc *);
180 static void ie_readframe (struct ie_softc *, int);
181 static void ie_drop_packet_buffer (struct ie_softc *);
182 static int command_and_wait (struct ie_softc *,
183 int, void volatile *, int);
184 static void run_tdr (struct ie_softc *,
185 volatile struct ie_tdr_cmd *);
186 static int ierint (struct ie_softc *);
187 static int ietint (struct ie_softc *);
188 static int iernr (struct ie_softc *);
189 static void start_receiver (struct ie_softc *);
191 ieget (struct ie_softc *, struct mbuf **);
192 static v_caddr_t setup_rfa (struct ie_softc *, v_caddr_t);
193 static int mc_setup (struct ie_softc *);
194 static void ie_mc_reset (struct ie_softc *);
197 static void print_rbd (volatile struct ie_recv_buf_desc * rbd);
/*
 * NOTE(review): in_ierint/in_ietint appear to be debug re-entrancy
 * counters; their readers are not visible in this view -- confirm.
 */
198 static int in_ierint = 0;
199 static int in_ietint = 0;
202 static const char *ie_hardware_names[] = {
214 * sizeof(iscp) == 1+1+2+4 == 8
215 * sizeof(scb) == 2+2+2+2+2+2+2+2 == 16
216 * NFRAMES * sizeof(rfd) == NFRAMES*(2+2+2+2+6+6+2+2) == NFRAMES*24 == 384
217 * sizeof(xmit_cmd) == 2+2+2+2+6+2 == 18
218 * sizeof(transmit buffer) == 1512
219 * sizeof(transmit buffer desc) == 8
223 * NRXBUFS * sizeof(rbd) == NRXBUFS*(2+2+4+2+2) == NRXBUFS*12
224 * NRXBUFS * IE_RBUF_SIZE == NRXBUFS*256
226 * NRXBUFS should be (16384 - 1946) / (256 + 12) == 14438 / 268 == 53
228 * With NRXBUFS == 48, this leaves us 1574 bytes for another command or
229 * more buffers. Another transmit command would be 18+8+1512 == 1538
230 * ---just barely fits!
232 * Obviously all these would have to be reduced for smaller memory sizes.
233 * With a larger memory, it would be possible to roughly double the number
234 * of both transmit and receive buffers.
/*
 * Per-8KB ring sizes; ie_attach scales these by the actual amount of
 * board memory (rman_get_size / 8192).
 */
237 #define NFRAMES 4 /* number of receive frames */
238 #define NRXBUFS 24 /* number of buffers to allocate */
239 #define IE_RBUF_SIZE 256 /* size of each buffer, MUST BE POWER OF TWO */
240 #define NTXBUFS 1 /* number of transmit commands */
241 #define IE_TBUF_SIZE ETHER_MAX_LEN /* size of transmit buffer */
/*
 * MK_24/MK_16 convert a kernel virtual address into the 24-bit or
 * 16-bit pointer forms the 82586 expects, by subtracting the computed
 * base of the chip's address space (see layout discussion above).
 */
243 #define MK_24(base, ptr) ((caddr_t)((uintptr_t)ptr - (uintptr_t)base))
244 #define MK_16(base, ptr) ((u_short)(uintptr_t)MK_24(base, ptr))
/*
 * ee16_shutdown: pulse the EtherExpress 16 ASIC reset line and then
 * release it, leaving the board quiescent at system shutdown.
 */
247 ee16_shutdown(struct ie_softc *sc)
251 outb(PORT(sc) + IEE16_ECTRL, IEE16_RESET_ASIC);
252 outb(PORT(sc) + IEE16_ECTRL, 0);
256 * Taken almost exactly from Bill's if_is.c, then modified beyond recognition.
/*
 * ie_attach: newbus attach routine.  Allocates the ifnet, initializes
 * the softc mutex, sizes the rx/tx rings from the amount of board
 * memory, carves the pointer arrays out of one malloc'd chunk, fills
 * in the ifnet methods, attaches the Ethernet layer, and hooks up the
 * interrupt handler.
 */
259 ie_attach(device_t dev)
261 struct ie_softc * sc;
266 sc = device_get_softc(dev);
267 ifp = sc->ifp = if_alloc(IFT_ETHER);
269 device_printf(sc->dev, "can not if_alloc()\n");
274 mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
278 * based on the amount of memory we have, allocate our tx and rx
/* Scale the per-8KB ring defaults by the actual shared-memory size. */
281 factor = rman_get_size(sc->mem_res) / 8192;
282 sc->nframes = factor * NFRAMES;
283 sc->nrxbufs = factor * NRXBUFS;
284 sc->ntxbufs = factor * NTXBUFS;
287 * Since all of these guys are arrays of pointers, allocate as one
288 * big chunk and dole out accordingly.
290 allocsize = sizeof(void *) * (sc->nframes
292 + (sc->ntxbufs * 3));
293 sc->rframes = (volatile struct ie_recv_frame_desc **) malloc(allocsize,
296 if (sc->rframes == NULL) {
297 mtx_destroy(&sc->lock);
/* Dole the single chunk out: rframes, rbuffs, cbuffs, xmit arrays. */
301 (volatile struct ie_recv_buf_desc **)&sc->rframes[sc->nframes];
302 sc->cbuffs = (volatile u_char **)&sc->rbuffs[sc->nrxbufs];
304 (volatile struct ie_xmit_cmd **)&sc->cbuffs[sc->nrxbufs];
306 (volatile struct ie_xmit_buf **)&sc->xmit_cmds[sc->ntxbufs];
307 sc->xmit_cbuffs = (volatile u_char **)&sc->xmit_buffs[sc->ntxbufs];
310 device_printf(sc->dev, "hardware type %s, revision %d\n",
311 ie_hardware_names[sc->hard_type], sc->hard_vers + 1);
314 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
315 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
316 ifp->if_start = iestart;
317 ifp->if_ioctl = ieioctl;
318 ifp->if_init = ieinit;
319 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
321 ether_ifattach(ifp, sc->enaddr);
323 error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
324 NULL, ie_intr, sc, &sc->irq_ih);
326 device_printf(dev, "Unable to register interrupt handler\n");
327 mtx_destroy(&sc->lock);
/*
 * ie_ack: acknowledge the SCB status events selected by `mask' by
 * writing them back into the command word and asserting channel
 * attention so the 82586 notices.
 */
335 ie_ack(struct ie_softc *sc, u_int mask)
338 sc->scb->ie_command = sc->scb->ie_status & mask;
339 (*sc->ie_chan_attn) (sc);
343 * What to do upon receipt of an interrupt.
/*
 * Interrupt handler body (function header line elided in this view):
 * quiet board-specific interrupt latches, acknowledge only the status
 * bits we actually saw, dispatch on receive / transmit-done / RNR
 * events, loop while more events are pending, then re-arm the board.
 */
348 struct ie_softc *sc = (struct ie_softc *)xsc;
353 /* Clear the interrupt latch on the 3C507. */
354 if (sc->hard_type == IE_3C507
355 && (inb(PORT(sc) + IE507_CTRL) & EL_CTRL_INTL))
356 outb(PORT(sc) + IE507_ICTRL, 1);
358 /* disable interrupts on the EE16. */
359 if (sc->hard_type == IE_EE16)
360 outb(PORT(sc) + IEE16_IRQ, sc->irq_encoded);
362 status = sc->scb->ie_status;
366 /* Don't ack interrupts which we didn't receive */
367 ie_ack(sc, IE_ST_WHENCE & status);
369 if (status & (IE_ST_RECV | IE_ST_RNR)) {
372 if (ie_debug & IED_RINT)
373 if_printf(sc->ifp, "rint\n");
380 if (status & IE_ST_DONE) {
383 if (ie_debug & IED_TINT)
384 if_printf(sc->ifp, "tint\n");
391 if (status & IE_ST_RNR) {
393 if (ie_debug & IED_RNR)
394 if_printf(sc->ifp, "rnr\n");
399 if ((status & IE_ST_ALLDONE) && (ie_debug & IED_CNA))
400 if_printf(sc->ifp, "cna\n");
/* Re-read status; loop again if new events arrived while we worked. */
403 if ((status = sc->scb->ie_status) & IE_ST_WHENCE)
406 /* Clear the interrupt latch on the 3C507. */
407 if (sc->hard_type == IE_3C507)
408 outb(PORT(sc) + IE507_ICTRL, 1);
410 /* enable interrupts on the EE16. */
411 if (sc->hard_type == IE_EE16)
412 outb(PORT(sc) + IEE16_IRQ, sc->irq_encoded | IEE16_IRQ_ENABLE);
417 * Process a received-frame interrupt.
/*
 * ierint: walk the receive-frame-descriptor ring, counting good frames
 * and folding the SCB error tallies into if_ierrors, restarting the
 * receive unit if an RFD carries the RNR (resources exhausted) bit.
 * (Loop framing and ie_readframe call are elided in this view.)
 */
420 ierint(struct ie_softc *sc)
423 static int timesthru = 1024;
427 status = sc->rframes[i]->ie_fd_status;
429 if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) {
430 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
/* Periodically harvest and clear the SCB error counters. */
432 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS,
433 sc->scb->ie_err_crc +
434 sc->scb->ie_err_align +
435 sc->scb->ie_err_resource +
436 sc->scb->ie_err_overrun);
437 sc->scb->ie_err_crc = 0;
438 sc->scb->ie_err_align = 0;
439 sc->scb->ie_err_resource = 0;
440 sc->scb->ie_err_overrun = 0;
/* Receiver ran out of resources: re-prime the ring and restart RU. */
445 if (status & IE_FD_RNR) {
446 if (!(sc->scb->ie_status & IE_RU_READY)) {
447 sc->rframes[0]->ie_fd_next =
448 MK_16(MEM(sc), sc->rbuffs[0]);
449 sc->scb->ie_recv_list =
450 MK_16(MEM(sc), sc->rframes[0]);
451 command_and_wait(sc, IE_RU_START, 0, 0);
456 i = (i + 1) % sc->nframes;
462 * Process a command-complete interrupt. These are only generated by
463 * the transmission of frames. This routine is deceptively simple, since
464 * most of the real work is done by iestart().
/*
 * ietint: for each completed transmit command, translate the 82586
 * transmit-status bits into interface error/collision counters, then
 * run any deferred multicast-setup requested by ie_mc_reset().
 */
467 ietint(struct ie_softc *sc)
469 struct ifnet *ifp = sc->ifp;
473 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
475 for (i = 0; i < sc->xmit_count; i++) {
476 status = sc->xmit_cmds[i]->ie_xmit_status;
478 if (status & IE_XS_LATECOLL) {
479 if_printf(ifp, "late collision\n");
480 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
481 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
482 } else if (status & IE_XS_NOCARRIER) {
483 if_printf(ifp, "no carrier\n");
484 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
485 } else if (status & IE_XS_LOSTCTS) {
486 if_printf(ifp, "lost CTS\n");
487 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
488 } else if (status & IE_XS_UNDERRUN) {
489 if_printf(ifp, "DMA underrun\n");
490 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
491 } else if (status & IE_XS_EXCMAX) {
492 if_printf(ifp, "too many collisions\n");
493 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 16);
494 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
/* Successful transmit: count packet plus its retry collisions. */
496 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
497 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, status & IE_XS_MAXCOLL);
503 * If multicast addresses were added or deleted while we were
504 * transmitting, ie_mc_reset() set the want_mcsetup flag indicating
505 * that we should do it.
507 if (sc->want_mcsetup) {
509 sc->want_mcsetup = 0;
511 /* Wish I knew why this seems to be necessary... */
512 sc->xmit_cmds[0]->ie_xmit_status |= IE_STAT_COMPL;
515 return (0); /* shouldn't be necessary */
519 * Process a receiver-not-ready interrupt. I believe that we get these
520 * when there aren't enough buffers to go around. For now (FIXME), we
521 * just restart the receiver, and hope everything's ok.
/*
 * iernr: recover from receiver-not-ready by disabling the RU,
 * rebuilding the receive frame area from scratch, and restarting;
 * counts the event as one input error.
 */
524 iernr(struct ie_softc *sc)
527 setup_rfa(sc, (v_caddr_t) sc->rframes[0]);
529 sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]);
530 command_and_wait(sc, IE_RU_START, 0, 0);
532 /* This doesn't work either, but it doesn't hang either. */
533 command_and_wait(sc, IE_RU_DISABLE, 0, 0); /* just in case */
534 setup_rfa(sc, (v_caddr_t) sc->rframes[0]); /* ignore cast-qual */
536 sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]);
537 command_and_wait(sc, IE_RU_START, 0, 0); /* was ENABLE */
540 ie_ack(sc, IE_ST_WHENCE);
542 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
547 * Compare two Ether/802 addresses for equality, inlined and
548 * unrolled for speed. I'd love to have an inline assembler
/*
 * ether_equal: byte-by-byte comparison of two 6-byte Ethernet
 * addresses; bails out at the first mismatch.  (The per-byte return
 * statements are elided in this view.)
 */
552 ether_equal(u_char * one, u_char * two)
554 if (one[0] != two[0])
556 if (one[1] != two[1])
558 if (one[2] != two[2])
560 if (one[3] != two[3])
562 if (one[4] != two[4])
564 if (one[5] != two[5])
570 * Determine quickly whether we should bother reading in this packet.
571 * This depends on whether BPF and/or bridging is enabled, whether we
572 * are receiving multicast address, and whether promiscuous mode is enabled.
573 * We assume that if IFF_PROMISC is set, then *somebody* wants to see
574 * all incoming packets.
/*
 * check_eh: decide whether the frame whose Ethernet header is *eh
 * should be passed up.  Non-promiscuous: accept unicast-to-us,
 * broadcast, and configured multicast addresses.  Promiscuous: accept
 * everything.  ALLMULTI-without-PROMISC: accept only multicast,
 * because the chip itself is running promiscuously.
 */
577 check_eh(struct ie_softc *sc, struct ether_header *eh)
579 /* Optimize the common case: normal operation. We've received
580 either a unicast with our dest or a multicast packet. */
581 if (sc->promisc == 0) {
584 /* If not multicast, it's definitely for us */
585 if ((eh->ether_dhost[0] & 1) == 0)
588 /* Accept broadcasts (loose but fast check) */
589 if (eh->ether_dhost[0] == 0xff)
592 /* Compare against our multicast addresses */
593 for (i = 0; i < sc->mcast_count; i++) {
594 if (ether_equal(eh->ether_dhost,
595 (u_char *)&sc->mcast_addrs[i]))
601 /* Always accept packets when in promiscuous mode */
602 if ((sc->promisc & IFF_PROMISC) != 0)
605 /* Always accept packets directed at us */
606 if (ether_equal(eh->ether_dhost, IF_LLADDR(sc->ifp)))
609 /* Must have IFF_ALLMULTI but not IFF_PROMISC set. The chip is
610 actually in promiscuous mode, so discard unicast packets. */
611 return((eh->ether_dhost[0] & 1) != 0);
615 * We want to isolate the bits that have meaning... This assumes that
616 * IE_RBUF_SIZE is an even power of two. If somehow the act_len exceeds
617 * the size of the buffer, then we are screwed anyway.
/*
 * ie_buflen: actual byte count stored in rx buffer `head', masked to
 * [0, IE_RBUF_SIZE] to strip the RBD status bits above the length.
 */
620 ie_buflen(struct ie_softc *sc, int head)
622 return (sc->rbuffs[head]->ie_rbd_actual
623 & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1)));
/*
 * ie_packet_len: sum the used lengths of the rx buffers making up the
 * frame at the head of the ring, stopping at the RBD carrying
 * IE_RBD_LAST; logs and bails if the descriptors are out of sync
 * (error-path details elided in this view).
 */
627 ie_packet_len(struct ie_softc *sc)
630 int head = sc->rbhead;
634 if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
636 print_rbd(sc->rbuffs[sc->rbhead]);
639 "%s: receive descriptors out of sync at %d\n",
640 sc->ifp->if_xname, sc->rbhead);
644 i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST;
646 acc += ie_buflen(sc, head);
647 head = (head + 1) % sc->nrxbufs;
654 * Read data off the interface, and turn it into an mbuf chain.
656 * This code is DRAMATICALLY different from the previous version; this
657 * version tries to allocate the entire mbuf chain up front, given the
658 * length of the data available. This enables us to allocate mbuf
659 * clusters in many situations where before we would have had a long
660 * chain of partially-full mbufs. This should help to speed up the
661 * operation considerably. (Provided that it works, of course.)
/*
 * ieget: pull the frame at the ring head out of shared memory into a
 * freshly allocated mbuf chain (*mp).  First peeks at the Ethernet
 * header and drops uninteresting frames via check_eh() without
 * copying; then pre-allocates the whole chain, then copies buffer by
 * buffer, recycling each RBD as it is drained.
 */
664 ieget(struct ie_softc *sc, struct mbuf **mp)
666 struct ether_header eh;
667 struct mbuf *m, *top, **mymp;
673 totlen = ie_packet_len(sc);
678 * Snarf the Ethernet header.
680 bcopy(sc->cbuffs[sc->rbhead], &eh, sizeof(struct ether_header));
681 /* ignore cast-qual warning here */
684 * As quickly as possible, check if this packet is for us. If not,
685 * don't waste a single cycle copying the rest of the packet in.
686 * This is only a consideration when FILTER is defined; i.e., when
687 * we are either running BPF or doing multicasting.
689 if (!check_eh(sc, &eh)) {
690 ie_drop_packet_buffer(sc);
694 MGETHDR(m, M_NOWAIT, MT_DATA);
696 ie_drop_packet_buffer(sc);
701 m->m_pkthdr.rcvif = sc->ifp;
703 resid = m->m_pkthdr.len = totlen;
709 * This loop goes through and allocates mbufs for all the data we
710 * will be copying in. It does not actually do the copying yet.
712 do { /* while(resid > 0) */
714 * Try to allocate an mbuf to hold the data that we have.
715 * If we already allocated one, just get another one and
716 * stick it on the end (eventually). If we don't already
717 * have one, try to allocate an mbuf cluster big enough to
718 * hold the whole packet, if we think it's reasonable, or a
719 * single mbuf which may or may not be big enough. Got that?
722 MGET(m, M_NOWAIT, MT_DATA);
725 ie_drop_packet_buffer(sc);
730 if (resid >= MINCLSIZE) {
731 if (MCLGET(m, M_NOWAIT))
732 m->m_len = min(resid, MCLBYTES);
734 if (resid < m->m_len) {
/* Leave room for link-layer headers to be prepended later. */
735 if (!top && resid + max_linkhdr <= m->m_len)
736 m->m_data += max_linkhdr;
745 resid = totlen; /* remaining data */
746 offset = 0; /* packet offset */
747 thismboff = 0; /* offset in m */
749 m = top; /* current mbuf */
750 head = sc->rbhead; /* current rx buffer */
753 * Now we take the mbuf chain (hopefully only one mbuf most of the
754 * time) and stuff the data into it. There are no possible failures
755 * at or after this point.
757 while (resid > 0) { /* while there's stuff left */
758 int thislen = ie_buflen(sc, head) - offset;
761 * If too much data for the current mbuf, then fill the
762 * current one up, go to the next one, and try again.
764 if (thislen > m->m_len - thismboff) {
765 int newlen = m->m_len - thismboff;
767 bcopy((v_caddr_t) (sc->cbuffs[head] + offset),
768 mtod(m, caddr_t) +thismboff, (unsigned) newlen);
769 /* ignore cast-qual warning */
771 thismboff = 0; /* new mbuf, so no offset */
772 offset += newlen; /* we are now this far into
774 resid -= newlen; /* so there is this much left
779 * If there is more than enough space in the mbuf to hold
780 * the contents of this buffer, copy everything in, advance
781 * pointers, and so on.
783 if (thislen < m->m_len - thismboff) {
784 bcopy((v_caddr_t) (sc->cbuffs[head] + offset),
785 mtod(m, caddr_t) +thismboff, (unsigned) thislen);
786 thismboff += thislen; /* we are this far into the
788 resid -= thislen; /* and this much is left */
792 * Otherwise, there is exactly enough space to put this
793 * buffer's contents into the current mbuf. Do the
794 * combination of the above actions.
796 bcopy((v_caddr_t) (sc->cbuffs[head] + offset),
797 mtod(m, caddr_t) + thismboff, (unsigned) thislen);
799 thismboff = 0; /* new mbuf, start at the beginning */
800 resid -= thislen; /* and we are this far through */
803 * Advance all the pointers. We can get here from either of
804 * the last two cases, but never the first.
/* Recycle the drained RBD and rotate the ring head/tail. */
808 sc->rbuffs[head]->ie_rbd_actual = 0;
809 sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST;
810 sc->rbhead = head = (head + 1) % sc->nrxbufs;
811 sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
812 sc->rbtail = (sc->rbtail + 1) % sc->nrxbufs;
816 * Unless something changed strangely while we were doing the copy,
817 * we have now copied everything in from the shared memory. This
818 * means that we are done.
824 * Read frame NUM from unit UNIT (pre-cached as IE).
826 * This routine reads the RFD at NUM, and copies in the buffers from
827 * the list of RBD, then rotates the RBD and RFD lists so that the receiver
828 * doesn't start complaining. Trailers are DROPPED---there's no point
829 * in wasting time on confusing code to deal with them. Hopefully,
830 * this machine will never ARP for trailers anyway.
833 ie_readframe(struct ie_softc *sc, int num/* frame number to read */)
835 struct ifnet *ifp = sc->ifp;
836 struct ie_recv_frame_desc rfd;
839 struct ether_header *eh;
/* Snapshot the RFD so we can recycle it before processing the data. */
842 bcopy((v_caddr_t) (sc->rframes[num]), &rfd,
843 sizeof(struct ie_recv_frame_desc));
846 * Immediately advance the RFD list, since we have copied ours
849 sc->rframes[num]->ie_fd_status = 0;
850 sc->rframes[num]->ie_fd_last |= IE_FD_LAST;
851 sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST;
852 sc->rftail = (sc->rftail + 1) % sc->nframes;
853 sc->rfhead = (sc->rfhead + 1) % sc->nframes;
855 if (rfd.ie_fd_status & IE_FD_OK) {
857 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); /* this counts as an
863 eh = mtod(m, struct ether_header *);
864 if (ie_debug & IED_READFRAME) {
865 if_printf(ifp, "frame from ether %6D type %x\n",
866 eh->ether_shost, ":", (unsigned) eh->ether_type);
868 if (ntohs(eh->ether_type) > ETHERTYPE_TRAIL
869 && ntohs(eh->ether_type) < (ETHERTYPE_TRAIL + ETHERTYPE_NTRAILER))
870 printf("received trailer!\n");
877 * Finally pass this packet up to higher layers.
880 (*ifp->if_input)(ifp, m);
/*
 * ie_drop_packet_buffer: discard the frame at the rx-ring head without
 * copying it, recycling each RBD through IE_RBD_LAST; resets if the
 * descriptors are found out of sync (reset path elided in this view).
 */
885 ie_drop_packet_buffer(struct ie_softc *sc)
891 * This means we are somehow out of sync. So, we reset the
894 if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) {
896 print_rbd(sc->rbuffs[sc->rbhead]);
898 log(LOG_ERR, "%s: receive descriptors out of sync at %d\n",
899 sc->ifp->if_xname, sc->rbhead);
903 i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST;
905 sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST;
906 sc->rbuffs[sc->rbhead]->ie_rbd_actual = 0;
907 sc->rbhead = (sc->rbhead + 1) % sc->nrxbufs;
908 sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST;
909 sc->rbtail = (sc->rbtail + 1) % sc->nrxbufs;
915 * Start transmission on an interface.
/*
 * iestart: if_start entry point; presumably wraps iestart_locked()
 * with the softc lock (locking lines elided in this view -- confirm).
 */
918 iestart(struct ifnet *ifp)
920 struct ie_softc *sc = ifp->if_softc;
/*
 * iestart_locked: drain the interface send queue into the transmit
 * buffers, building a chained list of 82586 XMIT commands, then kick
 * the command unit once and mark the interface active.
 */
928 iestart_locked(struct ifnet *ifp)
930 struct ie_softc *sc = ifp->if_softc;
932 volatile unsigned char *buffer;
936 * This is not really volatile, in this routine, but it makes gcc
939 volatile u_short *bptr = &sc->scb->ie_command_list;
941 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
943 if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
947 IF_DEQUEUE(&sc->ifp->if_snd, m);
953 buffer = sc->xmit_cbuffs[sc->xmit_count];
/* Copy the mbuf chain into the contiguous transmit buffer. */
956 for (m0 = m; m && len < IE_BUF_LEN; m = m->m_next) {
957 bcopy(mtod(m, caddr_t), buffer, m->m_len);
/* Pad runts up to the Ethernet minimum. */
963 len = max(len, ETHER_MIN_LEN);
965 sc->xmit_buffs[sc->xmit_count]->ie_xmit_flags =
967 sc->xmit_buffs[sc->xmit_count]->ie_xmit_next = 0xffff;
968 sc->xmit_buffs[sc->xmit_count]->ie_xmit_buf =
969 MK_24(sc->iomem, sc->xmit_cbuffs[sc->xmit_count]);
971 sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_cmd = IE_CMD_XMIT;
972 sc->xmit_cmds[sc->xmit_count]->ie_xmit_status = 0;
973 sc->xmit_cmds[sc->xmit_count]->ie_xmit_desc =
974 MK_16(sc->iomem, sc->xmit_buffs[sc->xmit_count]);
/* Link this command onto the chain and remember its link field. */
976 *bptr = MK_16(sc->iomem, sc->xmit_cmds[sc->xmit_count]);
977 bptr = &sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_link;
979 } while (sc->xmit_count < sc->ntxbufs);
982 * If we queued up anything for transmission, send it.
984 if (sc->xmit_count) {
985 sc->xmit_cmds[sc->xmit_count - 1]->com.ie_cmd_cmd |=
986 IE_CMD_LAST | IE_CMD_INTR;
989 * By passing the command pointer as a null, we tell
990 * command_and_wait() to pretend that this isn't an action
991 * command. I wish I understood what was happening here.
993 command_and_wait(sc, IE_CU_START, 0, 0);
994 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1000 * Check to see if there's an 82586 out there.
/*
 * check_ie_present: probe for a live 82586 at the configured memory
 * window.  Places a trial ISCP/SCB at the bottom of the window (which
 * doubles as a memory-size sanity check), resets the chip, and watches
 * ie_busy clear; then relocates the ISCP to its real home and repeats.
 * On success records the computed 24-bit base in sc->iomem.
 */
1003 check_ie_present(struct ie_softc *sc)
1005 volatile struct ie_sys_conf_ptr *scp;
1006 volatile struct ie_int_sys_conf_ptr *iscp;
1007 volatile struct ie_sys_ctl_block *scb;
/* The chip's 16 MB space ends at the top of adapter RAM. */
1010 realbase = (uintptr_t) sc->iomembot + sc->iosize - (1 << 24);
1012 scp = (volatile struct ie_sys_conf_ptr *) (uintptr_t)
1013 (realbase + IE_SCP_ADDR);
1014 bzero((volatile char *) scp, sizeof *scp);
1017 * First we put the ISCP at the bottom of memory; this tests to make
1018 * sure that our idea of the size of memory is the same as the
1019 * controller's. This is NOT where the ISCP will be in normal
1022 iscp = (volatile struct ie_int_sys_conf_ptr *) sc->iomembot;
1023 bzero((volatile char *)iscp, sizeof *iscp);
1025 scb = (volatile struct ie_sys_ctl_block *) sc->iomembot;
1026 bzero((volatile char *)scb, sizeof *scb);
1028 scp->ie_bus_use = sc->bus_use; /* 8-bit or 16-bit */
1029 scp->ie_iscp_ptr = (caddr_t) (uintptr_t)
1030 ((volatile char *) iscp - (volatile char *) (uintptr_t) realbase);
1033 iscp->ie_scb_offset = MK_16(realbase, scb) + 256;
1035 (*sc->ie_reset_586) (sc);
1036 (*sc->ie_chan_attn) (sc);
1038 DELAY(100); /* wait a while... */
/* ie_busy still set => no chip answered the first probe. */
1040 if (iscp->ie_busy) {
1044 * Now relocate the ISCP to its real home, and reset the controller
1047 iscp = (void *) Align((caddr_t) (uintptr_t)
1048 (realbase + IE_SCP_ADDR -
1049 sizeof(struct ie_int_sys_conf_ptr)));
1050 bzero((volatile char *) iscp, sizeof *iscp); /* ignore cast-qual */
1052 scp->ie_iscp_ptr = (caddr_t) (uintptr_t)
1053 ((volatile char *) iscp - (volatile char *) (uintptr_t) realbase);
1056 iscp->ie_scb_offset = MK_16(realbase, scb);
1058 (*sc->ie_reset_586) (sc);
1059 (*sc->ie_chan_attn) (sc);
1063 if (iscp->ie_busy) {
1066 sc->iomem = (caddr_t) (uintptr_t) realbase;
1072 * Acknowledge any interrupts we may have caused...
1074 ie_ack(sc, IE_ST_WHENCE);
/* Hardware-reset the 82586 on a 3C507: assert then release RESET. */
1080 el_reset_586(struct ie_softc *sc)
1082 outb(PORT(sc) + IE507_CTRL, EL_CTRL_RESET);
1084 outb(PORT(sc) + IE507_CTRL, EL_CTRL_NORMAL);
/* Hardware-reset the 82586 on an AT&T StarLAN-style board. */
1089 sl_reset_586(struct ie_softc *sc)
1091 outb(PORT(sc) + IEATT_RESET, 0);
/* Hardware-reset the 82586 on an EtherExpress 16: pulse RESET_586. */
1095 ee16_reset_586(struct ie_softc *sc)
1097 outb(PORT(sc) + IEE16_ECTRL, IEE16_RESET_586);
1099 outb(PORT(sc) + IEE16_ECTRL, 0);
/* Assert channel attention on a 3C507. */
1104 el_chan_attn(struct ie_softc *sc)
1106 outb(PORT(sc) + IE507_ATTN, 1);
/* Assert channel attention on a StarLAN-style board. */
1110 sl_chan_attn(struct ie_softc *sc)
1112 outb(PORT(sc) + IEATT_ATTN, 0);
/* Assert channel attention on an EtherExpress 16. */
1116 ee16_chan_attn(struct ie_softc *sc)
1118 outb(PORT(sc) + IEE16_ATTN, 0);
/* Re-enable board interrupts on the EE16 (IRQ select + enable bit). */
1121 static __inline void
1122 ee16_interrupt_enable(struct ie_softc *sc)
1125 outb(sc->port + IEE16_IRQ, sc->irq_encoded | IEE16_IRQ_ENABLE);
/* Read the 6-byte station address from consecutive I/O ports. */
1130 sl_read_ether(struct ie_softc *sc, unsigned char *addr)
1134 for (i = 0; i < 6; i++)
1135 addr[i] = inb(PORT(sc) + i);
/*
 * iereset: abort all RU/CU activity, re-probe the chip (panicking if
 * it vanished), and reinitialize if the interface was up.
 */
1139 iereset(struct ie_softc *sc)
1141 struct ifnet *ifp = sc->ifp;
1143 if_printf(ifp, "reset\n");
1147 * Stop i82586 dead in its tracks.
1149 if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0))
1150 if_printf(ifp, "abort commands timed out\n");
1152 if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0))
1153 if_printf(ifp, "disable commands timed out\n");
1156 if (!check_ie_present(sc))
1157 panic("ie disappeared!");
1160 if (ifp->if_flags & IFF_UP)
1167 * Send a command to the controller and wait for it to either
1168 * complete or be accepted, depending on the command. If the
1169 * command pointer is null, then pretend that the command is
1170 * not an action command. If the command pointer is not null,
1171 * and the command is an action command, wait for
1172 * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK
/*
 * command_and_wait: write `cmd' into the SCB and ring channel
 * attention.  For action commands, busy-wait (up to ~.37s) for the
 * requested status bits in *pcmd; otherwise spin until the SCB
 * command word clears (command accepted).
 */
1176 command_and_wait(struct ie_softc *sc, int cmd, volatile void *pcmd, int mask)
1178 volatile struct ie_cmd_common *cc = pcmd;
1181 sc->scb->ie_command = (u_short) cmd;
1183 if (IE_ACTION_COMMAND(cmd) && pcmd) {
1184 (*sc->ie_chan_attn) (sc);
1187 * Now spin-lock waiting for status. This is not a very
1188 * nice thing to do, but I haven't figured out how, or
1189 * indeed if, we can put the process waiting for action to
1190 * sleep. (We may be getting called through some other
1191 * timeout running in the kernel.)
1193 * According to the packet driver, the minimum timeout
1194 * should be .369 seconds, which we round up to .37.
1196 for (i = 0; i < 370; i++) {
1197 if (cc->ie_cmd_status & mask)
1206 * Otherwise, just wait for the command to be accepted.
1208 (*sc->ie_chan_attn) (sc);
1210 while (sc->scb->ie_command); /* spin lock */
1217 * Run the time-domain reflectometer...
/*
 * run_tdr: issue a TDR command to check the cable, then decode the
 * result word into a human-readable diagnosis (OK / transceiver
 * problem / open / short, with distance in clocks where applicable).
 */
1220 run_tdr(struct ie_softc *sc, volatile struct ie_tdr_cmd *cmd)
1224 cmd->com.ie_cmd_status = 0;
1225 cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST;
1226 cmd->com.ie_cmd_link = 0xffff;
1227 cmd->ie_tdr_time = 0;
1229 sc->scb->ie_command_list = MK_16(MEM(sc), cmd);
1230 cmd->ie_tdr_time = 0;
1232 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL))
1235 result = cmd->ie_tdr_time;
1237 ie_ack(sc, IE_ST_WHENCE);
1239 if (result & IE_TDR_SUCCESS)
1242 if (result & IE_TDR_XCVR) {
1243 if_printf(sc->ifp, "transceiver problem\n");
1244 } else if (result & IE_TDR_OPEN) {
1245 if_printf(sc->ifp, "TDR detected an open %d clocks away\n",
1246 result & IE_TDR_TIME);
1247 } else if (result & IE_TDR_SHORT) {
1248 if_printf(sc->ifp, "TDR detected a short %d clocks away\n",
1249 result & IE_TDR_TIME);
1251 if_printf(sc->ifp, "TDR returned unknown status %x\n", result);
/*
 * start_receiver: point the SCB receive list at the first RFD, start
 * the receive unit, and ack any resulting events.
 */
1256 start_receiver(struct ie_softc *sc)
1259 sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]);
1260 command_and_wait(sc, IE_RU_START, 0, 0);
1262 ie_ack(sc, IE_ST_WHENCE);
1266 * Here is a helper routine for iernr() and ieinit(). This sets up
/*
 * setup_rfa: lay out the receive frame area starting at `ptr': the
 * RFD ring, then the RBD ring with its data buffers, each list linked
 * circularly with the end-of-list bit on the last entry.  Resets the
 * head/tail indices and points the SCB at the first RFD.  Returns the
 * aligned address just past the area.
 */
1270 setup_rfa(struct ie_softc *sc, v_caddr_t ptr)
1272 volatile struct ie_recv_frame_desc *rfd = (volatile void *)ptr;
1273 volatile struct ie_recv_buf_desc *rbd;
1276 /* First lay them out */
1277 for (i = 0; i < sc->nframes; i++) {
1278 sc->rframes[i] = rfd;
1279 bzero((volatile char *) rfd, sizeof *rfd); /* ignore cast-qual */
1283 ptr = Alignvol(rfd); /* ignore cast-qual */
1285 /* Now link them together */
1286 for (i = 0; i < sc->nframes; i++) {
1287 sc->rframes[i]->ie_fd_next =
1288 MK_16(MEM(sc), sc->rframes[(i + 1) % sc->nframes]);
1291 /* Finally, set the EOL bit on the last one. */
1292 sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST;
1295 * Now lay out some buffers for the incoming frames. Note that we
1296 * set aside a bit of slop in each buffer, to make sure that we have
1297 * enough space to hold a single frame in every buffer.
1299 rbd = (volatile void *) ptr;
1301 for (i = 0; i < sc->nrxbufs; i++) {
1302 sc->rbuffs[i] = rbd;
1303 bzero((volatile char *)rbd, sizeof *rbd);
1304 ptr = Alignvol(ptr + sizeof *rbd);
1305 rbd->ie_rbd_length = IE_RBUF_SIZE;
1306 rbd->ie_rbd_buffer = MK_24(MEM(sc), ptr);
1307 sc->cbuffs[i] = (volatile void *) ptr;
1308 ptr += IE_RBUF_SIZE;
1309 rbd = (volatile void *) ptr;
1312 /* Now link them together */
1313 for (i = 0; i < sc->nrxbufs; i++) {
1314 sc->rbuffs[i]->ie_rbd_next =
1315 MK_16(MEM(sc), sc->rbuffs[(i + 1) % sc->nrxbufs]);
1318 /* Tag EOF on the last one */
1319 sc->rbuffs[sc->nrxbufs - 1]->ie_rbd_length |= IE_RBD_LAST;
1322 * We use the head and tail pointers on receive to keep track of the
1323 * order in which RFDs and RBDs are used.
1326 sc->rftail = sc->nframes - 1;
1328 sc->rbtail = sc->nrxbufs - 1;
1330 sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]);
1331 sc->rframes[0]->ie_fd_buf_desc = MK_16(MEM(sc), sc->rbuffs[0]);
1333 ptr = Alignvol(ptr);
1338 * Run the multicast setup command.
/*
 * Uploads the driver's multicast address list (sc->mcast_addrs,
 * sc->mcast_count entries of 6 bytes each) to the chip via an
 * MC-SETUP command built in transmit buffer 0.
 * NOTE(review): sampled listing — some lines are not visible here.
 */
1341 mc_setup(struct ie_softc *sc)
1343 volatile struct ie_mcast_cmd *cmd = (volatile void *)sc->xmit_cbuffs[0];
/* Single MC-SETUP command, end of command list. */
1345 cmd->com.ie_cmd_status = 0;
1346 cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST;
1347 cmd->com.ie_cmd_link = 0xffff;
1349 /* ignore cast-qual */
1350 bcopy((v_caddr_t) sc->mcast_addrs, (v_caddr_t) cmd->ie_mcast_addrs,
1351 sc->mcast_count * sizeof *sc->mcast_addrs);
/* Byte count of the address list: 6 bytes per Ethernet address. */
1353 cmd->ie_mcast_bytes = sc->mcast_count * 6; /* grrr... */
1355 sc->scb->ie_command_list = MK_16(MEM(sc), cmd);
/* Failure is either a command timeout or a not-OK completion status. */
1356 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL)
1357 || !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1358 if_printf(sc->ifp, "multicast address setup command failed\n");
1365 * This routine takes the environment generated by check_ie_present()
1366 * and adds to it all the other structures we need to operate the adapter.
1367 * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands,
1368 * starting the receiver unit, and clearing interrupts.
1374 struct ie_softc *sc = xsc;
/*
 * Bring the adapter fully up: run CONFIGURE and IA-SETUP commands,
 * run the TDR cable test, lay out the receive frame area and the
 * transmit command/buffer structures, and mark the interface running.
 * Caller is expected to hold the softc lock (name suffix "_locked").
 * NOTE(review): sampled listing — declarations, braces, and several
 * statements are not visible here.
 */
1382 ieinit_locked(struct ie_softc *sc)
1384 struct ifnet *ifp = sc->ifp;
1385 volatile struct ie_sys_ctl_block *scb = sc->scb;
/* Carve structures out of shared memory starting just past the SCB. */
1389 ptr = Alignvol((volatile char *) scb + sizeof *scb);
1392 * Send the configure command first.
1395 volatile struct ie_config_cmd *cmd = (volatile void *) ptr;
/* STARLAN 10 hardware needs a different configuration setup. */
1397 ie_setup_config(cmd, sc->promisc,
1398 sc->hard_type == IE_STARLAN10);
1399 cmd->com.ie_cmd_status = 0;
1400 cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST;
1401 cmd->com.ie_cmd_link = 0xffff;
1403 scb->ie_command_list = MK_16(MEM(sc), cmd);
1405 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL)
1406 || !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1407 if_printf(ifp, "configure command failed\n");
1412 * Now send the Individual Address Setup command.
1415 volatile struct ie_iasetup_cmd *cmd = (volatile void *) ptr;
1417 cmd->com.ie_cmd_status = 0;
1418 cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST;
1419 cmd->com.ie_cmd_link = 0xffff;
/* Program the station's MAC address into the chip. */
1421 bcopy((volatile char *)IF_LLADDR(ifp),
1422 (volatile char *)&cmd->ie_address, sizeof cmd->ie_address);
1423 scb->ie_command_list = MK_16(MEM(sc), cmd);
1424 if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL)
1425 || !(cmd->com.ie_cmd_status & IE_STAT_OK)) {
1426 if_printf(ifp, "individual address "
1427 "setup command failed\n");
1433 * Now run the time-domain reflectometer.
1435 run_tdr(sc, (volatile void *) ptr);
1438 * Acknowledge any interrupts we have generated thus far.
1440 ie_ack(sc, IE_ST_WHENCE);
/* Lay out the receive frame area; returns the next free pointer. */
1445 ptr = setup_rfa(sc, ptr);
1448 * Finally, the transmit command and buffer are the last little bit
1452 /* transmit command buffers */
/* Interleave xmit command blocks and buffer descriptors, aligned. */
1453 for (i = 0; i < sc->ntxbufs; i++) {
1454 sc->xmit_cmds[i] = (volatile void *) ptr;
1455 ptr += sizeof *sc->xmit_cmds[i];
1456 ptr = Alignvol(ptr);
1457 sc->xmit_buffs[i] = (volatile void *)ptr;
1458 ptr += sizeof *sc->xmit_buffs[i];
1459 ptr = Alignvol(ptr);
1462 /* transmit buffers */
1463 for (i = 0; i < sc->ntxbufs - 1; i++) {
1464 sc->xmit_cbuffs[i] = (volatile void *)ptr;
1466 ptr = Alignvol(ptr);
1468 sc->xmit_cbuffs[sc->ntxbufs - 1] = (volatile void *) ptr;
/* Zero all but slot 0; slot 0 gets special treatment below. */
1470 for (i = 1; i < sc->ntxbufs; i++) {
1471 bzero((v_caddr_t) sc->xmit_cmds[i], sizeof *sc->xmit_cmds[i]);
1472 bzero((v_caddr_t) sc->xmit_buffs[i], sizeof *sc->xmit_buffs[i]);
1476 * This must be coordinated with iestart() and ietint().
/* Pretend slot 0 already completed so the first transmit can proceed. */
1478 sc->xmit_cmds[0]->ie_xmit_status = IE_STAT_COMPL;
1480 /* take the ee16 out of loopback */
1481 if (sc->hard_type == IE_EE16) {
1482 u_int8_t bart_config;
1484 bart_config = inb(PORT(sc) + IEE16_CONFIG);
1485 bart_config &= ~IEE16_BART_LOOPBACK;
1486 /* inb doesn't get bit! */
/* Re-set the MCS16 test bit that reading the register does not return. */
1487 bart_config |= IEE16_BART_MCS16_TEST;
1488 outb(PORT(sc) + IEE16_CONFIG, bart_config);
1489 ee16_interrupt_enable(sc);
/* Mark the interface up-and-running for the network stack. */
1492 ifp->if_drv_flags |= IFF_DRV_RUNNING; /* tell higher levels
1494 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
/*
 * Stop the interface: clear the running/active flags so the stack
 * stops handing us packets, then disable the chip's receive unit.
 * NOTE(review): sampled listing — return type and closing brace elided.
 */
1502 ie_stop(struct ie_softc *sc)
1504 struct ifnet *ifp = sc->ifp;
1506 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1507 command_and_wait(sc, IE_RU_DISABLE, 0, 0);
/*
 * ifnet ioctl handler. Visible here: the IFF_UP / promiscuous-mode
 * transition logic and the fallthrough to ether_ioctl().
 * NOTE(review): sampled listing — the switch statement, case labels,
 * and locking around these fragments are not visible here.
 */
1511 ieioctl(struct ifnet *ifp, u_long command, caddr_t data)
1514 struct ie_softc *sc = ifp->if_softc;
1519 * Note that this device doesn't have an "all multicast"
1520 * mode, so we must turn on promiscuous mode and do the
1521 * filtering manually.
/* Interface going down while running: stop it (action lines elided). */
1524 if ((ifp->if_flags & IFF_UP) == 0 &&
1525 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
/* Interface coming up while stopped: record promisc state and init. */
1527 } else if ((ifp->if_flags & IFF_UP) &&
1528 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1530 ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
/* Promisc/allmulti setting changed while running: reprogram the chip. */
1532 } else if (sc->promisc ^
1533 (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))) {
1535 ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI);
1544 * Update multicast listeners
1546 /* reset multicast filtering */
/* Everything else is handled by the generic Ethernet ioctl code. */
1554 error = ether_ioctl(ifp, command, data);
/*
 * Rebuild sc->mcast_addrs from the interface's multicast address
 * list and flag that an MC-SETUP command is needed. If the list
 * exceeds MAXMCAST, falls back to IFF_ALLMULTI (which this driver
 * implements via promiscuous mode — see ieioctl()).
 * NOTE(review): sampled listing — some statements are elided.
 */
1562 ie_mc_reset(struct ie_softc *sc)
1564 struct ifmultiaddr *ifma;
1567 * Step through the list of addresses.
1569 sc->mcast_count = 0;
/* Lock the multicast list while we walk it. */
1570 if_maddr_rlock(sc->ifp);
1571 TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
/* Only link-layer (Ethernet) addresses are relevant. */
1572 if (ifma->ifma_addr->sa_family != AF_LINK)
1575 /* XXX - this is broken... */
/* Too many groups for the chip's filter: go all-multicast instead. */
1576 if (sc->mcast_count >= MAXMCAST) {
1577 sc->ifp->if_flags |= IFF_ALLMULTI;
1578 if (sc->ifp->if_flags & IFF_UP)
/* Copy the 6-byte Ethernet address into our table. */
1582 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
1583 &(sc->mcast_addrs[sc->mcast_count]), 6);
1586 if_maddr_runlock(sc->ifp);
/* Defer the actual MC-SETUP command; mc_setup() runs it later. */
1589 sc->want_mcsetup = 1;
/*
 * Debug helper: dump the fields of one receive buffer descriptor.
 */
1595 print_rbd(volatile struct ie_recv_buf_desc * rbd)
1597 printf("RBD at %p:\n"
1598 "actual %04x, next %04x, buffer %p\n"
1599 "length %04x, mbz %04x\n",
1600 (volatile void *) rbd,
1601 rbd->ie_rbd_actual, rbd->ie_rbd_next,
1602 (void *) rbd->ie_rbd_buffer,
1603 rbd->ie_rbd_length, rbd->mbz)
/*
 * Allocate the bus resources this device needs: an I/O port range,
 * a shared-memory window, and an IRQ. On success, caches the bus
 * tags/handles and derived values (port base, memory base, size).
 * NOTE(review): sampled listing — the error paths after each
 * device_printf (presumably goto cleanup / return error) are elided.
 */
1609 ie_alloc_resources (device_t dev)
1611 struct ie_softc * sc;
1615 sc = device_get_softc(dev);
1617 sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid,
1620 device_printf(dev, "No I/O space?!\n");
1624 sc->io_bt = rman_get_bustag(sc->io_res);
1625 sc->io_bh = rman_get_bushandle(sc->io_res);
1627 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
1630 device_printf(dev, "No Memory!\n");
1634 sc->mem_bt = rman_get_bustag(sc->mem_res);
1635 sc->mem_bh = rman_get_bushandle(sc->mem_res);
1637 sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
1640 device_printf(dev, "No IRQ!\n");
1645 sc->port = rman_get_start(sc->io_res); /* XXX hack */
1646 sc->iomembot = rman_get_virtual(sc->mem_res);
1647 sc->iosize = rman_get_size(sc->mem_res);
/*
 * Release everything ie_alloc_resources() (and attach) acquired:
 * tear down the interrupt handler, free the receive-frame array,
 * and release the I/O, IRQ, and memory resources.
 * NOTE(review): sampled listing — the NULL guards around each
 * release (presumably `if (sc->xxx_res)`) are elided from view.
 */
1655 ie_release_resources (device_t dev)
1657 struct ie_softc * sc;
1659 sc = device_get_softc(dev);
1662 bus_teardown_intr(dev, sc->irq_res, sc->irq_ih);
1664 free(sc->rframes, M_DEVBUF);
1666 bus_release_resource(dev, SYS_RES_IOPORT,
1667 sc->io_rid, sc->io_res);
1669 bus_release_resource(dev, SYS_RES_IRQ,
1670 sc->irq_rid, sc->irq_res);
1672 bus_release_resource(dev, SYS_RES_MEMORY,
1673 sc->mem_rid, sc->mem_res);
1681 ie_detach (device_t dev)
1683 struct ie_softc * sc;
1686 sc = device_get_softc(dev);
1690 if (sc->hard_type == IE_EE16)
1695 ether_ifdetach(ifp);
1696 ie_release_resources(dev);
1697 mtx_destroy(&sc->lock);