1 /*-
2  * Copyright (c) 2011 Jakub Wojciech Klama <jceel@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  */
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/endian.h>
32 #include <sys/systm.h>
33 #include <sys/sockio.h>
34 #include <sys/mbuf.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <sys/rman.h>
41 #include <sys/bus.h>
42 #include <sys/socket.h>
43 #include <machine/bus.h>
44 #include <machine/intr.h>
45
46 #include <net/if.h>
47 #include <net/if_arp.h>
48 #include <net/ethernet.h>
49 #include <net/if_dl.h>
50 #include <net/if_media.h>
51 #include <net/if_types.h>
52 #include <net/if_var.h>
53
54 #include <net/bpf.h>
55
56 #include <dev/ofw/ofw_bus.h>
57 #include <dev/ofw/ofw_bus_subr.h>
58
59 #include <dev/mii/mii.h>
60 #include <dev/mii/miivar.h>
61
62 #include <arm/lpc/lpcreg.h>
63 #include <arm/lpc/lpcvar.h>
64 #include <arm/lpc/if_lpereg.h>
65
66 #include "miibus_if.h"
67
68 #ifdef DEBUG
69 #define debugf(fmt, args...) do { printf("%s(): ", __func__);   \
70     printf(fmt,##args); } while (0)
71 #else
72 #define debugf(fmt, args...)
73 #endif
74
75 struct lpe_dmamap_arg {
76         bus_addr_t              lpe_dma_busaddr;
77 };
78
79 struct lpe_rxdesc {
80         struct mbuf *           lpe_rxdesc_mbuf;
81         bus_dmamap_t            lpe_rxdesc_dmamap;
82 };
83
84 struct lpe_txdesc {
85         int                     lpe_txdesc_first;
86         struct mbuf *           lpe_txdesc_mbuf;
87         bus_dmamap_t            lpe_txdesc_dmamap;
88 };
89
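/*
 * Software state for the DMA rings: bus_dma tags and maps for the Tx/Rx
 * descriptor and status rings, per-buffer descriptors, and the driver's
 * own Tx producer/consumer bookkeeping (prod, last, used).
 */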
90 struct lpe_chain_data {
91         bus_dma_tag_t           lpe_parent_tag;
92         bus_dma_tag_t           lpe_tx_ring_tag;
93         bus_dmamap_t            lpe_tx_ring_map;
94         bus_dma_tag_t           lpe_tx_status_tag;
95         bus_dmamap_t            lpe_tx_status_map;
96         bus_dma_tag_t           lpe_tx_buf_tag;
97         bus_dma_tag_t           lpe_rx_ring_tag;
98         bus_dmamap_t            lpe_rx_ring_map;
99         bus_dma_tag_t           lpe_rx_status_tag;
100         bus_dmamap_t            lpe_rx_status_map;
101         bus_dma_tag_t           lpe_rx_buf_tag;
102         struct lpe_rxdesc       lpe_rx_desc[LPE_RXDESC_NUM];
103         struct lpe_txdesc       lpe_tx_desc[LPE_TXDESC_NUM];
104         int                     lpe_tx_prod;
105         int                     lpe_tx_last;
106         int                     lpe_tx_used;
107 };
108
109 struct lpe_ring_data {
110         struct lpe_hwdesc *     lpe_rx_ring;
111         struct lpe_hwstatus *   lpe_rx_status;
112         bus_addr_t              lpe_rx_ring_phys;
113         bus_addr_t              lpe_rx_status_phys;
114         struct lpe_hwdesc *     lpe_tx_ring;
115         struct lpe_hwstatus *   lpe_tx_status;
116         bus_addr_t              lpe_tx_ring_phys;
117         bus_addr_t              lpe_tx_status_phys;
118 };
119
120 struct lpe_softc {
121         struct ifnet *          lpe_ifp;
122         struct mtx              lpe_mtx;
123         phandle_t               lpe_ofw;
124         device_t                lpe_dev;
125         device_t                lpe_miibus;
126         uint8_t                 lpe_enaddr[6];
127         struct resource *       lpe_mem_res;
128         struct resource *       lpe_irq_res;
129         void *                  lpe_intrhand;
130         bus_space_tag_t         lpe_bst;
131         bus_space_handle_t      lpe_bsh;
132 #define LPE_FLAG_LINK           (1 << 0)
133         uint32_t                lpe_flags;
134         int                     lpe_watchdog_timer;
135         struct callout          lpe_tick;
136         struct lpe_chain_data   lpe_cdata;
137         struct lpe_ring_data    lpe_rdata;
138 };
139
140 static int lpe_probe(device_t);
141 static int lpe_attach(device_t);
142 static int lpe_detach(device_t);
143 static int lpe_miibus_readreg(device_t, int, int);
144 static int lpe_miibus_writereg(device_t, int, int, int);
145 static void lpe_miibus_statchg(device_t);
146
147 static void lpe_reset(struct lpe_softc *);
148 static void lpe_init(void *);
149 static void lpe_init_locked(struct lpe_softc *);
150 static void lpe_start(struct ifnet *);
151 static void lpe_start_locked(struct ifnet *);
152 static void lpe_stop(struct lpe_softc *);
153 static void lpe_stop_locked(struct lpe_softc *);
154 static int lpe_ioctl(struct ifnet *, u_long, caddr_t);
155 static void lpe_set_rxmode(struct lpe_softc *);
156 static void lpe_set_rxfilter(struct lpe_softc *);
157 static void lpe_intr(void *);
158 static void lpe_rxintr(struct lpe_softc *);
159 static void lpe_txintr(struct lpe_softc *);
160 static void lpe_tick(void *);
161 static void lpe_watchdog(struct lpe_softc *);
162 static int lpe_encap(struct lpe_softc *, struct mbuf **);
163 static int lpe_dma_alloc(struct lpe_softc *);
164 static int lpe_dma_alloc_rx(struct lpe_softc *);
165 static int lpe_dma_alloc_tx(struct lpe_softc *);
166 static int lpe_init_rx(struct lpe_softc *);
167 static int lpe_init_rxbuf(struct lpe_softc *, int);
168 static void lpe_discard_rxbuf(struct lpe_softc *, int);
169 static void lpe_dmamap_cb(void *, bus_dma_segment_t *, int, int);
170 static int lpe_ifmedia_upd(struct ifnet *);
171 static void lpe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
172
173 #define lpe_lock(_sc)           mtx_lock(&(_sc)->lpe_mtx)
174 #define lpe_unlock(_sc)         mtx_unlock(&(_sc)->lpe_mtx)
175 #define lpe_lock_assert(_sc)    mtx_assert(&(_sc)->lpe_mtx, MA_OWNED)
176
177 #define lpe_read_4(_sc, _reg)           \
178     bus_space_read_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg))
179 #define lpe_write_4(_sc, _reg, _val)    \
180     bus_space_write_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg), (_val))
181
182 #define LPE_HWDESC_RXERRS       (LPE_HWDESC_CRCERROR | LPE_HWDESC_SYMBOLERROR | \
183     LPE_HWDESC_LENGTHERROR | LPE_HWDESC_ALIGNERROR | LPE_HWDESC_OVERRUN | \
184     LPE_HWDESC_RXNODESCR)
185
186 #define LPE_HWDESC_TXERRS       (LPE_HWDESC_EXCDEFER | LPE_HWDESC_EXCCOLL | \
187     LPE_HWDESC_LATECOLL | LPE_HWDESC_UNDERRUN | LPE_HWDESC_TXNODESCR)
188
189 static int
190 lpe_probe(device_t dev)
191 {
192
193         if (!ofw_bus_status_okay(dev))
194                 return (ENXIO);
195
196         if (!ofw_bus_is_compatible(dev, "lpc,ethernet"))
197                 return (ENXIO);
198
199         device_set_desc(dev, "LPC32x0 10/100 Ethernet");
200         return (BUS_PROBE_DEFAULT);
201 }
202
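/*
 * Attach: read the MAC address from the "local-mac-address" FDT property
 * (falling back to a fixed default), map the register window and IRQ,
 * create the ifnet, enable the EMAC clock, reset the controller, put the
 * MII interface into RMII mode, attach the PHY and allocate the DMA rings.
 */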
203 static int
204 lpe_attach(device_t dev)
205 {
206         struct lpe_softc *sc = device_get_softc(dev);
207         struct ifnet *ifp;
208         int rid, i;
209         uint32_t val;
210
211         sc->lpe_dev = dev;
212         sc->lpe_ofw = ofw_bus_get_node(dev);
213
214         i = OF_getprop(sc->lpe_ofw, "local-mac-address", (void *)&sc->lpe_enaddr, 6);
215         if (i != 6) {
216                 sc->lpe_enaddr[0] = 0x00;
217                 sc->lpe_enaddr[1] = 0x11;
218                 sc->lpe_enaddr[2] = 0x22;
219                 sc->lpe_enaddr[3] = 0x33;
220                 sc->lpe_enaddr[4] = 0x44;
221                 sc->lpe_enaddr[5] = 0x55;
222         }
223
224         mtx_init(&sc->lpe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
225             MTX_DEF);
226
227         callout_init_mtx(&sc->lpe_tick, &sc->lpe_mtx, 0);
228
229         rid = 0;
230         sc->lpe_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
231             RF_ACTIVE);
232         if (!sc->lpe_mem_res) {
233                 device_printf(dev, "cannot allocate memory window\n");
234                 goto fail;
235         }
236
237         sc->lpe_bst = rman_get_bustag(sc->lpe_mem_res);
238         sc->lpe_bsh = rman_get_bushandle(sc->lpe_mem_res);
239
240         rid = 0;
241         sc->lpe_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
242             RF_ACTIVE);
243         if (!sc->lpe_irq_res) {
244                 device_printf(dev, "cannot allocate interrupt\n");
245                 goto fail;
246         }
247
248         sc->lpe_ifp = if_alloc(IFT_ETHER);
249         if (!sc->lpe_ifp) {
250                 device_printf(dev, "cannot allocate ifnet\n");
251                 goto fail;
252         }
253
254         ifp = sc->lpe_ifp;
255
256         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
257         ifp->if_softc = sc;
258         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
259         ifp->if_start = lpe_start;
260         ifp->if_ioctl = lpe_ioctl;
261         ifp->if_init = lpe_init;
262         IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
263         ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
264         IFQ_SET_READY(&ifp->if_snd);
265
266         ether_ifattach(ifp, sc->lpe_enaddr);
267
268         if (bus_setup_intr(dev, sc->lpe_irq_res, INTR_TYPE_NET, NULL,
269             lpe_intr, sc, &sc->lpe_intrhand)) {
270                 device_printf(dev, "cannot establish interrupt handler\n");
271                 ether_ifdetach(ifp);
272                 goto fail;
273         }
274
275         /* Enable Ethernet clock */
276         lpc_pwr_write(dev, LPC_CLKPWR_MACCLK_CTRL,
277             LPC_CLKPWR_MACCLK_CTRL_REG |
278             LPC_CLKPWR_MACCLK_CTRL_SLAVE |
279             LPC_CLKPWR_MACCLK_CTRL_MASTER |
280             LPC_CLKPWR_MACCLK_CTRL_HDWINF(3));
281
282         /* Reset chip */
283         lpe_reset(sc);
284
285         /* Initialize MII */
286         val = lpe_read_4(sc, LPE_COMMAND);
287         lpe_write_4(sc, LPE_COMMAND, val | LPE_COMMAND_RMII);
288
289         if (mii_attach(dev, &sc->lpe_miibus, ifp, lpe_ifmedia_upd,
290             lpe_ifmedia_sts, BMSR_DEFCAPMASK, 0x01, 
291             MII_OFFSET_ANY, 0)) {
292                 device_printf(dev, "cannot find PHY\n");
293                 goto fail;
294         }
295
296         lpe_dma_alloc(sc);
297
298         return (0);
299
300 fail:
301         if (sc->lpe_ifp)
302                 if_free(sc->lpe_ifp);
303         if (sc->lpe_intrhand)
304                 bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
305         if (sc->lpe_irq_res)
306                 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
307         if (sc->lpe_mem_res)
308                 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
309         return (ENXIO);
310 }
311
312 static int
313 lpe_detach(device_t dev)
314 {
315         struct lpe_softc *sc = device_get_softc(dev);
316
317         lpe_stop(sc);
318
319         if_free(sc->lpe_ifp);
320         bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
321         bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
322         bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
323
324         return (0);
325 }
326
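/*
 * MII access: issue a read command, program the PHY and register address
 * in MADR, poll MIND until the BUSY bit clears, then fetch the result
 * from MRDD.  A read that the MAC flags as INVALID is reported as 0.
 */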
327 static int
328 lpe_miibus_readreg(device_t dev, int phy, int reg)
329 {
330         struct lpe_softc *sc = device_get_softc(dev);
331         uint32_t val;
332         int result;
333
334         lpe_write_4(sc, LPE_MCMD, LPE_MCMD_READ);
335         lpe_write_4(sc, LPE_MADR, 
336             (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
337             (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
338
339         val = lpe_read_4(sc, LPE_MIND);
340
341         /* Wait until request is completed */
342         while (val & LPE_MIND_BUSY) {
343                 val = lpe_read_4(sc, LPE_MIND);
344                 DELAY(10);
345         }
346
347         if (val & LPE_MIND_INVALID)
348                 return (0);
349
350         lpe_write_4(sc, LPE_MCMD, 0);
351         result = (lpe_read_4(sc, LPE_MRDD) & LPE_MRDD_DATAMASK);
352         debugf("phy=%d reg=%d result=0x%04x\n", phy, reg, result);
353
354         return (result);
355 }
356
357 static int
358 lpe_miibus_writereg(device_t dev, int phy, int reg, int data)
359 {
360         struct lpe_softc *sc = device_get_softc(dev);
361         uint32_t val;
362
363         debugf("phy=%d reg=%d data=0x%04x\n", phy, reg, data);
364
365         lpe_write_4(sc, LPE_MCMD, LPE_MCMD_WRITE);
366         lpe_write_4(sc, LPE_MADR, 
367             (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
368             (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
369
370         lpe_write_4(sc, LPE_MWTD, (data & LPE_MWTD_DATAMASK));
371
372         val = lpe_read_4(sc, LPE_MIND);
373
374         /* Wait until request is completed */
375         while (val & LPE_MIND_BUSY) {
376                 val = lpe_read_4(sc, LPE_MIND);
377                 DELAY(10);
378         }
379
380         return (0);
381 }
382
383 static void
384 lpe_miibus_statchg(device_t dev)
385 {
386         struct lpe_softc *sc = device_get_softc(dev);
387         struct mii_data *mii = device_get_softc(sc->lpe_miibus);
388
389         lpe_lock(sc);
390
391         if ((mii->mii_media_status & IFM_ACTIVE) &&
392             (mii->mii_media_status & IFM_AVALID))
393                 sc->lpe_flags |= LPE_FLAG_LINK;
394         else
395                 sc->lpe_flags &= ~LPE_FLAG_LINK;
396
397         lpe_unlock(sc);
398 }
399
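/*
 * Reset the EMAC: hold the MAC and both MAC control sub-blocks in soft
 * reset, reset the register file and the Tx/Rx datapaths, program the
 * station address (two bytes per SA register, lowest-order bytes in SA2),
 * then release the reset bits.
 */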
400 static void
401 lpe_reset(struct lpe_softc *sc)
402 {
403         uint32_t mac1;
404
405         /* Enter soft reset mode */
406         mac1 = lpe_read_4(sc, LPE_MAC1);
407         lpe_write_4(sc, LPE_MAC1, mac1 | LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
408             LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX);
409
410         /* Reset registers, Tx path and Rx path */
411         lpe_write_4(sc, LPE_COMMAND, LPE_COMMAND_REGRESET |
412             LPE_COMMAND_TXRESET | LPE_COMMAND_RXRESET);
413
414         /* Set station address */
415         lpe_write_4(sc, LPE_SA2, sc->lpe_enaddr[1] << 8 | sc->lpe_enaddr[0]);
416         lpe_write_4(sc, LPE_SA1, sc->lpe_enaddr[3] << 8 | sc->lpe_enaddr[2]);
417         lpe_write_4(sc, LPE_SA0, sc->lpe_enaddr[5] << 8 | sc->lpe_enaddr[4]);
418
419         /* Leave soft reset mode */
420         mac1 = lpe_read_4(sc, LPE_MAC1);
421         lpe_write_4(sc, LPE_MAC1, mac1 & ~(LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
422             LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX));
423 }
424
425 static void
426 lpe_init(void *arg)
427 {
428         struct lpe_softc *sc = (struct lpe_softc *)arg;
429
430         lpe_lock(sc);
431         lpe_init_locked(sc);
432         lpe_unlock(sc);
433 }
434
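/*
 * Bring the interface up: enable the Tx/Rx command paths and the receive
 * MAC, select the MIIM clock divider, program the Rx filter, unmask
 * interrupts, and hand the descriptor/status ring base addresses and
 * sizes to the DMA engine before marking the interface running.
 */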
435 static void
436 lpe_init_locked(struct lpe_softc *sc)
437 {
438         struct ifnet *ifp = sc->lpe_ifp;
439         uint32_t cmd, mac1;
440
441         lpe_lock_assert(sc);
442
443         if (ifp->if_drv_flags & IFF_DRV_RUNNING)
444                 return;
445
446         /* Enable Tx and Rx */
447         cmd = lpe_read_4(sc, LPE_COMMAND);
448         lpe_write_4(sc, LPE_COMMAND, cmd | LPE_COMMAND_RXENABLE |
449             LPE_COMMAND_TXENABLE | LPE_COMMAND_PASSRUNTFRAME);
450
451         /* Enable receive */
452         mac1 = lpe_read_4(sc, LPE_MAC1);
453         lpe_write_4(sc, LPE_MAC1, /*mac1 |*/ LPE_MAC1_RXENABLE | LPE_MAC1_PASSALL);
454
455         lpe_write_4(sc, LPE_MAC2, LPE_MAC2_CRCENABLE | LPE_MAC2_PADCRCENABLE |
456             LPE_MAC2_FULLDUPLEX);
457
458         lpe_write_4(sc, LPE_MCFG, LPE_MCFG_CLKSEL(7));
459
460         /* Set up Rx filter */
461         lpe_set_rxmode(sc);
462
463         /* Enable interrupts */
464         lpe_write_4(sc, LPE_INTENABLE, LPE_INT_RXOVERRUN | LPE_INT_RXERROR |
465             LPE_INT_RXFINISH | LPE_INT_RXDONE | LPE_INT_TXUNDERRUN | 
466             LPE_INT_TXERROR | LPE_INT_TXFINISH | LPE_INT_TXDONE);
467
468         sc->lpe_cdata.lpe_tx_prod = 0;
469         sc->lpe_cdata.lpe_tx_last = 0;
470         sc->lpe_cdata.lpe_tx_used = 0;
471
472         lpe_init_rx(sc);
473
474         /* Initialize Rx packet and status descriptor heads */
475         lpe_write_4(sc, LPE_RXDESC, sc->lpe_rdata.lpe_rx_ring_phys);
476         lpe_write_4(sc, LPE_RXSTATUS, sc->lpe_rdata.lpe_rx_status_phys);
477         lpe_write_4(sc, LPE_RXDESC_NUMBER, LPE_RXDESC_NUM - 1);
478         lpe_write_4(sc, LPE_RXDESC_CONS, 0);
479
480         /* Initialize Tx packet and status descriptor heads */
481         lpe_write_4(sc, LPE_TXDESC, sc->lpe_rdata.lpe_tx_ring_phys);
482         lpe_write_4(sc, LPE_TXSTATUS, sc->lpe_rdata.lpe_tx_status_phys);
483         lpe_write_4(sc, LPE_TXDESC_NUMBER, LPE_TXDESC_NUM - 1);
484         lpe_write_4(sc, LPE_TXDESC_PROD, 0);
485
486         ifp->if_drv_flags |= IFF_DRV_RUNNING;
487         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
488
489         callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
490 }
491
492 static void
493 lpe_start(struct ifnet *ifp)
494 {
495         struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
496
497         lpe_lock(sc);
498         lpe_start_locked(ifp);
499         lpe_unlock(sc);
500 }
501
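/*
 * Drain the send queue into the Tx descriptor ring.  Stop early when the
 * producer index is about to catch up with the hardware consumer index,
 * and only advance the producer register once all dequeued packets have
 * been encapsulated.
 */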
502 static void
503 lpe_start_locked(struct ifnet *ifp)
504 {
505         struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
506         struct mbuf *m_head;
507         int encap = 0;
508
509         lpe_lock_assert(sc);
510
511         while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
512                 if (lpe_read_4(sc, LPE_TXDESC_PROD) ==
513                     lpe_read_4(sc, LPE_TXDESC_CONS) - 5)
514                         break;
515
516                 /* Dequeue first packet */
517                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
518                 if (!m_head)
519                         break;
520
521                 lpe_encap(sc, &m_head);
522
523                 encap++;
524         }
525
526         /* Submit new descriptor list */
527         if (encap) {
528                 lpe_write_4(sc, LPE_TXDESC_PROD, sc->lpe_cdata.lpe_tx_prod);
529                 sc->lpe_watchdog_timer = 5;
530         }
531         
532 }
533
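/*
 * Map a packet for transmission: each mbuf fragment takes one hardware
 * descriptor, with the fragment length stored as (len - 1) in the control
 * word.  The final fragment is tagged LAST/INTERRUPT/CRC/PAD so the MAC
 * appends the CRC, pads short frames and raises a completion interrupt.
 */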
534 static int
535 lpe_encap(struct lpe_softc *sc, struct mbuf **m_head)
536 {
537         struct lpe_txdesc *txd;
538         struct lpe_hwdesc *hwd;
539         bus_dma_segment_t segs[LPE_MAXFRAGS];
540         int i, err, nsegs, prod;
541
542         lpe_lock_assert(sc);
543         M_ASSERTPKTHDR((*m_head));
544
545         prod = sc->lpe_cdata.lpe_tx_prod;
546         txd = &sc->lpe_cdata.lpe_tx_desc[prod];
547
548         debugf("starting with prod=%d\n", prod);
549
550         err = bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_tx_buf_tag,
551             txd->lpe_txdesc_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
552
553         if (err)
554                 return (err);
555
556         if (nsegs == 0) {
557                 m_freem(*m_head);
558                 *m_head = NULL;
559                 return (EIO);
560         }
561
562         bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag, txd->lpe_txdesc_dmamap,
563           BUS_DMASYNC_PREREAD);
564         bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
565             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
566
567         txd->lpe_txdesc_first = 1;
568         txd->lpe_txdesc_mbuf = *m_head;
569
570         for (i = 0; i < nsegs; i++) {
571                 hwd = &sc->lpe_rdata.lpe_tx_ring[prod];
572                 hwd->lhr_data = segs[i].ds_addr;
573                 hwd->lhr_control = segs[i].ds_len - 1;
574
575                 if (i == nsegs - 1) {
576                         hwd->lhr_control |= LPE_HWDESC_LASTFLAG;
577                         hwd->lhr_control |= LPE_HWDESC_INTERRUPT;
578                         hwd->lhr_control |= LPE_HWDESC_CRC;
579                         hwd->lhr_control |= LPE_HWDESC_PAD;
580                 }
581
582                 LPE_INC(prod, LPE_TXDESC_NUM);
583         }
584
585         bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
586             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
587
588         sc->lpe_cdata.lpe_tx_used += nsegs;
589         sc->lpe_cdata.lpe_tx_prod = prod;
590
591         return (0);
592 }
593
594 static void
595 lpe_stop(struct lpe_softc *sc)
596 {
597         lpe_lock(sc);
598         lpe_stop_locked(sc);
599         lpe_unlock(sc);
600 }
601
602 static void
603 lpe_stop_locked(struct lpe_softc *sc)
604 {
605         lpe_lock_assert(sc);
606
607         callout_stop(&sc->lpe_tick);
608
609         /* Disable interrupts */
610         lpe_write_4(sc, LPE_INTCLEAR, 0xffffffff);
611
612         /* Stop EMAC */
613         lpe_write_4(sc, LPE_MAC1, 0);
614         lpe_write_4(sc, LPE_MAC2, 0);
615         lpe_write_4(sc, LPE_COMMAND, 0);
616
617         sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
618         sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
619 }
620
621 static int
622 lpe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
623 {
624         struct lpe_softc *sc = ifp->if_softc;
625         struct mii_data *mii = device_get_softc(sc->lpe_miibus);
626         struct ifreq *ifr = (struct ifreq *)data;
627         int err = 0;
628
629         switch (cmd) {
630         case SIOCSIFFLAGS:
631                 lpe_lock(sc);
632                 if (ifp->if_flags & IFF_UP) {
633                         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
634                                 lpe_set_rxmode(sc);
635                                 lpe_set_rxfilter(sc);
636                         } else
637                                 lpe_init_locked(sc);
638                 } else
639                         lpe_stop(sc);
640                 lpe_unlock(sc);
641                 break;
642         case SIOCADDMULTI:
643         case SIOCDELMULTI:
644                 if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
645                         lpe_lock(sc);
646                         lpe_set_rxfilter(sc);
647                         lpe_unlock(sc);
648                 }
649                 break;
650         case SIOCGIFMEDIA:
651         case SIOCSIFMEDIA:
652                 err = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
653                 break;
654         default:
655                 err = ether_ioctl(ifp, cmd, data);
656                 break;
657         }
658
659         return (err);
660 }
661
662 static void lpe_set_rxmode(struct lpe_softc *sc)
663 {
664         struct ifnet *ifp = sc->lpe_ifp;
665         uint32_t rxfilt;
666
667         rxfilt = LPE_RXFILTER_UNIHASH | LPE_RXFILTER_MULTIHASH | LPE_RXFILTER_PERFECT;
668
669         if (ifp->if_flags & IFF_BROADCAST)
670                 rxfilt |= LPE_RXFILTER_BROADCAST;
671
672         if (ifp->if_flags & IFF_PROMISC)
673                 rxfilt |= LPE_RXFILTER_UNICAST | LPE_RXFILTER_MULTICAST;
674
675         if (ifp->if_flags & IFF_ALLMULTI)
676                 rxfilt |= LPE_RXFILTER_MULTICAST;
677
678         lpe_write_4(sc, LPE_RXFILTER_CTRL, rxfilt);
679 }
680
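/*
 * Rebuild the 64-bit multicast hash filter: take six bits of the
 * big-endian CRC of each multicast address as the bit index and split
 * the result across the HASHFILTER_L/H registers.
 */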
681 static void lpe_set_rxfilter(struct lpe_softc *sc)
682 {
683         struct ifnet *ifp = sc->lpe_ifp;
684         struct ifmultiaddr *ifma;
685         int index;
686         uint32_t hashl, hashh;
687
688         hashl = 0;
689         hashh = 0;
690
691         if_maddr_rlock(ifp);
692         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
693                 if (ifma->ifma_addr->sa_family != AF_LINK)
694                         continue;
695
696                 index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
697                     ifma->ifma_addr), ETHER_ADDR_LEN) >> 23 & 0x3f;
698
699                 if (index > 31)
700                         hashh |= (1 << (index - 32));
701                 else
702                         hashl |= (1 << index);
703         }
704         if_maddr_runlock(ifp);
705
706         /* Program new hash filter */
707         lpe_write_4(sc, LPE_HASHFILTER_L, hashl);
708         lpe_write_4(sc, LPE_HASHFILTER_H, hashh);
709 }
710
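/*
 * Interrupt handler: keep re-reading INTSTATUS until it reads back as
 * zero, dispatching Rx and Tx completion work and acknowledging the
 * serviced bits through INTCLEAR on each pass.
 */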
711 static void
712 lpe_intr(void *arg)
713 {
714         struct lpe_softc *sc = (struct lpe_softc *)arg;
715         uint32_t intstatus;
716
717         debugf("status=0x%08x\n", lpe_read_4(sc, LPE_INTSTATUS));
718
719         lpe_lock(sc);
720
721         while ((intstatus = lpe_read_4(sc, LPE_INTSTATUS))) {
722                 if (intstatus & LPE_INT_RXDONE)
723                         lpe_rxintr(sc);
724
725                 if (intstatus & LPE_INT_TXDONE)
726                         lpe_txintr(sc);
727         
728                 lpe_write_4(sc, LPE_INTCLEAR, 0xffff);
729         }
730
731         lpe_unlock(sc);
732 }
733
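/*
 * Receive interrupt: walk the Rx ring from the consumer index up to the
 * hardware producer index.  Frames with error status are counted and
 * their buffers recycled; for good frames m_data is advanced past the
 * two-byte offset at which lpe_init_rxbuf() pointed the DMA engine, and
 * the mbuf is handed to if_input() with the driver lock dropped.
 */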
734 static void
735 lpe_rxintr(struct lpe_softc *sc)
736 {
737         struct ifnet *ifp = sc->lpe_ifp;
738         struct lpe_hwdesc *hwd;
739         struct lpe_hwstatus *hws;
740         struct lpe_rxdesc *rxd;
741         struct mbuf *m;
742         int prod, cons;
743
744         for (;;) {
745                 prod = lpe_read_4(sc, LPE_RXDESC_PROD);
746                 cons = lpe_read_4(sc, LPE_RXDESC_CONS);
747                 
748                 if (prod == cons)
749                         break;
750
751                 rxd = &sc->lpe_cdata.lpe_rx_desc[cons];
752                 hwd = &sc->lpe_rdata.lpe_rx_ring[cons];
753                 hws = &sc->lpe_rdata.lpe_rx_status[cons];
754
755                 /* Check received frame for errors */
756                 if (hws->lhs_info & LPE_HWDESC_RXERRS) {
757                         ifp->if_ierrors++;
758                         lpe_discard_rxbuf(sc, cons);
759                         lpe_init_rxbuf(sc, cons);
760                         goto skip;
761                 }
762
763                 m = rxd->lpe_rxdesc_mbuf;
764                 m->m_pkthdr.rcvif = ifp;
765                 m->m_data += 2;
766
767                 ifp->if_ipackets++;
768
769                 lpe_unlock(sc);
770                 (*ifp->if_input)(ifp, m);       
771                 lpe_lock(sc);
772
773                 lpe_init_rxbuf(sc, cons);
774 skip:
775                 LPE_INC(cons, LPE_RXDESC_NUM);
776                 lpe_write_4(sc, LPE_RXDESC_CONS, cons);
777         }
778 }
779
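/*
 * Transmit completion: reclaim descriptors between the driver's last-seen
 * index and the hardware consumer index, updating the collision and error
 * counters from the status words.  The mbuf and DMA map are released only
 * on the descriptor that started the packet.
 */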
780 static void
781 lpe_txintr(struct lpe_softc *sc)
782 {
783         struct ifnet *ifp = sc->lpe_ifp;
784         struct lpe_hwdesc *hwd;
785         struct lpe_hwstatus *hws;
786         struct lpe_txdesc *txd;
787         int cons, last;
788
789         for (;;) {
790                 cons = lpe_read_4(sc, LPE_TXDESC_CONS);
791                 last = sc->lpe_cdata.lpe_tx_last;
792                 
793                 if (cons == last)
794                         break;
795
796                 txd = &sc->lpe_cdata.lpe_tx_desc[last];
797                 hwd = &sc->lpe_rdata.lpe_tx_ring[last];
798                 hws = &sc->lpe_rdata.lpe_tx_status[last];
799
800                 bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag,
801                     txd->lpe_txdesc_dmamap, BUS_DMASYNC_POSTWRITE);
802
803                 ifp->if_collisions += LPE_HWDESC_COLLISIONS(hws->lhs_info);
804
805                 if (hws->lhs_info & LPE_HWDESC_TXERRS)
806                         ifp->if_oerrors++;
807                 else
808                         ifp->if_opackets++;
809
810                 if (txd->lpe_txdesc_first) {
811                         bus_dmamap_unload(sc->lpe_cdata.lpe_tx_buf_tag,
812                             txd->lpe_txdesc_dmamap);    
813
814                         m_freem(txd->lpe_txdesc_mbuf);
815                         txd->lpe_txdesc_mbuf = NULL;
816                         txd->lpe_txdesc_first = 0;
817                 }
818
819                 sc->lpe_cdata.lpe_tx_used--;
820                 LPE_INC(sc->lpe_cdata.lpe_tx_last, LPE_TXDESC_NUM);
821         }
822
823         if (!sc->lpe_cdata.lpe_tx_used)
824                 sc->lpe_watchdog_timer = 0;
825 }
826
827 static void
828 lpe_tick(void *arg)
829 {
830         struct lpe_softc *sc = (struct lpe_softc *)arg;
831         struct mii_data *mii = device_get_softc(sc->lpe_miibus);
832
833         lpe_lock_assert(sc);
834         
835         mii_tick(mii);
836         lpe_watchdog(sc);
837
838         callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
839 }
840
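/*
 * Transmit watchdog, armed from lpe_start_locked() and cleared when the
 * Tx ring drains.  It counts down once per lpe_tick(); when it expires
 * the controller is assumed to be wedged and is stopped, reinitialized
 * and restarted.
 */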
841 static void
842 lpe_watchdog(struct lpe_softc *sc)
843 {
844         struct ifnet *ifp = sc->lpe_ifp;
845
846         lpe_lock_assert(sc);
847
848         if (sc->lpe_watchdog_timer == 0 || --sc->lpe_watchdog_timer)
849                 return;
850
851         /* Chip has stopped responding */
852         device_printf(sc->lpe_dev, "WARNING: chip hangup, restarting...\n");
853         lpe_stop_locked(sc);
854         lpe_init_locked(sc);
855
856         /* Try to resend packets */
857         if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
858                 lpe_start_locked(ifp);
859 }
860
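/*
 * DMA setup: a parent tag covering the whole 32-bit space is created
 * first, and the Rx and Tx paths then derive their own tags for the
 * descriptor ring, the status ring and the data buffers from it.
 */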
861 static int
862 lpe_dma_alloc(struct lpe_softc *sc)
863 {
864         int err;
865
866         /* Create parent DMA tag */
867         err = bus_dma_tag_create(
868             bus_get_dma_tag(sc->lpe_dev),
869             1, 0,                       /* alignment, boundary */
870             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
871             BUS_SPACE_MAXADDR,          /* highaddr */
872             NULL, NULL,                 /* filter, filterarg */
873             BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
874             BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsegsize, flags */
875             NULL, NULL,                 /* lockfunc, lockarg */
876             &sc->lpe_cdata.lpe_parent_tag);
877
878         if (err) {
879                 device_printf(sc->lpe_dev, "cannot create parent DMA tag\n");
880                 return (err);
881         }
882
883         err = lpe_dma_alloc_rx(sc);
884         if (err)
885                 return (err);
886
887         err = lpe_dma_alloc_tx(sc);
888         if (err)
889                 return (err);
890
891         return (0);
892 }
893
894 static int
895 lpe_dma_alloc_rx(struct lpe_softc *sc)
896 {
897         struct lpe_rxdesc *rxd;
898         struct lpe_dmamap_arg ctx;
899         int err, i;
900
901         /* Create tag for Rx ring */
902         err = bus_dma_tag_create(
903             sc->lpe_cdata.lpe_parent_tag,
904             LPE_DESC_ALIGN, 0,          /* alignment, boundary */
905             BUS_SPACE_MAXADDR,          /* lowaddr */
906             BUS_SPACE_MAXADDR,          /* highaddr */
907             NULL, NULL,                 /* filter, filterarg */
908             LPE_RXDESC_SIZE, 1,         /* maxsize, nsegments */
909             LPE_RXDESC_SIZE, 0,         /* maxsegsize, flags */
910             NULL, NULL,                 /* lockfunc, lockarg */
911             &sc->lpe_cdata.lpe_rx_ring_tag);
912
913         if (err) {
914                 device_printf(sc->lpe_dev, "cannot create Rx ring DMA tag\n");
915                 goto fail;
916         }
917
918         /* Create tag for Rx status ring */
919         err = bus_dma_tag_create(
920             sc->lpe_cdata.lpe_parent_tag,
921             LPE_DESC_ALIGN, 0,          /* alignment, boundary */
922             BUS_SPACE_MAXADDR,          /* lowaddr */
923             BUS_SPACE_MAXADDR,          /* highaddr */
924             NULL, NULL,                 /* filter, filterarg */
925             LPE_RXSTATUS_SIZE, 1,       /* maxsize, nsegments */
926             LPE_RXSTATUS_SIZE, 0,       /* maxsegsize, flags */
927             NULL, NULL,                 /* lockfunc, lockarg */
928             &sc->lpe_cdata.lpe_rx_status_tag);
929
930         if (err) {
931                 device_printf(sc->lpe_dev, "cannot create Rx status ring DMA tag\n");
932                 goto fail;
933         }
934
935         /* Create tag for Rx buffers */
936         err = bus_dma_tag_create(
937             sc->lpe_cdata.lpe_parent_tag,
938             LPE_DESC_ALIGN, 0,          /* alignment, boundary */
939             BUS_SPACE_MAXADDR,          /* lowaddr */
940             BUS_SPACE_MAXADDR,          /* highaddr */
941             NULL, NULL,                 /* filter, filterarg */
942             MCLBYTES * LPE_RXDESC_NUM,  /* maxsize */
943             LPE_RXDESC_NUM,             /* segments */
944             MCLBYTES, 0,                /* maxsegsize, flags */
945             NULL, NULL,                 /* lockfunc, lockarg */
946             &sc->lpe_cdata.lpe_rx_buf_tag);
947
948         if (err) {
949                 device_printf(sc->lpe_dev, "cannot create Rx buffers DMA tag\n");
950                 goto fail;
951         }
952
953         /* Allocate Rx DMA ring */
954         err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_ring_tag,
955             (void **)&sc->lpe_rdata.lpe_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
956             BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_ring_map);
957
958         err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_ring_tag, 
959             sc->lpe_cdata.lpe_rx_ring_map, sc->lpe_rdata.lpe_rx_ring,
960             LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
961
962         sc->lpe_rdata.lpe_rx_ring_phys = ctx.lpe_dma_busaddr;
963
964         /* Allocate Rx status ring */
965         err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_status_tag,
966             (void **)&sc->lpe_rdata.lpe_rx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
967             BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_status_map);
968
969         err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_status_tag, 
970             sc->lpe_cdata.lpe_rx_status_map, sc->lpe_rdata.lpe_rx_status,
971             LPE_RXSTATUS_SIZE, lpe_dmamap_cb, &ctx, 0);
972
973         sc->lpe_rdata.lpe_rx_status_phys = ctx.lpe_dma_busaddr;
974
975
976         /* Create Rx buffers DMA map */
977         for (i = 0; i < LPE_RXDESC_NUM; i++) {
978                 rxd = &sc->lpe_cdata.lpe_rx_desc[i];
979                 rxd->lpe_rxdesc_mbuf = NULL;
980                 rxd->lpe_rxdesc_dmamap = NULL;
981
982                 err = bus_dmamap_create(sc->lpe_cdata.lpe_rx_buf_tag, 0,
983                     &rxd->lpe_rxdesc_dmamap);
984
985                 if (err) {
986                         device_printf(sc->lpe_dev, "cannot create Rx DMA map\n");
987                         return (err);
988                 }
989         }
990
991         return (0);
992 fail:
993         return (err);
994 }
995
996 static int
997 lpe_dma_alloc_tx(struct lpe_softc *sc)
998 {
999         struct lpe_txdesc *txd;
1000         struct lpe_dmamap_arg ctx;
1001         int err, i;
1002
1003         /* Create tag for Tx ring */
1004         err = bus_dma_tag_create(
1005             sc->lpe_cdata.lpe_parent_tag,
1006             LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1007             BUS_SPACE_MAXADDR,          /* lowaddr */
1008             BUS_SPACE_MAXADDR,          /* highaddr */
1009             NULL, NULL,                 /* filter, filterarg */
1010             LPE_TXDESC_SIZE, 1,         /* maxsize, nsegments */
1011             LPE_TXDESC_SIZE, 0,         /* maxsegsize, flags */
1012             NULL, NULL,                 /* lockfunc, lockarg */
1013             &sc->lpe_cdata.lpe_tx_ring_tag);
1014
1015         if (err) {
1016                 device_printf(sc->lpe_dev, "cannot create Tx ring DMA tag\n");
1017                 goto fail;
1018         }
1019
1020         /* Create tag for Tx status ring */
1021         err = bus_dma_tag_create(
1022             sc->lpe_cdata.lpe_parent_tag,
1023             LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1024             BUS_SPACE_MAXADDR,          /* lowaddr */
1025             BUS_SPACE_MAXADDR,          /* highaddr */
1026             NULL, NULL,                 /* filter, filterarg */
1027             LPE_TXSTATUS_SIZE, 1,       /* maxsize, nsegments */
1028             LPE_TXSTATUS_SIZE, 0,       /* maxsegsize, flags */
1029             NULL, NULL,                 /* lockfunc, lockarg */
1030             &sc->lpe_cdata.lpe_tx_status_tag);
1031
1032         if (err) {
1033                 device_printf(sc->lpe_dev, "cannot create Tx status ring DMA tag\n");
1034                 goto fail;
1035         }
1036
1037         /* Create tag for Tx buffers */
1038         err = bus_dma_tag_create(
1039             sc->lpe_cdata.lpe_parent_tag,
1040             LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1041             BUS_SPACE_MAXADDR,          /* lowaddr */
1042             BUS_SPACE_MAXADDR,          /* highaddr */
1043             NULL, NULL,                 /* filter, filterarg */
1044             MCLBYTES * LPE_TXDESC_NUM,  /* maxsize */
1045             LPE_TXDESC_NUM,             /* segments */
1046             MCLBYTES, 0,                /* maxsegsize, flags */
1047             NULL, NULL,                 /* lockfunc, lockarg */
1048             &sc->lpe_cdata.lpe_tx_buf_tag);
1049
1050         if (err) {
1051                 device_printf(sc->lpe_dev, "cannot create Tx buffers DMA tag\n");
1052                 goto fail;
1053         }
1054
1055         /* Allocate Tx DMA ring */
1056         err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_ring_tag,
1057             (void **)&sc->lpe_rdata.lpe_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1058             BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_ring_map);
1059
1060         err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_ring_tag, 
1061             sc->lpe_cdata.lpe_tx_ring_map, sc->lpe_rdata.lpe_tx_ring,
1062             LPE_TXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1063
1064         sc->lpe_rdata.lpe_tx_ring_phys = ctx.lpe_dma_busaddr;
1065
1066         /* Allocate Tx status ring */
1067         err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_status_tag,
1068             (void **)&sc->lpe_rdata.lpe_tx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1069             BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_status_map);
1070
1071         err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_status_tag, 
1072             sc->lpe_cdata.lpe_tx_status_map, sc->lpe_rdata.lpe_tx_status,
1073             LPE_TXSTATUS_SIZE, lpe_dmamap_cb, &ctx, 0);
1074
1075         sc->lpe_rdata.lpe_tx_status_phys = ctx.lpe_dma_busaddr;
1076
1077
1078         /* Create Tx buffers DMA map */
1079         for (i = 0; i < LPE_TXDESC_NUM; i++) {
1080                 txd = &sc->lpe_cdata.lpe_tx_desc[i];
1081                 txd->lpe_txdesc_mbuf = NULL;
1082                 txd->lpe_txdesc_dmamap = NULL;
1083                 txd->lpe_txdesc_first = 0;
1084
1085                 err = bus_dmamap_create(sc->lpe_cdata.lpe_tx_buf_tag, 0,
1086                     &txd->lpe_txdesc_dmamap);
1087
1088                 if (err) {
1089                         device_printf(sc->lpe_dev, "cannot create Tx DMA map\n");
1090                         return (err);
1091                 }
1092         }
1093
1094         return (0);
1095 fail:
1096         return (err);
1097 }
1098
1099 static int
1100 lpe_init_rx(struct lpe_softc *sc)
1101 {
1102         int i, err;
1103
1104         for (i = 0; i < LPE_RXDESC_NUM; i++) {
1105                 err = lpe_init_rxbuf(sc, i);
1106                 if (err)
1107                         return (err);
1108         }
1109
1110         return (0);
1111 }
1112
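/*
 * Attach a fresh mbuf cluster to Rx slot n.  The DMA address handed to
 * the hardware is offset by two bytes, presumably so the IP header ends
 * up 32-bit aligned once the 14-byte Ethernet header has been received;
 * lpe_rxintr() advances m_data by the same amount.
 */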
1113 static int
1114 lpe_init_rxbuf(struct lpe_softc *sc, int n)
1115 {
1116         struct lpe_rxdesc *rxd;
1117         struct lpe_hwdesc *hwd;
1118         struct lpe_hwstatus *hws;
1119         struct mbuf *m;
1120         bus_dma_segment_t segs[1];
1121         int nsegs;
1122
1123         rxd = &sc->lpe_cdata.lpe_rx_desc[n];
1124         hwd = &sc->lpe_rdata.lpe_rx_ring[n];
1125         hws = &sc->lpe_rdata.lpe_rx_status[n];
1126         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1127
1128         if (!m) {
1129                 device_printf(sc->lpe_dev, "WARNING: mbufs exhausted!\n");
1130                 return (ENOBUFS);
1131         }
1132
1133         m->m_len = m->m_pkthdr.len = MCLBYTES;
1134
1135         bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);
1136
1137         if (bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_rx_buf_tag, 
1138             rxd->lpe_rxdesc_dmamap, m, segs, &nsegs, 0)) {
1139                 m_freem(m);
1140                 return (ENOBUFS);
1141         }
1142
1143         bus_dmamap_sync(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap, 
1144             BUS_DMASYNC_PREREAD);
1145
1146         rxd->lpe_rxdesc_mbuf = m;
1147         hwd->lhr_data = segs[0].ds_addr + 2;
1148         hwd->lhr_control = (segs[0].ds_len - 1) | LPE_HWDESC_INTERRUPT;
1149
1150         return (0);
1151 }
1152
1153 static void
1154 lpe_discard_rxbuf(struct lpe_softc *sc, int n)
1155 {
1156         struct lpe_rxdesc *rxd;
1157         struct lpe_hwdesc *hwd;
1158
1159         rxd = &sc->lpe_cdata.lpe_rx_desc[n];
1160         hwd = &sc->lpe_rdata.lpe_rx_ring[n];
1161
1162         bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);
1163
1164         hwd->lhr_data = 0;
1165         hwd->lhr_control = 0;
1166
1167         if (rxd->lpe_rxdesc_mbuf) {
1168                 m_freem(rxd->lpe_rxdesc_mbuf); 
1169                 rxd->lpe_rxdesc_mbuf = NULL;
1170         }
1171 }
1172
1173 static void
1174 lpe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1175 {
1176         struct lpe_dmamap_arg *ctx;
1177
1178         if (error)
1179                 return;
1180
1181         ctx = (struct lpe_dmamap_arg *)arg;
1182         ctx->lpe_dma_busaddr = segs[0].ds_addr;
1183 }
1184
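/*
 * Media handling is delegated to miibus: media changes are accepted
 * without further action and status queries simply poll the PHY.
 */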
1185 static int
1186 lpe_ifmedia_upd(struct ifnet *ifp)
1187 {
1188         return (0);
1189 }
1190
1191 static void
1192 lpe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1193 {
1194         struct lpe_softc *sc = ifp->if_softc;
1195         struct mii_data *mii = device_get_softc(sc->lpe_miibus);
1196
1197         lpe_lock(sc);
1198         mii_pollstat(mii);
1199         ifmr->ifm_active = mii->mii_media_active;
1200         ifmr->ifm_status = mii->mii_media_status;
1201         lpe_unlock(sc);
1202 }
1203
1204 static device_method_t lpe_methods[] = {
1205         /* Device interface */
1206         DEVMETHOD(device_probe,         lpe_probe),
1207         DEVMETHOD(device_attach,        lpe_attach),
1208         DEVMETHOD(device_detach,        lpe_detach),
1209
1210         /* Bus interface */
1211         DEVMETHOD(bus_print_child,      bus_generic_print_child),
1212
1213         /* MII interface */
1214         DEVMETHOD(miibus_readreg,       lpe_miibus_readreg),
1215         DEVMETHOD(miibus_writereg,      lpe_miibus_writereg),
1216         DEVMETHOD(miibus_statchg,       lpe_miibus_statchg),
1217         { 0, 0 }
1218 };
1219
1220 static driver_t lpe_driver = {
1221         "lpe",
1222         lpe_methods,
1223         sizeof(struct lpe_softc),
1224 };
1225
1226 static devclass_t lpe_devclass;
1227
1228 DRIVER_MODULE(lpe, simplebus, lpe_driver, lpe_devclass, 0, 0);
1229 DRIVER_MODULE(miibus, lpe, miibus_driver, miibus_devclass, 0, 0);
1230 MODULE_DEPEND(lpe, obio, 1, 1, 1);
1231 MODULE_DEPEND(lpe, miibus, 1, 1, 1);
1232 MODULE_DEPEND(lpe, ether, 1, 1, 1);