1 /*-
2  * Copyright (c) 2009, Oleksandr Tymoshenko
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 /*
32  * AR71XX gigabit ethernet driver
33  */
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
36 #endif
37
38 #include <sys/param.h>
39 #include <sys/endian.h>
40 #include <sys/systm.h>
41 #include <sys/sockio.h>
42 #include <sys/mbuf.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h>
45 #include <sys/module.h>
46 #include <sys/socket.h>
47 #include <sys/taskqueue.h>
48 #include <sys/sysctl.h>
49
50 #include <net/if.h>
51 #include <net/if_arp.h>
52 #include <net/ethernet.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56
57 #include <net/bpf.h>
58
59 #include <machine/bus.h>
60 #include <machine/cache.h>
61 #include <machine/resource.h>
62 #include <vm/vm_param.h>
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65 #include <machine/pmap.h>
66 #include <sys/bus.h>
67 #include <sys/rman.h>
68
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71
72 #include <dev/pci/pcireg.h>
73 #include <dev/pci/pcivar.h>
74
75 MODULE_DEPEND(arge, ether, 1, 1, 1);
76 MODULE_DEPEND(arge, miibus, 1, 1, 1);
77
78 #include "miibus_if.h"
79
80 #include <mips/atheros/ar71xxreg.h>
81 #include <mips/atheros/if_argevar.h>
82 #include <mips/atheros/ar71xx_cpudef.h>
83
84 #undef ARGE_DEBUG
85 #ifdef ARGE_DEBUG
86 #define dprintf printf
87 #else
88 #define dprintf(x, arg...)
89 #endif
90
91 static int arge_attach(device_t);
92 static int arge_detach(device_t);
93 static void arge_flush_ddr(struct arge_softc *);
94 static int arge_ifmedia_upd(struct ifnet *);
95 static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
96 static int arge_ioctl(struct ifnet *, u_long, caddr_t);
97 static void arge_init(void *);
98 static void arge_init_locked(struct arge_softc *);
99 static void arge_link_task(void *, int);
100 static void arge_set_pll(struct arge_softc *, int, int);
101 static int arge_miibus_readreg(device_t, int, int);
102 static void arge_miibus_statchg(device_t);
103 static int arge_miibus_writereg(device_t, int, int, int);
104 static int arge_probe(device_t);
105 static void arge_reset_dma(struct arge_softc *);
106 static int arge_resume(device_t);
107 static int arge_rx_ring_init(struct arge_softc *);
108 static int arge_tx_ring_init(struct arge_softc *);
109 #ifdef DEVICE_POLLING
110 static int arge_poll(struct ifnet *, enum poll_cmd, int);
111 #endif
112 static int arge_shutdown(device_t);
113 static void arge_start(struct ifnet *);
114 static void arge_start_locked(struct ifnet *);
115 static void arge_stop(struct arge_softc *);
116 static int arge_suspend(device_t);
117
118 static int arge_rx_locked(struct arge_softc *);
119 static void arge_tx_locked(struct arge_softc *);
120 static void arge_intr(void *);
121 static int arge_intr_filter(void *);
122 static void arge_tick(void *);
123
124 /*
125  * ifmedia callbacks for multiPHY MAC
126  */
127 void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
128 int arge_multiphy_mediachange(struct ifnet *);
129
130 static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
131 static int arge_dma_alloc(struct arge_softc *);
132 static void arge_dma_free(struct arge_softc *);
133 static int arge_newbuf(struct arge_softc *, int);
134 static __inline void arge_fixup_rx(struct mbuf *);
135
136 static device_method_t arge_methods[] = {
137         /* Device interface */
138         DEVMETHOD(device_probe,         arge_probe),
139         DEVMETHOD(device_attach,        arge_attach),
140         DEVMETHOD(device_detach,        arge_detach),
141         DEVMETHOD(device_suspend,       arge_suspend),
142         DEVMETHOD(device_resume,        arge_resume),
143         DEVMETHOD(device_shutdown,      arge_shutdown),
144
145         /* bus interface */
146         DEVMETHOD(bus_print_child,      bus_generic_print_child),
147         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
148
149         /* MII interface */
150         DEVMETHOD(miibus_readreg,       arge_miibus_readreg),
151         DEVMETHOD(miibus_writereg,      arge_miibus_writereg),
152         DEVMETHOD(miibus_statchg,       arge_miibus_statchg),
153
154         { 0, 0 }
155 };
156
157 static driver_t arge_driver = {
158         "arge",
159         arge_methods,
160         sizeof(struct arge_softc)
161 };
162
163 static devclass_t arge_devclass;
164
165 DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
166 DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);
167
168 /*
169  * RedBoot passes the MAC address to the entry point as an environment
170  * variable; platform_start() parses it and stores it in this variable.
171  */
172 extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];
173
174 static struct mtx miibus_mtx;
175
176 MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);
177
178
179 /*
180  * Flush the DDR FIFO for this interface's MAC unit.
181  */
182 static void
183 arge_flush_ddr(struct arge_softc *sc)
184 {
185         if (sc->arge_mac_unit == 0)
186                 ar71xx_device_flush_ddr_ge0();
187         else
188                 ar71xx_device_flush_ddr_ge1();
189 }
190
191 static int 
192 arge_probe(device_t dev)
193 {
194
195         device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
196         return (0);
197 }
198
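/*
 * Export a few per-interface knobs and counters via sysctl.  Through the
 * device sysctl tree these should appear as dev.arge.<unit>.debug,
 * dev.arge.<unit>.tx_pkts_aligned and dev.arge.<unit>.tx_pkts_unaligned.
 */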
199 static void
200 arge_attach_sysctl(device_t dev)
201 {
202         struct arge_softc *sc = device_get_softc(dev);
203         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
204         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
205
206         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
207                 "debug", CTLFLAG_RW, &sc->arge_debug, 0,
208                 "arge interface debugging flags");
209
210         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
211                 "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
212                 "number of TX aligned packets");
213
214         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
215                 "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 0,
216                 "number of TX unaligned packets");
217 }
218
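/*
 * Attach one MAC unit: parse the phymask/media/fduplex hints, map the
 * register window and IRQ, program sane MAC/FIFO defaults, attach either
 * miibus (single PHY) or a fixed ifmedia (multi-PHY), and finally hook
 * up the interrupt handler.
 */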
219 static int
220 arge_attach(device_t dev)
221 {
222         uint8_t                 eaddr[ETHER_ADDR_LEN];
223         struct ifnet            *ifp;
224         struct arge_softc       *sc;
225         int                     error = 0, rid, phymask;
226         uint32_t                reg, rnd;
227         int                     is_base_mac_empty, i, phys_total;
228         uint32_t                hint;
229
230         sc = device_get_softc(dev);
231         sc->arge_dev = dev;
232         sc->arge_mac_unit = device_get_unit(dev);
233
234         KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)), 
235             ("if_arge: Only MAC0 and MAC1 supported"));
236
237         /*
238          *  Determine which of the 5 available PHYs to use for this unit
239          */
240         if (resource_int_value(device_get_name(dev), device_get_unit(dev), 
241             "phymask", &phymask) != 0) {
242                 /*
243                  * Default to PHY 4 (the WAN port) for GE0; for the other
244                  * unit default to the remaining PHYs (0-3)
245                  */
246                 if (sc->arge_mac_unit == 0)
247                         phymask = (1 << 4);
248                 else
249                         /* Use all phys up to 4 */
250                         phymask = (1 << 4) - 1;
251
252                 device_printf(dev, "No PHY specified, using mask %d\n", phymask);
253         }
254
255         /*
256          *  Get the default media & duplex mode; the defaults are 100baseTX
257          *  and full duplex
258          */
259         if (resource_int_value(device_get_name(dev), device_get_unit(dev), 
260             "media", &hint) != 0)
261                 hint = 0;
262
263         if (hint == 1000)
264                 sc->arge_media_type = IFM_1000_T;
265         else
266                 sc->arge_media_type = IFM_100_TX;
267
268         if (resource_int_value(device_get_name(dev), device_get_unit(dev), 
269             "fduplex", &hint) != 0)
270                 hint = 1;
271
272         if (hint)
273                 sc->arge_duplex_mode = IFM_FDX;
274         else
275                 sc->arge_duplex_mode = 0;
276
277         sc->arge_phymask = phymask;
278
279         mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
280             MTX_DEF);
281         callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
282         TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);
283
284         /* Map control/status registers. */
285         sc->arge_rid = 0;
286         sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
287             &sc->arge_rid, RF_ACTIVE);
288
289         if (sc->arge_res == NULL) {
290                 device_printf(dev, "couldn't map memory\n");
291                 error = ENXIO;
292                 goto fail;
293         }
294
295         /* Allocate interrupts */
296         rid = 0;
297         sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 
298             RF_SHAREABLE | RF_ACTIVE);
299
300         if (sc->arge_irq == NULL) {
301                 device_printf(dev, "couldn't map interrupt\n");
302                 error = ENXIO;
303                 goto fail;
304         }
305
306         /* Allocate ifnet structure. */
307         ifp = sc->arge_ifp = if_alloc(IFT_ETHER);
308
309         if (ifp == NULL) {
310                 device_printf(dev, "couldn't allocate ifnet structure\n");
311                 error = ENOSPC;
312                 goto fail;
313         }
314
315         ifp->if_softc = sc;
316         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
317         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
318         ifp->if_ioctl = arge_ioctl;
319         ifp->if_start = arge_start;
320         ifp->if_init = arge_init;
321         sc->arge_if_flags = ifp->if_flags;
322
323         /* XXX: add real size */
324         IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
325         ifp->if_snd.ifq_maxlen = ifqmaxlen;
326         IFQ_SET_READY(&ifp->if_snd);
327
328         ifp->if_capenable = ifp->if_capabilities;
329 #ifdef DEVICE_POLLING
330         ifp->if_capabilities |= IFCAP_POLLING;
331 #endif
332
333         is_base_mac_empty = 1;
334         for (i = 0; i < ETHER_ADDR_LEN; i++) {
335                 eaddr[i] = ar711_base_mac[i] & 0xff;
336                 if (eaddr[i] != 0)
337                         is_base_mac_empty = 0;
338         }
339
340         if (is_base_mac_empty) {
341                 /*
342                  * No MAC address configured. Generate a random one.
343                  */
344                 if  (bootverbose)
345                         device_printf(dev, 
346                             "Generating random ethernet address.\n");
347
348                 rnd = arc4random();
349                 eaddr[0] = 'b';
350                 eaddr[1] = 's';
351                 eaddr[2] = 'd';
352                 eaddr[3] = (rnd >> 24) & 0xff;
353                 eaddr[4] = (rnd >> 16) & 0xff;
354                 eaddr[5] = (rnd >> 8) & 0xff;
355         }
356
357         if (sc->arge_mac_unit != 0)
358                 eaddr[5] +=  sc->arge_mac_unit;
359
360         if (arge_dma_alloc(sc) != 0) {
361                 error = ENXIO;
362                 goto fail;
363         }
364
365         /* Initialize the MAC block */
366         
367         /* Step 1. Soft-reset MAC */
368         ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
369         DELAY(20);
370
371         /* Step 2. Punt the MAC core from the central reset register */
372         ar71xx_device_stop(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
373         DELAY(100);
374         ar71xx_device_start(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
375
376         /* Step 3. Reconfigure MAC block */
377         ARGE_WRITE(sc, AR71XX_MAC_CFG1, 
378                 MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
379                 MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);
380
381         reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
382         reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD;
383         ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);
384
385         ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);
386
387         /* Reset MII bus */
388         ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
389         DELAY(100);
390         ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
391         DELAY(100);
392
393         /* 
394          * Program the station address registers with the Ethernet
395          * address chosen above
396          */
397         ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1, 
398             (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8)  | eaddr[5]);
399         ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (eaddr[0] << 8) | eaddr[1]);
400
401         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0, 
402             FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);
403         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
404         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
405
406         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH, 
407             FIFO_RX_FILTMATCH_DEFAULT);
408
409         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK, 
410             FIFO_RX_FILTMASK_DEFAULT);
411
412         /* 
413          * Check whether this MAC has a single PHY or multiple PHYs attached
414          */
415         phys_total = 0;
416         for (i = 0; i < ARGE_NPHY; i++)
417                 if (phymask & (1 << i))
418                         phys_total++;
419
420         if (phys_total == 0) {
421                 error = EINVAL;
422                 goto fail;
423         }
424
425         if (phys_total == 1) {
426                 /* Do MII setup. */
427                 error = mii_attach(dev, &sc->arge_miibus, ifp,
428                     arge_ifmedia_upd, arge_ifmedia_sts, BMSR_DEFCAPMASK,
429                     MII_PHY_ANY, MII_OFFSET_ANY, 0);
430                 if (error != 0) {
431                         device_printf(dev, "attaching PHYs failed\n");
432                         goto fail;
433                 }
434         }
435         else {
436                 ifmedia_init(&sc->arge_ifmedia, 0, 
437                     arge_multiphy_mediachange,
438                     arge_multiphy_mediastatus);
439                 ifmedia_add(&sc->arge_ifmedia,
440                     IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode, 
441                     0, NULL);
442                 ifmedia_set(&sc->arge_ifmedia,
443                     IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode);
444                 arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
445         }
446
447         /* Call MI attach routine. */
448         ether_ifattach(ifp, eaddr);
449
450         /* Hook interrupt last to avoid having to lock softc */
451         error = bus_setup_intr(dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
452             arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);
453
454         if (error) {
455                 device_printf(dev, "couldn't set up irq\n");
456                 ether_ifdetach(ifp);
457                 goto fail;
458         }
459
460         /* setup sysctl variables */
461         arge_attach_sysctl(dev);
462
463 fail:
464         if (error) 
465                 arge_detach(dev);
466
467         return (error);
468 }
469
470 static int
471 arge_detach(device_t dev)
472 {
473         struct arge_softc       *sc = device_get_softc(dev);
474         struct ifnet            *ifp = sc->arge_ifp;
475
476         KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized"));
477
478         /* These should only be active if attach succeeded */
479         if (device_is_attached(dev)) {
480                 ARGE_LOCK(sc);
481                 sc->arge_detach = 1;
482 #ifdef DEVICE_POLLING
483                 if (ifp->if_capenable & IFCAP_POLLING)
484                         ether_poll_deregister(ifp);
485 #endif
486
487                 arge_stop(sc);
488                 ARGE_UNLOCK(sc);
489                 taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
490                 ether_ifdetach(ifp);
491         }
492
493         if (sc->arge_miibus)
494                 device_delete_child(dev, sc->arge_miibus);
495
496         bus_generic_detach(dev);
497
498         if (sc->arge_intrhand)
499                 bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);
500
501         if (sc->arge_res)
502                 bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid, 
503                     sc->arge_res);
504
505         if (ifp)
506                 if_free(ifp);
507
508         arge_dma_free(sc);
509
510         mtx_destroy(&sc->arge_mtx);
511
512         return (0);
513
514 }
515
516 static int
517 arge_suspend(device_t dev)
518 {
519
520         panic("%s", __func__);
521         return 0;
522 }
523
524 static int
525 arge_resume(device_t dev)
526 {
527
528         panic("%s", __func__);
529         return 0;
530 }
531
532 static int
533 arge_shutdown(device_t dev)
534 {
535         struct arge_softc       *sc;
536
537         sc = device_get_softc(dev);
538
539         ARGE_LOCK(sc);
540         arge_stop(sc);
541         ARGE_UNLOCK(sc);
542
543         return (0);
544 }
545
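/*
 * Read a PHY register over the MDIO interface: latch the PHY/register
 * address, issue a READ command and poll the BUSY bit in the indicator
 * register (up to ARGE_MII_TIMEOUT iterations) before fetching the result.
 */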
546 static int
547 arge_miibus_readreg(device_t dev, int phy, int reg)
548 {
549         struct arge_softc * sc = device_get_softc(dev);
550         int i, result;
551         uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT) 
552             | (reg & MAC_MII_REG_MASK);
553
554         if ((sc->arge_phymask  & (1 << phy)) == 0)
555                 return (0);
556
557         mtx_lock(&miibus_mtx);
558         ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
559         ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
560         ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);
561
562         i = ARGE_MII_TIMEOUT;
563         while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) & 
564             MAC_MII_INDICATOR_BUSY) && (i--))
565                 DELAY(5);
566
567         if (i < 0) {
568                 mtx_unlock(&miibus_mtx);
569                 dprintf("%s timed out\n", __func__);
570                 /* XXX: return an errno instead? */
571                 return (-1);
572         }
573
574         result = ARGE_MII_READ(AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
575         ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
576         mtx_unlock(&miibus_mtx);
577
578         dprintf("%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__, 
579                  phy, reg, addr, result);
580
581         return (result);
582 }
583
584 static int
585 arge_miibus_writereg(device_t dev, int phy, int reg, int data)
586 {
587         struct arge_softc * sc = device_get_softc(dev);
588         int i;
589         uint32_t addr = 
590             (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);
591
592
593         if ((sc->arge_phymask  & (1 << phy)) == 0)
594                 return (-1);
595
596         dprintf("%s: phy=%d, reg=%02x, value=%04x\n", __func__, 
597             phy, reg, data);
598
599         mtx_lock(&miibus_mtx);
600         ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
601         ARGE_MII_WRITE(AR71XX_MAC_MII_CONTROL, data);
602
603         i = ARGE_MII_TIMEOUT;
604         while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) & 
605             MAC_MII_INDICATOR_BUSY) && (i--))
606                 DELAY(5);
607
608         mtx_unlock(&miibus_mtx);
609
610         if (i < 0) {
611                 dprintf("%s timed out\n", __func__);
612                 /* XXX: return an errno instead? */
613                 return (-1);
614         }
615
616         return (0);
617 }
618
619 static void
620 arge_miibus_statchg(device_t dev)
621 {
622         struct arge_softc               *sc;
623
624         sc = device_get_softc(dev);
625         taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
626 }
627
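/*
 * Deferred link-state handler, queued from arge_miibus_statchg().  When the
 * PHY reports an active link, reprogram the MAC and PLL for the negotiated
 * media and duplex.
 */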
628 static void
629 arge_link_task(void *arg, int pending)
630 {
631         struct arge_softc       *sc;
632         struct mii_data         *mii;
633         struct ifnet            *ifp;
634         uint32_t                media, duplex;
635
636         sc = (struct arge_softc *)arg;
637
638         ARGE_LOCK(sc);
639         mii = device_get_softc(sc->arge_miibus);
640         ifp = sc->arge_ifp;
641         if (mii == NULL || ifp == NULL ||
642             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
643                 ARGE_UNLOCK(sc);
644                 return;
645         }
646
647         if (mii->mii_media_status & IFM_ACTIVE) {
648
649                 media = IFM_SUBTYPE(mii->mii_media_active);
650
651                 if (media != IFM_NONE) {
652                         sc->arge_link_status = 1;
653                         duplex = mii->mii_media_active & IFM_GMASK;
654                         arge_set_pll(sc, media, duplex);
655                 }
656         } else
657                 sc->arge_link_status = 0;
658
659         ARGE_UNLOCK(sc);
660 }
661
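/*
 * Program the MAC interface mode (10/100/1000), duplex and FIFO byte-mode
 * bits for the selected media, then have the SoC-specific code set the
 * corresponding GE0/GE1 PLL for that speed.
 */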
662 static void
663 arge_set_pll(struct arge_softc *sc, int media, int duplex)
664 {
665         uint32_t                cfg, ifcontrol, rx_filtmask;
666         int if_speed;
667
668         cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
669         cfg &= ~(MAC_CFG2_IFACE_MODE_1000 
670             | MAC_CFG2_IFACE_MODE_10_100 
671             | MAC_CFG2_FULL_DUPLEX);
672
673         if (duplex == IFM_FDX)
674                 cfg |= MAC_CFG2_FULL_DUPLEX;
675
676         ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
677         ifcontrol &= ~MAC_IFCONTROL_SPEED;
678         rx_filtmask = 
679             ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
680         rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;
681
682         switch(media) {
683         case IFM_10_T:
684                 cfg |= MAC_CFG2_IFACE_MODE_10_100;
685                 if_speed = 10;
686                 break;
687         case IFM_100_TX:
688                 cfg |= MAC_CFG2_IFACE_MODE_10_100;
689                 ifcontrol |= MAC_IFCONTROL_SPEED;
690                 if_speed = 100;
691                 break;
692         case IFM_1000_T:
693         case IFM_1000_SX:
694                 cfg |= MAC_CFG2_IFACE_MODE_1000;
695                 rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
696                 if_speed = 1000;
697                 break;
698         default:
699                 if_speed = 100;
700                 device_printf(sc->arge_dev, 
701                     "Unknown media %d\n", media);
702         }
703
704         ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD,
705             0x008001ff);
706
707         ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
708         ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
709         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK, 
710             rx_filtmask);
711
712         /* set PLL registers */
713         if (sc->arge_mac_unit == 0)
714                 ar71xx_device_set_pll_ge0(if_speed);
715         else
716                 ar71xx_device_set_pll_ge1(if_speed);
717 }
718
719
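/*
 * Quiesce both DMA engines: disable RX/TX, clear the descriptor base
 * registers and acknowledge any pending packet and error status bits so
 * the rings can be reprogrammed from a clean state.
 */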
720 static void
721 arge_reset_dma(struct arge_softc *sc)
722 {
723         ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
724         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);
725
726         ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
727         ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);
728
729         /* Clear all possible RX interrupts */
730         while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
731                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
732
733         /* 
734          * Clear all possible TX interrupts
735          */
736         while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
737                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
738
739         /* 
740          * Now Rx/Tx errors
741          */
742         ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, 
743             DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
744         ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, 
745             DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
746 }
747
748
749
750 static void
751 arge_init(void *xsc)
752 {
753         struct arge_softc        *sc = xsc;
754
755         ARGE_LOCK(sc);
756         arge_init_locked(sc);
757         ARGE_UNLOCK(sc);
758 }
759
760 static void
761 arge_init_locked(struct arge_softc *sc)
762 {
763         struct ifnet            *ifp = sc->arge_ifp;
764         struct mii_data         *mii;
765
766         ARGE_LOCK_ASSERT(sc);
767
768         arge_stop(sc);
769
770         /* Init circular RX list. */
771         if (arge_rx_ring_init(sc) != 0) {
772                 device_printf(sc->arge_dev,
773                     "initialization failed: no memory for rx buffers\n");
774                 arge_stop(sc);
775                 return;
776         }
777
778         /* Init tx descriptors. */
779         arge_tx_ring_init(sc);
780
781         arge_reset_dma(sc);
782
783
784         if (sc->arge_miibus) {
785                 sc->arge_link_status = 0;
786                 mii = device_get_softc(sc->arge_miibus);
787                 mii_mediachg(mii);
788         }
789         else {
790                 /*
791                  * The sun always shines over a multi-PHY interface (link is always up)
792                  */
793                 sc->arge_link_status = 1;
794         }
795
796         ifp->if_drv_flags |= IFF_DRV_RUNNING;
797         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
798
799         if (sc->arge_miibus)
800                 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
801
802         ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
803         ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));
804
805         /* Start listening */
806         ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
807
808         /* Enable interrupts */
809         ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
810 }
811
812 /*
813  * Return whether the mbuf chain is correctly aligned
814  * for the arge TX engine.
815  *
816  * The TX engine requires each fragment to be aligned to a
817  * 4 byte boundary and the size of each fragment except
818  * the last to be a multiple of 4 bytes.
819  */
820 static int
821 arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
822 {
823         struct mbuf *m;
824
825         for (m = m0; m != NULL; m = m->m_next) {
826                 if((mtod(m, intptr_t) & 3) != 0)
827                         return 0;
828                 if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
829                         return 0;
830         }
831         return 1;
832 }
833
834 /*
835  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
836  * pointers to the fragment pointers.
837  */
838 static int
839 arge_encap(struct arge_softc *sc, struct mbuf **m_head)
840 {
841         struct arge_txdesc      *txd;
842         struct arge_desc        *desc, *prev_desc;
843         bus_dma_segment_t       txsegs[ARGE_MAXFRAGS];
844         int                     error, i, nsegs, prod, prev_prod;
845         struct mbuf             *m;
846
847         ARGE_LOCK_ASSERT(sc);
848
849         /*
850          * Fix the mbuf chain: each fragment must start on a 4-byte boundary
851          * and all but the last must be a multiple of 4 bytes in length
852          */
853         m = *m_head;
854         if (! arge_mbuf_chain_is_tx_aligned(m)) {
855                 sc->stats.tx_pkts_unaligned++;
856                 m = m_defrag(*m_head, M_DONTWAIT);
857                 if (m == NULL) {
858                         *m_head = NULL;
859                         return (ENOBUFS);
860                 }
861                 *m_head = m;
862         } else
863                 sc->stats.tx_pkts_aligned++;
864
865         prod = sc->arge_cdata.arge_tx_prod;
866         txd = &sc->arge_cdata.arge_txdesc[prod];
867         error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag, 
868             txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
869
870         if (error == EFBIG) {
871                 panic("EFBIG");
872         } else if (error != 0)
873                 return (error);
874
875         if (nsegs == 0) {
876                 m_freem(*m_head);
877                 *m_head = NULL;
878                 return (EIO);
879         }
880
881         /* Check number of available descriptors. */
882         if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
883                 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
884                 return (ENOBUFS);
885         }
886
887         txd->tx_m = *m_head;
888         bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
889             BUS_DMASYNC_PREWRITE);
890
891         /* 
892          * Make a list of descriptors for this packet. DMA controller will
893          * walk through it while arge_link is not zero.
894          */
895         prev_prod = prod;
896         desc = prev_desc = NULL;
897         for (i = 0; i < nsegs; i++) {
898                 desc = &sc->arge_rdata.arge_tx_ring[prod];
899                 desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);
900
901                 if (txsegs[i].ds_addr & 3)
902                         panic("TX packet address unaligned\n");
903
904                 desc->packet_addr = txsegs[i].ds_addr;
905                 
906                 /* link with previous descriptor */
907                 if (prev_desc)
908                         prev_desc->packet_ctrl |= ARGE_DESC_MORE;
909
910                 sc->arge_cdata.arge_tx_cnt++;
911                 prev_desc = desc;
912                 ARGE_INC(prod, ARGE_TX_RING_COUNT);
913         }
914
915         /* Update producer index. */
916         sc->arge_cdata.arge_tx_prod = prod;
917
918         /* Sync descriptors. */
919         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
920             sc->arge_cdata.arge_tx_ring_map,
921             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
922
923         /* Start transmitting */
924         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
925         return (0);
926 }
927
928 static void
929 arge_start(struct ifnet *ifp)
930 {
931         struct arge_softc        *sc;
932
933         sc = ifp->if_softc;
934
935         ARGE_LOCK(sc);
936         arge_start_locked(ifp);
937         ARGE_UNLOCK(sc);
938 }
939
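/*
 * Drain the interface send queue into the TX ring.  Transmission is skipped
 * while the interface is not running, is marked output-active or the link is
 * down; otherwise packets are encapsulated until fewer than two free
 * descriptors remain.
 */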
940 static void
941 arge_start_locked(struct ifnet *ifp)
942 {
943         struct arge_softc       *sc;
944         struct mbuf             *m_head;
945         int                     enq;
946
947         sc = ifp->if_softc;
948
949         ARGE_LOCK_ASSERT(sc);
950
951         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
952             IFF_DRV_RUNNING || sc->arge_link_status == 0 )
953                 return;
954
955         arge_flush_ddr(sc);
956
957         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
958             sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
959                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
960                 if (m_head == NULL)
961                         break;
962
963
964                 /*
965                  * Pack the data into the transmit ring.
966                  */
967                 if (arge_encap(sc, &m_head)) {
968                         if (m_head == NULL)
969                                 break;
970                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
971                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
972                         break;
973                 }
974
975                 enq++;
976                 /*
977                  * If there's a BPF listener, bounce a copy of this frame
978                  * to him.
979                  */
980                 ETHER_BPF_MTAP(ifp, m_head);
981         }
982 }
983
984 static void
985 arge_stop(struct arge_softc *sc)
986 {
987         struct ifnet        *ifp;
988
989         ARGE_LOCK_ASSERT(sc);
990
991         ifp = sc->arge_ifp;
992         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
993         if (sc->arge_miibus)
994                 callout_stop(&sc->arge_stat_callout);
995
996         /* mask out interrupts */
997         ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
998
999         arge_reset_dma(sc);
1000 }
1001
1002
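/*
 * Handle interface ioctls: bring the interface up or down on SIOCSIFFLAGS,
 * dispatch media requests to miibus or the local ifmedia, and toggle
 * DEVICE_POLLING via SIOCSIFCAP.  Everything else goes to ether_ioctl().
 */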
1003 static int
1004 arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1005 {
1006         struct arge_softc               *sc = ifp->if_softc;
1007         struct ifreq            *ifr = (struct ifreq *) data;
1008         struct mii_data         *mii;
1009         int                     error;
1010 #ifdef DEVICE_POLLING
1011         int                     mask;
1012 #endif
1013
1014         switch (command) {
1015         case SIOCSIFFLAGS:
1016                 ARGE_LOCK(sc);
1017                 if ((ifp->if_flags & IFF_UP) != 0) {
1018                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1019                                 if (((ifp->if_flags ^ sc->arge_if_flags)
1020                                     & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1021                                         /* XXX: handle promisc & multi flags */
1022                                 }
1023                                         
1024                         } else {
1025                                 if (!sc->arge_detach)
1026                                         arge_init_locked(sc);
1027                         }
1028                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1029                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1030                         arge_stop(sc);
1031                 }
1032                 sc->arge_if_flags = ifp->if_flags;
1033                 ARGE_UNLOCK(sc);
1034                 error = 0;
1035                 break;
1036         case SIOCADDMULTI:
1037         case SIOCDELMULTI:
1038                 /* XXX: implement SIOCDELMULTI */
1039                 error = 0;
1040                 break;
1041         case SIOCGIFMEDIA:
1042         case SIOCSIFMEDIA:
1043                 if (sc->arge_miibus) {
1044                         mii = device_get_softc(sc->arge_miibus);
1045                         error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1046                 }
1047                 else 
1048                         error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, command);
1049                 break;
1050         case SIOCSIFCAP:
1051                 /* XXX: Check other capabilities */
1052 #ifdef DEVICE_POLLING
1053                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1054                 if (mask & IFCAP_POLLING) {
1055                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
1056                                 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1057                                 error = ether_poll_register(arge_poll, ifp);
1058                                 if (error)
1059                                         return error;
1060                                 ARGE_LOCK(sc);
1061                                 ifp->if_capenable |= IFCAP_POLLING;
1062                                 ARGE_UNLOCK(sc);
1063                         } else {
1064                                 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1065                                 error = ether_poll_deregister(ifp);
1066                                 ARGE_LOCK(sc);
1067                                 ifp->if_capenable &= ~IFCAP_POLLING;
1068                                 ARGE_UNLOCK(sc);
1069                         }
1070                 }
1071                 error = 0;
1072                 break;
1073 #endif
1074         default:
1075                 error = ether_ioctl(ifp, command, data);
1076                 break;
1077         }
1078
1079         return (error);
1080 }
1081
1082 /*
1083  * Set media options.
1084  */
1085 static int
1086 arge_ifmedia_upd(struct ifnet *ifp)
1087 {
1088         struct arge_softc               *sc;
1089         struct mii_data         *mii;
1090         struct mii_softc        *miisc;
1091         int                     error;
1092
1093         sc = ifp->if_softc;
1094         ARGE_LOCK(sc);
1095         mii = device_get_softc(sc->arge_miibus);
1096         if (mii->mii_instance) {
1097                 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1098                         mii_phy_reset(miisc);
1099         }
1100         error = mii_mediachg(mii);
1101         ARGE_UNLOCK(sc);
1102
1103         return (error);
1104 }
1105
1106 /*
1107  * Report current media status.
1108  */
1109 static void
1110 arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1111 {
1112         struct arge_softc               *sc = ifp->if_softc;
1113         struct mii_data         *mii;
1114
1115         mii = device_get_softc(sc->arge_miibus);
1116         ARGE_LOCK(sc);
1117         mii_pollstat(mii);
1118         ARGE_UNLOCK(sc);
1119         ifmr->ifm_active = mii->mii_media_active;
1120         ifmr->ifm_status = mii->mii_media_status;
1121 }
1122
1123 struct arge_dmamap_arg {
1124         bus_addr_t      arge_busaddr;
1125 };
1126
1127 static void
1128 arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1129 {
1130         struct arge_dmamap_arg  *ctx;
1131
1132         if (error != 0)
1133                 return;
1134         ctx = arg;
1135         ctx->arge_busaddr = segs[0].ds_addr;
1136 }
1137
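/*
 * Create the DMA tags for the descriptor rings and packet buffers, allocate
 * and load the TX/RX rings, and create per-buffer DMA maps (plus one spare
 * RX map used when swapping buffers in arge_newbuf()).
 */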
1138 static int
1139 arge_dma_alloc(struct arge_softc *sc)
1140 {
1141         struct arge_dmamap_arg  ctx;
1142         struct arge_txdesc      *txd;
1143         struct arge_rxdesc      *rxd;
1144         int                     error, i;
1145
1146         /* Create parent DMA tag. */
1147         error = bus_dma_tag_create(
1148             bus_get_dma_tag(sc->arge_dev),      /* parent */
1149             1, 0,                       /* alignment, boundary */
1150             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1151             BUS_SPACE_MAXADDR,          /* highaddr */
1152             NULL, NULL,                 /* filter, filterarg */
1153             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1154             0,                          /* nsegments */
1155             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1156             0,                          /* flags */
1157             NULL, NULL,                 /* lockfunc, lockarg */
1158             &sc->arge_cdata.arge_parent_tag);
1159         if (error != 0) {
1160                 device_printf(sc->arge_dev, "failed to create parent DMA tag\n");
1161                 goto fail;
1162         }
1163         /* Create tag for Tx ring. */
1164         error = bus_dma_tag_create(
1165             sc->arge_cdata.arge_parent_tag,     /* parent */
1166             ARGE_RING_ALIGN, 0,         /* alignment, boundary */
1167             BUS_SPACE_MAXADDR,          /* lowaddr */
1168             BUS_SPACE_MAXADDR,          /* highaddr */
1169             NULL, NULL,                 /* filter, filterarg */
1170             ARGE_TX_DMA_SIZE,           /* maxsize */
1171             1,                          /* nsegments */
1172             ARGE_TX_DMA_SIZE,           /* maxsegsize */
1173             0,                          /* flags */
1174             NULL, NULL,                 /* lockfunc, lockarg */
1175             &sc->arge_cdata.arge_tx_ring_tag);
1176         if (error != 0) {
1177                 device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n");
1178                 goto fail;
1179         }
1180
1181         /* Create tag for Rx ring. */
1182         error = bus_dma_tag_create(
1183             sc->arge_cdata.arge_parent_tag,     /* parent */
1184             ARGE_RING_ALIGN, 0,         /* alignment, boundary */
1185             BUS_SPACE_MAXADDR,          /* lowaddr */
1186             BUS_SPACE_MAXADDR,          /* highaddr */
1187             NULL, NULL,                 /* filter, filterarg */
1188             ARGE_RX_DMA_SIZE,           /* maxsize */
1189             1,                          /* nsegments */
1190             ARGE_RX_DMA_SIZE,           /* maxsegsize */
1191             0,                          /* flags */
1192             NULL, NULL,                 /* lockfunc, lockarg */
1193             &sc->arge_cdata.arge_rx_ring_tag);
1194         if (error != 0) {
1195                 device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n");
1196                 goto fail;
1197         }
1198
1199         /* Create tag for Tx buffers. */
1200         error = bus_dma_tag_create(
1201             sc->arge_cdata.arge_parent_tag,     /* parent */
1202             sizeof(uint32_t), 0,        /* alignment, boundary */
1203             BUS_SPACE_MAXADDR,          /* lowaddr */
1204             BUS_SPACE_MAXADDR,          /* highaddr */
1205             NULL, NULL,                 /* filter, filterarg */
1206             MCLBYTES * ARGE_MAXFRAGS,   /* maxsize */
1207             ARGE_MAXFRAGS,              /* nsegments */
1208             MCLBYTES,                   /* maxsegsize */
1209             0,                          /* flags */
1210             NULL, NULL,                 /* lockfunc, lockarg */
1211             &sc->arge_cdata.arge_tx_tag);
1212         if (error != 0) {
1213                 device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
1214                 goto fail;
1215         }
1216
1217         /* Create tag for Rx buffers. */
1218         error = bus_dma_tag_create(
1219             sc->arge_cdata.arge_parent_tag,     /* parent */
1220             ARGE_RX_ALIGN, 0,           /* alignment, boundary */
1221             BUS_SPACE_MAXADDR,          /* lowaddr */
1222             BUS_SPACE_MAXADDR,          /* highaddr */
1223             NULL, NULL,                 /* filter, filterarg */
1224             MCLBYTES,                   /* maxsize */
1225             ARGE_MAXFRAGS,              /* nsegments */
1226             MCLBYTES,                   /* maxsegsize */
1227             0,                          /* flags */
1228             NULL, NULL,                 /* lockfunc, lockarg */
1229             &sc->arge_cdata.arge_rx_tag);
1230         if (error != 0) {
1231                 device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
1232                 goto fail;
1233         }
1234
1235         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1236         error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
1237             (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
1238             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map);
1239         if (error != 0) {
1240                 device_printf(sc->arge_dev,
1241                     "failed to allocate DMA'able memory for Tx ring\n");
1242                 goto fail;
1243         }
1244
1245         ctx.arge_busaddr = 0;
1246         error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
1247             sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
1248             ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1249         if (error != 0 || ctx.arge_busaddr == 0) {
1250                 device_printf(sc->arge_dev,
1251                     "failed to load DMA'able memory for Tx ring\n");
1252                 goto fail;
1253         }
1254         sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;
1255
1256         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1257         error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
1258             (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
1259             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map);
1260         if (error != 0) {
1261                 device_printf(sc->arge_dev,
1262                     "failed to allocate DMA'able memory for Rx ring\n");
1263                 goto fail;
1264         }
1265
1266         ctx.arge_busaddr = 0;
1267         error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
1268             sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
1269             ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1270         if (error != 0 || ctx.arge_busaddr == 0) {
1271                 device_printf(sc->arge_dev,
1272                     "failed to load DMA'able memory for Rx ring\n");
1273                 goto fail;
1274         }
1275         sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;
1276
1277         /* Create DMA maps for Tx buffers. */
1278         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1279                 txd = &sc->arge_cdata.arge_txdesc[i];
1280                 txd->tx_m = NULL;
1281                 txd->tx_dmamap = NULL;
1282                 error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
1283                     &txd->tx_dmamap);
1284                 if (error != 0) {
1285                         device_printf(sc->arge_dev,
1286                             "failed to create Tx dmamap\n");
1287                         goto fail;
1288                 }
1289         }
1290         /* Create DMA maps for Rx buffers. */
1291         if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
1292             &sc->arge_cdata.arge_rx_sparemap)) != 0) {
1293                 device_printf(sc->arge_dev,
1294                     "failed to create spare Rx dmamap\n");
1295                 goto fail;
1296         }
1297         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1298                 rxd = &sc->arge_cdata.arge_rxdesc[i];
1299                 rxd->rx_m = NULL;
1300                 rxd->rx_dmamap = NULL;
1301                 error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
1302                     &rxd->rx_dmamap);
1303                 if (error != 0) {
1304                         device_printf(sc->arge_dev,
1305                             "failed to create Rx dmamap\n");
1306                         goto fail;
1307                 }
1308         }
1309
1310 fail:
1311         return (error);
1312 }
1313
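/*
 * Release everything set up by arge_dma_alloc(): ring memory, per-buffer
 * DMA maps and all DMA tags.
 */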
1314 static void
1315 arge_dma_free(struct arge_softc *sc)
1316 {
1317         struct arge_txdesc      *txd;
1318         struct arge_rxdesc      *rxd;
1319         int                     i;
1320
1321         /* Tx ring. */
1322         if (sc->arge_cdata.arge_tx_ring_tag) {
1323                 if (sc->arge_cdata.arge_tx_ring_map)
1324                         bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
1325                             sc->arge_cdata.arge_tx_ring_map);
1326                 if (sc->arge_cdata.arge_tx_ring_map &&
1327                     sc->arge_rdata.arge_tx_ring)
1328                         bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
1329                             sc->arge_rdata.arge_tx_ring,
1330                             sc->arge_cdata.arge_tx_ring_map);
1331                 sc->arge_rdata.arge_tx_ring = NULL;
1332                 sc->arge_cdata.arge_tx_ring_map = NULL;
1333                 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
1334                 sc->arge_cdata.arge_tx_ring_tag = NULL;
1335         }
1336         /* Rx ring. */
1337         if (sc->arge_cdata.arge_rx_ring_tag) {
1338                 if (sc->arge_cdata.arge_rx_ring_map)
1339                         bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
1340                             sc->arge_cdata.arge_rx_ring_map);
1341                 if (sc->arge_cdata.arge_rx_ring_map &&
1342                     sc->arge_rdata.arge_rx_ring)
1343                         bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
1344                             sc->arge_rdata.arge_rx_ring,
1345                             sc->arge_cdata.arge_rx_ring_map);
1346                 sc->arge_rdata.arge_rx_ring = NULL;
1347                 sc->arge_cdata.arge_rx_ring_map = NULL;
1348                 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
1349                 sc->arge_cdata.arge_rx_ring_tag = NULL;
1350         }
1351         /* Tx buffers. */
1352         if (sc->arge_cdata.arge_tx_tag) {
1353                 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1354                         txd = &sc->arge_cdata.arge_txdesc[i];
1355                         if (txd->tx_dmamap) {
1356                                 bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
1357                                     txd->tx_dmamap);
1358                                 txd->tx_dmamap = NULL;
1359                         }
1360                 }
1361                 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
1362                 sc->arge_cdata.arge_tx_tag = NULL;
1363         }
1364         /* Rx buffers. */
1365         if (sc->arge_cdata.arge_rx_tag) {
1366                 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1367                         rxd = &sc->arge_cdata.arge_rxdesc[i];
1368                         if (rxd->rx_dmamap) {
1369                                 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1370                                     rxd->rx_dmamap);
1371                                 rxd->rx_dmamap = NULL;
1372                         }
1373                 }
1374                 if (sc->arge_cdata.arge_rx_sparemap) {
1375                         bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1376                             sc->arge_cdata.arge_rx_sparemap);
1377                         sc->arge_cdata.arge_rx_sparemap = 0;
1378                 }
1379                 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
1380                 sc->arge_cdata.arge_rx_tag = NULL;
1381         }
1382
1383         if (sc->arge_cdata.arge_parent_tag) {
1384                 bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
1385                 sc->arge_cdata.arge_parent_tag = NULL;
1386         }
1387 }
1388
1389 /*
1390  * Initialize the transmit descriptors.
1391  */
1392 static int
1393 arge_tx_ring_init(struct arge_softc *sc)
1394 {
1395         struct arge_ring_data   *rd;
1396         struct arge_txdesc      *txd;
1397         bus_addr_t              addr;
1398         int                     i;
1399
1400         sc->arge_cdata.arge_tx_prod = 0;
1401         sc->arge_cdata.arge_tx_cons = 0;
1402         sc->arge_cdata.arge_tx_cnt = 0;
1403         sc->arge_cdata.arge_tx_pkts = 0;
1404
1405         rd = &sc->arge_rdata;
1406         bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
1407         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1408                 if (i == ARGE_TX_RING_COUNT - 1)
1409                         addr = ARGE_TX_RING_ADDR(sc, 0);
1410                 else
1411                         addr = ARGE_TX_RING_ADDR(sc, i + 1);
1412                 rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
1413                 rd->arge_tx_ring[i].next_desc = addr;
1414                 txd = &sc->arge_cdata.arge_txdesc[i];
1415                 txd->tx_m = NULL;
1416         }
1417
1418         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1419             sc->arge_cdata.arge_tx_ring_map,
1420             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1421
1422         return (0);
1423 }
1424
1425 /*
1426  * Initialize the RX descriptors and allocate mbufs for them. Note that
1427  * we arrange the descriptors in a closed ring, so that the last descriptor
1428  * points back to the first.
1429  */
1430 static int
1431 arge_rx_ring_init(struct arge_softc *sc)
1432 {
1433         struct arge_ring_data   *rd;
1434         struct arge_rxdesc      *rxd;
1435         bus_addr_t              addr;
1436         int                     i;
1437
1438         sc->arge_cdata.arge_rx_cons = 0;
1439
1440         rd = &sc->arge_rdata;
1441         bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
1442         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1443                 rxd = &sc->arge_cdata.arge_rxdesc[i];
1444                 rxd->rx_m = NULL;
1445                 rxd->desc = &rd->arge_rx_ring[i];
1446                 if (i == ARGE_RX_RING_COUNT - 1)
1447                         addr = ARGE_RX_RING_ADDR(sc, 0);
1448                 else
1449                         addr = ARGE_RX_RING_ADDR(sc, i + 1);
1450                 rd->arge_rx_ring[i].next_desc = addr;
1451                 if (arge_newbuf(sc, i) != 0) {
1452                         return (ENOBUFS);
1453                 }
1454         }
1455
1456         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1457             sc->arge_cdata.arge_rx_ring_map,
1458             BUS_DMASYNC_PREWRITE);
1459
1460         return (0);
1461 }
1462
1463 /*
1464  * Initialize an RX descriptor and attach an MBUF cluster.
1465  */
1466 static int
1467 arge_newbuf(struct arge_softc *sc, int idx)
1468 {
1469         struct arge_desc                *desc;
1470         struct arge_rxdesc      *rxd;
1471         struct mbuf             *m;
1472         bus_dma_segment_t       segs[1];
1473         bus_dmamap_t            map;
1474         int                     nsegs;
1475
1476         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1477         if (m == NULL)
1478                 return (ENOBUFS);
1479         m->m_len = m->m_pkthdr.len = MCLBYTES;
1480         m_adj(m, sizeof(uint64_t));
1481
1482         if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
1483             sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1484                 m_freem(m);
1485                 return (ENOBUFS);
1486         }
1487         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1488
1489         rxd = &sc->arge_cdata.arge_rxdesc[idx];
1490         if (rxd->rx_m != NULL) {
1491                 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
1492         }
1493         map = rxd->rx_dmamap;
1494         rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
1495         sc->arge_cdata.arge_rx_sparemap = map;
1496         rxd->rx_m = m;
1497         desc = rxd->desc;
1498         if (segs[0].ds_addr & 3)
1499                 panic("RX packet address unaligned");
1500         desc->packet_addr = segs[0].ds_addr;
1501         desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);
1502
1503         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1504             sc->arge_cdata.arge_rx_ring_map,
1505             BUS_DMASYNC_PREWRITE);
1506
1507         return (0);
1508 }
1509
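/*
 * The RX DMA engine requires 4-byte aligned buffer addresses, so a received
 * frame starts with its Ethernet header on a 32-bit boundary and the IP
 * header ends up misaligned.  Shift the payload back by ETHER_ALIGN (2)
 * bytes so the IP header is naturally aligned again.
 */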
1510 static __inline void
1511 arge_fixup_rx(struct mbuf *m)
1512 {
1513         int             i;
1514         uint16_t        *src, *dst;
1515
1516         src = mtod(m, uint16_t *);
1517         dst = src - 1;
1518
1519         for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
1520                 *dst++ = *src++;
1521         }
1522
1523         if (m->m_len % sizeof(uint16_t))
1524                 *(uint8_t *)dst = *(uint8_t *)src;
1525
1526         m->m_data -= ETHER_ALIGN;
1527 }
1528
1529 #ifdef DEVICE_POLLING
1530 static int
1531 arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1532 {
1533         struct arge_softc *sc = ifp->if_softc;
1534         int rx_npkts = 0;
1535
1536         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1537                 ARGE_LOCK(sc);
1538                 arge_tx_locked(sc);
1539                 rx_npkts = arge_rx_locked(sc);
1540                 ARGE_UNLOCK(sc);
1541         }
1542
1543         return (rx_npkts);
1544 }
1545 #endif /* DEVICE_POLLING */
1546
1547
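     /*
      * Reclaim descriptors for frames the hardware has finished sending:
      * walk the TX ring from the consumer to the producer index, stop at
      * the first descriptor the DMA engine has not yet marked empty, and
      * unload the DMA maps and free the mbufs of the completed ones.
      */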
1548 static void
1549 arge_tx_locked(struct arge_softc *sc)
1550 {
1551         struct arge_txdesc      *txd;
1552         struct arge_desc        *cur_tx;
1553         struct ifnet            *ifp;
1554         uint32_t                ctrl;
1555         int                     cons, prod;
1556
1557         ARGE_LOCK_ASSERT(sc);
1558
1559         cons = sc->arge_cdata.arge_tx_cons;
1560         prod = sc->arge_cdata.arge_tx_prod;
1561         if (cons == prod)
1562                 return;
1563
1564         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1565             sc->arge_cdata.arge_tx_ring_map,
1566             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1567
1568         ifp = sc->arge_ifp;
1569         /*
1570          * Go through our tx list and free mbufs for those
1571          * frames that have been transmitted.
1572          */
1573         for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
1574                 cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
1575                 ctrl = cur_tx->packet_ctrl;
1576                 /* Check if descriptor has "finished" flag */
1577                 if ((ctrl & ARGE_DESC_EMPTY) == 0)
1578                         break;
1579
1580                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
1581
1582                 sc->arge_cdata.arge_tx_cnt--;
1583                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1584
1585                 txd = &sc->arge_cdata.arge_txdesc[cons];
1586
1587                 ifp->if_opackets++;
1588
1589                 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
1590                     BUS_DMASYNC_POSTWRITE);
1591                 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
1592
1593                 /* Free only if it's the first descriptor in the list */
1594                 if (txd->tx_m)
1595                         m_freem(txd->tx_m);
1596                 txd->tx_m = NULL;
1597
1598                 /* reset descriptor */
1599                 cur_tx->packet_addr = 0;
1600         }
1601
1602         sc->arge_cdata.arge_tx_cons = cons;
1603
1604         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1605             sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1606 }
1607
1608
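     /*
      * Process received frames: walk the RX ring while descriptors have
      * been filled (empty bit cleared by the DMA engine), fix up the
      * payload alignment, strip the CRC, pass each frame to the stack
      * and finally refill the consumed slots with fresh mbuf clusters.
      */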
1609 static int
1610 arge_rx_locked(struct arge_softc *sc)
1611 {
1612         struct arge_rxdesc      *rxd;
1613         struct ifnet            *ifp = sc->arge_ifp;
1614         int                     cons, prog, packet_len, i;
1615         struct arge_desc        *cur_rx;
1616         struct mbuf             *m;
1617         int                     rx_npkts = 0;
1618
1619         ARGE_LOCK_ASSERT(sc);
1620
1621         cons = sc->arge_cdata.arge_rx_cons;
1622
1623         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1624             sc->arge_cdata.arge_rx_ring_map,
1625             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1626
1627         for (prog = 0; prog < ARGE_RX_RING_COUNT;
1628             ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
1629                 cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
1630                 rxd = &sc->arge_cdata.arge_rxdesc[cons];
1631                 m = rxd->rx_m;
1632
1633                 if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
1634                         break;
1635
1636                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
1637
1638                 prog++;
1639
1640                 packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
1641                 bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
1642                     BUS_DMASYNC_POSTREAD);
1643                 m = rxd->rx_m;
1644
1645                 arge_fixup_rx(m);
1646                 m->m_pkthdr.rcvif = ifp;
1647                 /* Skip 4 bytes of CRC */
1648                 m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
1649                 ifp->if_ipackets++;
1650                 rx_npkts++;
1651
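                     /*
                      * Drop the driver lock while the frame is handed up;
                      * if_input may call back into the driver (e.g. to
                      * transmit a reply).
                      */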
1652                 ARGE_UNLOCK(sc);
1653                 (*ifp->if_input)(ifp, m);
1654                 ARGE_LOCK(sc);
1655                 cur_rx->packet_addr = 0;
1656         }
1657
1658         if (prog > 0) {
1659
1660                 i = sc->arge_cdata.arge_rx_cons;
1661                 for (; prog > 0 ; prog--) {
1662                         if (arge_newbuf(sc, i) != 0) {
1663                                 device_printf(sc->arge_dev, 
1664                                     "Failed to allocate buffer\n");
1665                                 break;
1666                         }
1667                         ARGE_INC(i, ARGE_RX_RING_COUNT);
1668                 }
1669
1670                 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1671                     sc->arge_cdata.arge_rx_ring_map,
1672                     BUS_DMASYNC_PREWRITE);
1673
1674                 sc->arge_cdata.arge_rx_cons = cons;
1675         }
1676
1677         return (rx_npkts);
1678 }
1679
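     /*
      * Fast interrupt filter: latch the DMA interrupt status, mask all
      * DMA interrupts and defer the real work to the interrupt thread
      * (arge_intr); report a stray interrupt if none of our status bits
      * are set.
      */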
1680 static int
1681 arge_intr_filter(void *arg)
1682 {
1683         struct arge_softc       *sc = arg;
1684         uint32_t                status, ints;
1685
1686         status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1687         ints = ARGE_READ(sc, AR71XX_DMA_INTR);
1688
1689 #if 0
1690         dprintf("int mask(filter) = %b\n", ints,
1691             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1692             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1693         dprintf("status(filter) = %b\n", status, 
1694             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1695             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1696 #endif
1697
1698         if (status & DMA_INTR_ALL) {
1699                 sc->arge_intr_status |= status;
1700                 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1701                 return (FILTER_SCHEDULE_THREAD);
1702         } 
1703
1704         sc->arge_intr_status = 0;
1705         return (FILTER_STRAY);
1706 }
1707
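     /*
      * Interrupt thread handler: merge the current DMA status with the
      * bits latched by the filter, handle bus errors, completed RX/TX,
      * RX overflow and TX underrun recovery, then re-enable the DMA
      * interrupts the filter masked.
      */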
1708 static void
1709 arge_intr(void *arg)
1710 {
1711         struct arge_softc       *sc = arg;
1712         uint32_t                status;
1713
1714         status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1715         status |= sc->arge_intr_status;
1716
1717 #if 0
1718         dprintf("int status(intr) = %b\n", status,
1719             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1720             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1721 #endif
1722
1723         /* 
1724          * Is it our interrupt at all? 
1725          */
1726         if (status == 0)
1727                 return;
1728
1729         if (status & DMA_INTR_RX_BUS_ERROR) {
1730                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
1731                 device_printf(sc->arge_dev, "RX bus error\n");
1732                 return;
1733         }
1734
1735         if (status & DMA_INTR_TX_BUS_ERROR) {
1736                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
1737                 device_printf(sc->arge_dev, "TX bus error\n");
1738                 return;
1739         }
1740
1741         ARGE_LOCK(sc);
1742
1743         if (status & DMA_INTR_RX_PKT_RCVD)
1744                 arge_rx_locked(sc);
1745
1746         /* 
1747          * RX overrun disables the receiver. 
1748          * Clear indication and re-enable rx. 
1749          */
1750         if (status & DMA_INTR_RX_OVERFLOW) {
1751                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
1752                 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
1753         }
1754
1755         if (status & DMA_INTR_TX_PKT_SENT)
1756                 arge_tx_locked(sc);
1757         /* 
1758          * Underrun turns off TX. Clear underrun indication. 
1759          * If there's anything left in the ring, reactivate the tx. 
1760          */
1761         if (status & DMA_INTR_TX_UNDERRUN) {
1762                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
1763                 if (sc->arge_cdata.arge_tx_pkts > 0) {
1764                         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 
1765                             DMA_TX_CONTROL_EN);
1766                 }
1767         }
1768
1769         /*
1770          * We handled all bits, clear status
1771          */
1772         sc->arge_intr_status = 0;
1773         ARGE_UNLOCK(sc);
1774         /*
1775          * re-enable all interrupts 
1776          */
1777         ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1778 }
1779
1780
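     /*
      * Once-a-second callout: drive the MII state machine and reschedule
      * ourselves for as long as a PHY is attached.
      */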
1781 static void
1782 arge_tick(void *xsc)
1783 {
1784         struct arge_softc       *sc = xsc;
1785         struct mii_data         *mii;
1786
1787         ARGE_LOCK_ASSERT(sc);
1788
1789         if (sc->arge_miibus) {
1790                 mii = device_get_softc(sc->arge_miibus);
1791                 mii_tick(mii);
1792                 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
1793         }
1794 }
1795
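     /*
      * ifmedia change callback used when the MAC is wired to multiple
      * PHYs (e.g. a built-in switch) rather than a single MII-attached
      * PHY.  Only fixed media settings are meaningful here; autoselect
      * is rejected.
      */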
1796 int
1797 arge_multiphy_mediachange(struct ifnet *ifp)
1798 {
1799         struct arge_softc *sc = ifp->if_softc;
1800         struct ifmedia *ifm = &sc->arge_ifmedia;
1801         struct ifmedia_entry *ife = ifm->ifm_cur;
1802
1803         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1804                 return (EINVAL);
1805
1806         if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
1807                 device_printf(sc->arge_dev,
1808                     "AUTO is not supported for multiphy MAC\n");
1809                 return (EINVAL);
1810         }
1811
1812         /*
1813          * Any fixed media request is accepted but otherwise ignored.
1814          */
1815         return (0);
1816 }
1817
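     /*
      * Report the fixed media type and duplex stored in the softc;
      * the link is always reported as active.
      */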
1818 void
1819 arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1820 {
1821         struct arge_softc *sc = ifp->if_softc;
1822
1823         ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1824         ifmr->ifm_active = IFM_ETHER | sc->arge_media_type | 
1825             sc->arge_duplex_mode;
1826 }
1827