1 /*-
2  * Copyright (c) 2009, Oleksandr Tymoshenko
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 /*
32  * AR71XX gigabit ethernet driver
33  */
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
36 #endif
37
38 #include "opt_arge.h"
39
40 #include <sys/param.h>
41 #include <sys/endian.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/mbuf.h>
45 #include <sys/malloc.h>
46 #include <sys/kernel.h>
47 #include <sys/module.h>
48 #include <sys/socket.h>
49 #include <sys/taskqueue.h>
50 #include <sys/sysctl.h>
51
52 #include <net/if.h>
53 #include <net/if_arp.h>
54 #include <net/ethernet.h>
55 #include <net/if_dl.h>
56 #include <net/if_media.h>
57 #include <net/if_types.h>
58
59 #include <net/bpf.h>
60
61 #include <machine/bus.h>
62 #include <machine/cache.h>
63 #include <machine/resource.h>
64 #include <vm/vm_param.h>
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67 #include <machine/pmap.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70
71 #include <dev/mii/mii.h>
72 #include <dev/mii/miivar.h>
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76
77 #include "opt_arge.h"
78
79 #if defined(ARGE_MDIO)
80 #include <dev/etherswitch/mdio.h>
81 #include <dev/etherswitch/miiproxy.h>
82 #include "mdio_if.h"
83 #endif
84
85
86 MODULE_DEPEND(arge, ether, 1, 1, 1);
87 MODULE_DEPEND(arge, miibus, 1, 1, 1);
88 MODULE_VERSION(arge, 1);
89
90 #include "miibus_if.h"
91
92 #include <mips/atheros/ar71xxreg.h>
93 #include <mips/atheros/ar934xreg.h>     /* XXX tsk! */
94 #include <mips/atheros/if_argevar.h>
95 #include <mips/atheros/ar71xx_setup.h>
96 #include <mips/atheros/ar71xx_cpudef.h>
97
98 typedef enum {
99         ARGE_DBG_MII    =       0x00000001,
100         ARGE_DBG_INTR   =       0x00000002,
101         ARGE_DBG_TX     =       0x00000004,
102         ARGE_DBG_RX     =       0x00000008,
103         ARGE_DBG_ERR    =       0x00000010,
104         ARGE_DBG_RESET  =       0x00000020,
105         ARGE_DBG_PLL    =       0x00000040,
106 } arge_debug_flags;
107
108 static const char * arge_miicfg_str[] = {
109         "NONE",
110         "GMII",
111         "MII",
112         "RGMII",
113         "RMII"
114 };
115
116 #ifdef ARGE_DEBUG
117 #define ARGEDEBUG(_sc, _m, ...)                                         \
118         do {                                                            \
119                 if ((_m) & (_sc)->arge_debug)                           \
120                         device_printf((_sc)->arge_dev, __VA_ARGS__);    \
121         } while (0)
122 #else
123 #define ARGEDEBUG(_sc, _m, ...)
124 #endif
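
/*
 * Example (illustrative; requires ARGE_DEBUG): the per-device "debug"
 * sysctl added in arge_attach_sysctl() takes a bitmask of the
 * arge_debug_flags above, e.g.
 *
 *   sysctl dev.arge.0.debug=0x41    # ARGE_DBG_MII | ARGE_DBG_PLL
 */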
125
126 static int arge_attach(device_t);
127 static int arge_detach(device_t);
128 static void arge_flush_ddr(struct arge_softc *);
129 static int arge_ifmedia_upd(struct ifnet *);
130 static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
131 static int arge_ioctl(struct ifnet *, u_long, caddr_t);
132 static void arge_init(void *);
133 static void arge_init_locked(struct arge_softc *);
134 static void arge_link_task(void *, int);
135 static void arge_update_link_locked(struct arge_softc *sc);
136 static void arge_set_pll(struct arge_softc *, int, int);
137 static int arge_miibus_readreg(device_t, int, int);
138 static void arge_miibus_statchg(device_t);
139 static int arge_miibus_writereg(device_t, int, int, int);
140 static int arge_probe(device_t);
141 static void arge_reset_dma(struct arge_softc *);
142 static int arge_resume(device_t);
143 static int arge_rx_ring_init(struct arge_softc *);
144 static void arge_rx_ring_free(struct arge_softc *sc);
145 static int arge_tx_ring_init(struct arge_softc *);
146 static void arge_tx_ring_free(struct arge_softc *);
147 #ifdef DEVICE_POLLING
148 static int arge_poll(struct ifnet *, enum poll_cmd, int);
149 #endif
150 static int arge_shutdown(device_t);
151 static void arge_start(struct ifnet *);
152 static void arge_start_locked(struct ifnet *);
153 static void arge_stop(struct arge_softc *);
154 static int arge_suspend(device_t);
155
156 static int arge_rx_locked(struct arge_softc *);
157 static void arge_tx_locked(struct arge_softc *);
158 static void arge_intr(void *);
159 static int arge_intr_filter(void *);
160 static void arge_tick(void *);
161
162 static void arge_hinted_child(device_t bus, const char *dname, int dunit);
163
164 /*
165  * ifmedia callbacks for multiPHY MAC
166  */
167 void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
168 int arge_multiphy_mediachange(struct ifnet *);
169
170 static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
171 static int arge_dma_alloc(struct arge_softc *);
172 static void arge_dma_free(struct arge_softc *);
173 static int arge_newbuf(struct arge_softc *, int);
174 static __inline void arge_fixup_rx(struct mbuf *);
175
176 static device_method_t arge_methods[] = {
177         /* Device interface */
178         DEVMETHOD(device_probe,         arge_probe),
179         DEVMETHOD(device_attach,        arge_attach),
180         DEVMETHOD(device_detach,        arge_detach),
181         DEVMETHOD(device_suspend,       arge_suspend),
182         DEVMETHOD(device_resume,        arge_resume),
183         DEVMETHOD(device_shutdown,      arge_shutdown),
184
185         /* MII interface */
186         DEVMETHOD(miibus_readreg,       arge_miibus_readreg),
187         DEVMETHOD(miibus_writereg,      arge_miibus_writereg),
188         DEVMETHOD(miibus_statchg,       arge_miibus_statchg),
189
190         /* bus interface */
191         DEVMETHOD(bus_add_child,        device_add_child_ordered),
192         DEVMETHOD(bus_hinted_child,     arge_hinted_child),
193
194         DEVMETHOD_END
195 };
196
197 static driver_t arge_driver = {
198         "arge",
199         arge_methods,
200         sizeof(struct arge_softc)
201 };
202
203 static devclass_t arge_devclass;
204
205 DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
206 DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);
207
208 #if defined(ARGE_MDIO)
209 static int argemdio_probe(device_t);
210 static int argemdio_attach(device_t);
211 static int argemdio_detach(device_t);
212
213 /*
214  * Declare an additional, separate driver for accessing the MDIO bus.
215  */
216 static device_method_t argemdio_methods[] = {
217         /* Device interface */
218         DEVMETHOD(device_probe,         argemdio_probe),
219         DEVMETHOD(device_attach,        argemdio_attach),
220         DEVMETHOD(device_detach,        argemdio_detach),
221
222         /* bus interface */
223         DEVMETHOD(bus_add_child,        device_add_child_ordered),
224         
225         /* MDIO access */
226         DEVMETHOD(mdio_readreg,         arge_miibus_readreg),
227         DEVMETHOD(mdio_writereg,        arge_miibus_writereg),
            DEVMETHOD_END
228 };
229
230 DEFINE_CLASS_0(argemdio, argemdio_driver, argemdio_methods,
231     sizeof(struct arge_softc));
232 static devclass_t argemdio_devclass;
233
234 DRIVER_MODULE(miiproxy, arge, miiproxy_driver, miiproxy_devclass, 0, 0);
235 DRIVER_MODULE(argemdio, nexus, argemdio_driver, argemdio_devclass, 0, 0);
236 DRIVER_MODULE(mdio, argemdio, mdio_driver, mdio_devclass, 0, 0);
237 #endif
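
/*
 * Rough kernel configuration sketch for the ARGE_MDIO arrangement above
 * (option/device names assumed from this file's includes and the stock
 * ar71xx configurations; adjust for the actual board):
 *
 *   options  ARGE_MDIO
 *   device   mdio
 *   device   miiproxy
 *   device   etherswitch
 *   device   arge
 */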
238
239 /*
240  * RedBoot passes the MAC address to the kernel entry point as an
241  * environment variable; platform_start() parses it and stores it here.
242  */
243 extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];
244
245 static struct mtx miibus_mtx;
246
247 MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);
248
249 /*
250  * Flush the SoC DDR FIFO for this MAC unit.
251  */
252 static void
253 arge_flush_ddr(struct arge_softc *sc)
254 {
255
256         ar71xx_device_flush_ddr_ge(sc->arge_mac_unit);
257 }
258
259 static int
260 arge_probe(device_t dev)
261 {
262
263         device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
264         return (0);
265 }
266
267 static void
268 arge_attach_sysctl(device_t dev)
269 {
270         struct arge_softc *sc = device_get_softc(dev);
271         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
272         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
273
274 #ifdef  ARGE_DEBUG
275         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
276                 "debug", CTLFLAG_RW, &sc->arge_debug, 0,
277                 "arge interface debugging flags");
278 #endif
279
280         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
281                 "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
282                 "number of TX aligned packets");
283
284         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
285                 "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned,
286                 0, "number of TX unaligned packets");
287
288 #ifdef  ARGE_DEBUG
289         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod",
290             CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, "");
291         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons",
292             CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, "");
293         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt",
294             CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, "");
295 #endif
296 }
297
298 static void
299 arge_reset_mac(struct arge_softc *sc)
300 {
301         uint32_t reg;
302         uint32_t reset_reg;
303
304         /* Step 1. Soft-reset MAC */
305         ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
306         DELAY(20);
307
308         /* Step 2. Punt the MAC core from the central reset register */
309         /*
310          * XXX TODO: migrate this (and other) chip specific stuff into
311          * a chipdef method.
312          */
313         if (sc->arge_mac_unit == 0) {
314                 reset_reg = RST_RESET_GE0_MAC;
315         } else {
316                 reset_reg = RST_RESET_GE1_MAC;
317         }
318
319         /*
320          * AR934x (and later) also needs the MDIO block reset.
321          */
322         if (ar71xx_soc == AR71XX_SOC_AR9341 ||
323            ar71xx_soc == AR71XX_SOC_AR9342 ||
324            ar71xx_soc == AR71XX_SOC_AR9344) {
325                 if (sc->arge_mac_unit == 0) {
326                         reset_reg |= AR934X_RESET_GE0_MDIO;
327                 } else {
328                         reset_reg |= AR934X_RESET_GE1_MDIO;
329                 }
330         }
331         ar71xx_device_stop(reset_reg);
332         DELAY(100);
333         ar71xx_device_start(reset_reg);
334
335         /* Step 3. Reconfigure MAC block */
336         ARGE_WRITE(sc, AR71XX_MAC_CFG1,
337                 MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
338                 MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);
339
340         reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
341         reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD;
342         ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);
343
344         ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);
345 }
346
347 /*
348  * Fetch the MDIO bus clock rate.
349  *
350  * For now, the default is DIV_28 for everything
351  * bar AR934x, which will be DIV_42.
352  *
353  * It will definitely need updating to take into account
354  * the MDIO bus core clock rate and the target clock
355  * rate for the chip.
356  */
357 static uint32_t
358 arge_fetch_mdiobus_clock_rate(struct arge_softc *sc)
359 {
360
361         switch (ar71xx_soc) {
362         case AR71XX_SOC_AR9341:
363         case AR71XX_SOC_AR9342:
364         case AR71XX_SOC_AR9344:
365                 return (MAC_MII_CFG_CLOCK_DIV_42);
366         default:
367                 return (MAC_MII_CFG_CLOCK_DIV_28);
368         }
369 }
370
371 static void
372 arge_reset_miibus(struct arge_softc *sc)
373 {
374         uint32_t mdio_div;
375
376         mdio_div = arge_fetch_mdiobus_clock_rate(sc);
377
378         /*
379          * XXX AR934x and later; should we be also resetting the
380          * MDIO block(s) using the reset register block?
381          */
382
383         /* Reset MII bus; program in the default divisor */
384         ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET | mdio_div);
385         DELAY(100);
386         ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, mdio_div);
387         DELAY(100);
388 }
389
390 static void
391 arge_fetch_pll_config(struct arge_softc *sc)
392 {
393         long int val;
394
395         if (resource_long_value(device_get_name(sc->arge_dev),
396             device_get_unit(sc->arge_dev),
397             "pll_10", &val) == 0) {
398                 sc->arge_pllcfg.pll_10 = val;
399                 device_printf(sc->arge_dev, "%s: pll_10 = 0x%x\n",
400                     __func__, (int) val);
401         }
402         if (resource_long_value(device_get_name(sc->arge_dev),
403             device_get_unit(sc->arge_dev),
404             "pll_100", &val) == 0) {
405                 sc->arge_pllcfg.pll_100 = val;
406                 device_printf(sc->arge_dev, "%s: pll_100 = 0x%x\n",
407                     __func__, (int) val);
408         }
409         if (resource_long_value(device_get_name(sc->arge_dev),
410             device_get_unit(sc->arge_dev),
411             "pll_1000", &val) == 0) {
412                 sc->arge_pllcfg.pll_1000 = val;
413                 device_printf(sc->arge_dev, "%s: pll_1000 = 0x%x\n",
414                     __func__, (int) val);
415         }
416 }
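
/*
 * A hints sketch for the per-speed PLL overrides read above (the values
 * are purely illustrative; correct ones are board and SoC specific):
 *
 *   hint.arge.0.pll_10=0x00001313
 *   hint.arge.0.pll_100=0x00000101
 *   hint.arge.0.pll_1000=0x1a000000
 */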
417
418 static int
419 arge_attach(device_t dev)
420 {
421         struct ifnet            *ifp;
422         struct arge_softc       *sc;
423         int                     error = 0, rid;
424         uint32_t                rnd;
425         int                     is_base_mac_empty, i;
426         uint32_t                hint;
427         long                    eeprom_mac_addr = 0;
428         int                     miicfg = 0;
429         int                     readascii = 0;
430
431         sc = device_get_softc(dev);
432         sc->arge_dev = dev;
433         sc->arge_mac_unit = device_get_unit(dev);
434
435         /*
436          * Some units (eg the TP-Link WR-1043ND) do not have a convenient
437          * EEPROM location to read the ethernet MAC address from.
438          * OpenWRT simply snaffles it from a fixed location.
439          *
440          * Since multiple units seem to use this feature, include
441          * a method of setting the MAC address based on a flash location
442          * in CPU address space.
443          *
444          * Some vendors have decided to store the MAC address as a literal
445          * string of 18 characters in xx:xx:xx:xx:xx:xx format instead of
446          * an array of bytes.  Expose a hint ("readascii") to turn on this
447          * conversion, which is done via strtol().  See the hints sketch below.
448          */
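         /*
          * A hints sketch for the above (the flash offset is purely
          * illustrative and board specific):
          *
          *   hint.arge.0.eeprommac=0x1f01fc00
          *   hint.arge.0.readascii=1
          */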
449          if (resource_long_value(device_get_name(dev), device_get_unit(dev),
450             "eeprommac", &eeprom_mac_addr) == 0) {
451                 int i;
452                 const char *mac =
453                     (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
454                 device_printf(dev, "Overriding MAC from EEPROM\n");
455                 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
456                         "readascii", &readascii) == 0) {
457                         device_printf(dev, "Vendor stores MAC in ASCII format\n");
458                         for (i = 0; i < 6; i++) {
459                                 ar711_base_mac[i] = strtol(&(mac[i*3]), NULL, 16);
460                         }
461                 } else {
462                         for (i = 0; i < 6; i++) {
463                                 ar711_base_mac[i] = mac[i];
464                         }
465                 }
466         }
467
468         KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
469             ("if_arge: Only MAC0 and MAC1 supported"));
470
471         /*
472          * Fetch the PLL configuration.
473          */
474         arge_fetch_pll_config(sc);
475
476         /*
477          * Get the MII configuration, if applicable.
478          */
479         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
480             "miimode", &miicfg) == 0) {
481                 /* XXX bounds check? */
482                 device_printf(dev, "%s: overriding MII mode to '%s'\n",
483                     __func__, arge_miicfg_str[miicfg]);
484                 sc->arge_miicfg = miicfg;
485         }
486
487         /*
488          *  Get which PHY of 5 available we should use for this unit
489          */
490         if (resource_int_value(device_get_name(dev), device_get_unit(dev), 
491             "phymask", &sc->arge_phymask) != 0) {
492                 /*
493                  * Use PHY 4 (the WAN port) for GE0.  For the other MAC,
494                  * use the remaining PHYs 0..3 (the LAN ports).
495                  */
496                 if (sc->arge_mac_unit == 0)
497                         sc->arge_phymask = (1 << 4);
498                 else
499                         /* Use all phys up to 4 */
500                         sc->arge_phymask = (1 << 4) - 1;
501
502                 device_printf(dev, "No PHY specified, using mask %d\n", sc->arge_phymask);
503         }
504
505         /*
506          *  Get the default media & duplex mode; by default it's 100BaseTX
507          *  and full duplex (see the example hints below).
508          */
509         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
510             "media", &hint) != 0)
511                 hint = 0;
512
513         if (hint == 1000)
514                 sc->arge_media_type = IFM_1000_T;
515         else
516                 sc->arge_media_type = IFM_100_TX;
517
518         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
519             "fduplex", &hint) != 0)
520                 hint = 1;
521
522         if (hint)
523                 sc->arge_duplex_mode = IFM_FDX;
524         else
525                 sc->arge_duplex_mode = 0;
526
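        /*
         * Example hints covering the knobs read above (values are
         * illustrative, not defaults):
         *
         *   hint.arge.0.phymask=0x10     # PHY 4 only
         *   hint.arge.0.miimode=3        # RGMII, per arge_miicfg_str[]
         *   hint.arge.0.media=1000
         *   hint.arge.0.fduplex=1
         */
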
527         mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
528             MTX_DEF);
529         callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
530         TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);
531
532         /* Map control/status registers. */
533         sc->arge_rid = 0;
534         sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
535             &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE);
536
537         if (sc->arge_res == NULL) {
538                 device_printf(dev, "couldn't map memory\n");
539                 error = ENXIO;
540                 goto fail;
541         }
542
543         /* Allocate interrupts */
544         rid = 0;
545         sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
546             RF_SHAREABLE | RF_ACTIVE);
547
548         if (sc->arge_irq == NULL) {
549                 device_printf(dev, "couldn't map interrupt\n");
550                 error = ENXIO;
551                 goto fail;
552         }
553
554         /* Allocate ifnet structure. */
555         ifp = sc->arge_ifp = if_alloc(IFT_ETHER);
556
557         if (ifp == NULL) {
558                 device_printf(dev, "couldn't allocate ifnet structure\n");
559                 error = ENOSPC;
560                 goto fail;
561         }
562
563         ifp->if_softc = sc;
564         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
565         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
566         ifp->if_ioctl = arge_ioctl;
567         ifp->if_start = arge_start;
568         ifp->if_init = arge_init;
569         sc->arge_if_flags = ifp->if_flags;
570
571         /* XXX: add real size */
572         IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
573         ifp->if_snd.ifq_maxlen = ifqmaxlen;
574         IFQ_SET_READY(&ifp->if_snd);
575
576         ifp->if_capenable = ifp->if_capabilities;
577 #ifdef DEVICE_POLLING
578         ifp->if_capabilities |= IFCAP_POLLING;
579 #endif
580
581         is_base_mac_empty = 1;
582         for (i = 0; i < ETHER_ADDR_LEN; i++) {
583                 sc->arge_eaddr[i] = ar711_base_mac[i] & 0xff;
584                 if (sc->arge_eaddr[i] != 0)
585                         is_base_mac_empty = 0;
586         }
587
588         if (is_base_mac_empty) {
589                 /*
590                  * No MAC address configured. Generate a random one.
591                  */
592                 if  (bootverbose)
593                         device_printf(dev,
594                             "Generating random ethernet address.\n");
595
596                 rnd = arc4random();
597                 sc->arge_eaddr[0] = 'b';
598                 sc->arge_eaddr[1] = 's';
599                 sc->arge_eaddr[2] = 'd';
600                 sc->arge_eaddr[3] = (rnd >> 24) & 0xff;
601                 sc->arge_eaddr[4] = (rnd >> 16) & 0xff;
602                 sc->arge_eaddr[5] = (rnd >> 8) & 0xff;
603         }
604         if (sc->arge_mac_unit != 0)
605                 sc->arge_eaddr[5] +=  sc->arge_mac_unit;
606
607         if (arge_dma_alloc(sc) != 0) {
608                 error = ENXIO;
609                 goto fail;
610         }
611
612         /*
613          * Don't do this for the MDIO bus case - it's already done
614          * as part of the MDIO bus attachment.
615          */
616 #if !defined(ARGE_MDIO)
617         /* Initialize the MAC block */
618         arge_reset_mac(sc);
619         arge_reset_miibus(sc);
620 #endif
621
622         /* Configure the MII interface mode, if a hint overrode it */
623         if (sc->arge_miicfg != 0)
624                 ar71xx_device_set_mii_if(sc->arge_mac_unit, sc->arge_miicfg);
625
626         /*
627          * Program the station MAC address into the two
628          * MAC_STA_ADDR registers.
629          */
630         ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1, (sc->arge_eaddr[2] << 24)
631             | (sc->arge_eaddr[3] << 16) | (sc->arge_eaddr[4] << 8)
632             | sc->arge_eaddr[5]);
633         ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (sc->arge_eaddr[0] << 8)
634             | sc->arge_eaddr[1]);
635
636         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
637             FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);
638
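        /*
         * Program the SoC-specific FIFO configuration magic; the
         * AR724x/AR933x/AR934x family uses different values from the
         * older AR71xx/AR913x parts handled in the default case.
         */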
639         switch (ar71xx_soc) {
640                 case AR71XX_SOC_AR7240:
641                 case AR71XX_SOC_AR7241:
642                 case AR71XX_SOC_AR7242:
643                 case AR71XX_SOC_AR9330:
644                 case AR71XX_SOC_AR9331:
645                 case AR71XX_SOC_AR9341:
646                 case AR71XX_SOC_AR9342:
647                 case AR71XX_SOC_AR9344:
648                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
649                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
650                         break;
651                 /* AR71xx, AR913x */
652                 default:
653                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
654                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
655         }
656
657         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
658             FIFO_RX_FILTMATCH_DEFAULT);
659
660         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
661             FIFO_RX_FILTMASK_DEFAULT);
662
663 #if defined(ARGE_MDIO)
664         sc->arge_miiproxy = mii_attach_proxy(sc->arge_dev);
665 #endif
666
667         device_printf(sc->arge_dev, "finishing attachment, phymask %04x"
668             ", proxy %s \n", sc->arge_phymask, sc->arge_miiproxy == NULL ?
669             "null" : "set");
670         for (i = 0; i < ARGE_NPHY; i++) {
671                 if (((1 << i) & sc->arge_phymask) != 0) {
672                         error = mii_attach(sc->arge_miiproxy != NULL ?
673                             sc->arge_miiproxy : sc->arge_dev,
674                             &sc->arge_miibus, sc->arge_ifp,
675                             arge_ifmedia_upd, arge_ifmedia_sts,
676                             BMSR_DEFCAPMASK, i, MII_OFFSET_ANY, 0);
677                         if (error != 0) {
678                                 device_printf(sc->arge_dev, "unable to attach"
679                                     " PHY %d: %d\n", i, error);
680                                 goto fail;
681                         }
682                 }
683         }
684         if (sc->arge_miibus == NULL) {
685                 /* no PHY, so use hard-coded values */
686                 ifmedia_init(&sc->arge_ifmedia, 0, 
687                     arge_multiphy_mediachange,
688                     arge_multiphy_mediastatus);
689                 ifmedia_add(&sc->arge_ifmedia,
690                     IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode,
691                     0, NULL);
692                 ifmedia_set(&sc->arge_ifmedia,
693                     IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode);
694                 arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
695         }
696
697         /* Call MI attach routine. */
698         ether_ifattach(sc->arge_ifp, sc->arge_eaddr);
699
700         /* Hook interrupt last to avoid having to lock softc */
701         error = bus_setup_intr(sc->arge_dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
702             arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);
703
704         if (error) {
705                 device_printf(sc->arge_dev, "couldn't set up irq\n");
706                 ether_ifdetach(sc->arge_ifp);
707                 goto fail;
708         }
709
710         /* setup sysctl variables */
711         arge_attach_sysctl(sc->arge_dev);
712
713 fail:
714         if (error) 
715                 arge_detach(dev);
716
717         return (error);
718 }
719
720 static int
721 arge_detach(device_t dev)
722 {
723         struct arge_softc       *sc = device_get_softc(dev);
724         struct ifnet            *ifp = sc->arge_ifp;
725
726         KASSERT(mtx_initialized(&sc->arge_mtx),
727             ("arge mutex not initialized"));
728
729         /* These should only be active if attach succeeded */
730         if (device_is_attached(dev)) {
731                 ARGE_LOCK(sc);
732                 sc->arge_detach = 1;
733 #ifdef DEVICE_POLLING
734                 if (ifp->if_capenable & IFCAP_POLLING)
735                         ether_poll_deregister(ifp);
736 #endif
737
738                 arge_stop(sc);
739                 ARGE_UNLOCK(sc);
740                 taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
741                 ether_ifdetach(ifp);
742         }
743
744         if (sc->arge_miibus)
745                 device_delete_child(dev, sc->arge_miibus);
746
747         if (sc->arge_miiproxy)
748                 device_delete_child(dev, sc->arge_miiproxy);
749
750         bus_generic_detach(dev);
751
752         if (sc->arge_intrhand)
753                 bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);
754
755         if (sc->arge_res)
756                 bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
757                     sc->arge_res);
758
759         if (ifp)
760                 if_free(ifp);
761
762         arge_dma_free(sc);
763
764         mtx_destroy(&sc->arge_mtx);
765
766         return (0);
767
768 }
769
770 static int
771 arge_suspend(device_t dev)
772 {
773
774         panic("%s", __func__);
775         return 0;
776 }
777
778 static int
779 arge_resume(device_t dev)
780 {
781
782         panic("%s", __func__);
783         return 0;
784 }
785
786 static int
787 arge_shutdown(device_t dev)
788 {
789         struct arge_softc       *sc;
790
791         sc = device_get_softc(dev);
792
793         ARGE_LOCK(sc);
794         arge_stop(sc);
795         ARGE_UNLOCK(sc);
796
797         return (0);
798 }
799
800 static void
801 arge_hinted_child(device_t bus, const char *dname, int dunit)
802 {
803         BUS_ADD_CHILD(bus, 0, dname, dunit);
804         device_printf(bus, "hinted child %s%d\n", dname, dunit);
805 }
806
807 static int
808 arge_miibus_readreg(device_t dev, int phy, int reg)
809 {
810         struct arge_softc * sc = device_get_softc(dev);
811         int i, result;
812         uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
813             | (reg & MAC_MII_REG_MASK);
814
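        /*
         * Read sequence: reset the command register, program the
         * PHY/register address, trigger the read, poll the busy
         * indicator (with a timeout), then fetch the result from the
         * status register.
         */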
815         mtx_lock(&miibus_mtx);
816         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
817         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
818         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);
819
820         i = ARGE_MII_TIMEOUT;
821         while ((ARGE_MDIO_READ(sc, AR71XX_MAC_MII_INDICATOR) & 
822             MAC_MII_INDICATOR_BUSY) && (i--))
823                 DELAY(5);
824
825         if (i < 0) {
826                 mtx_unlock(&miibus_mtx);
827                 ARGEDEBUG(sc, ARGE_DBG_MII, "%s: timed out\n", __func__);
828                 /* XXX: return an errno instead? */
829                 return (-1);
830         }
831
832         result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
833         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
834         mtx_unlock(&miibus_mtx);
835
836         ARGEDEBUG(sc, ARGE_DBG_MII,
837             "%s: phy=%d, reg=%02x, value[%08x]=%04x\n",
838             __func__, phy, reg, addr, result);
839
840         return (result);
841 }
842
843 static int
844 arge_miibus_writereg(device_t dev, int phy, int reg, int data)
845 {
846         struct arge_softc * sc = device_get_softc(dev);
847         int i;
848         uint32_t addr =
849             (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);
850
851         ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__, 
852             phy, reg, data);
853
854         mtx_lock(&miibus_mtx);
855         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
856         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CONTROL, data);
857
858         i = ARGE_MII_TIMEOUT;
859         while ((ARGE_MDIO_READ(sc, AR71XX_MAC_MII_INDICATOR) & 
860             MAC_MII_INDICATOR_BUSY) && (i--))
861                 DELAY(5);
862
863         mtx_unlock(&miibus_mtx);
864
865         if (i < 0) {
866                 ARGEDEBUG(sc, ARGE_DBG_MII, "%s: timed out\n", __func__);
867                 /* XXX: return an errno instead? */
868                 return (-1);
869         }
870
871         return (0);
872 }
873
874 static void
875 arge_miibus_statchg(device_t dev)
876 {
877         struct arge_softc       *sc;
878
879         sc = device_get_softc(dev);
880         taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
881 }
882
883 static void
884 arge_link_task(void *arg, int pending)
885 {
886         struct arge_softc       *sc;
887         sc = (struct arge_softc *)arg;
888
889         ARGE_LOCK(sc);
890         arge_update_link_locked(sc);
891         ARGE_UNLOCK(sc);
892 }
893
894 static void
895 arge_update_link_locked(struct arge_softc *sc)
896 {
897         struct mii_data         *mii;
898         struct ifnet            *ifp;
899         uint32_t                media, duplex;
900
901         mii = device_get_softc(sc->arge_miibus);
902         ifp = sc->arge_ifp;
903         if (mii == NULL || ifp == NULL ||
904             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
905                 return;
906         }
907
908         if (mii->mii_media_status & IFM_ACTIVE) {
909
910                 media = IFM_SUBTYPE(mii->mii_media_active);
911                 if (media != IFM_NONE) {
912                         sc->arge_link_status = 1;
913                         duplex = mii->mii_media_active & IFM_GMASK;
914                         ARGEDEBUG(sc, ARGE_DBG_MII, "%s: media=%d, duplex=%d\n",
915                             __func__,
916                             media,
917                             duplex);
918                         arge_set_pll(sc, media, duplex);
919                 }
920         } else {
921                 sc->arge_link_status = 0;
922         }
923 }
924
925 static void
926 arge_set_pll(struct arge_softc *sc, int media, int duplex)
927 {
928         uint32_t                cfg, ifcontrol, rx_filtmask;
929         uint32_t                fifo_tx, pll;
930         int if_speed;
931
932         ARGEDEBUG(sc, ARGE_DBG_PLL, "set_pll(%04x, %s)\n", media,
933             duplex == IFM_FDX ? "full" : "half");
934         cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
935         cfg &= ~(MAC_CFG2_IFACE_MODE_1000
936             | MAC_CFG2_IFACE_MODE_10_100
937             | MAC_CFG2_FULL_DUPLEX);
938
939         if (duplex == IFM_FDX)
940                 cfg |= MAC_CFG2_FULL_DUPLEX;
941
942         ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
943         ifcontrol &= ~MAC_IFCONTROL_SPEED;
944         rx_filtmask =
945             ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
946         rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;
947
948         switch(media) {
949         case IFM_10_T:
950                 cfg |= MAC_CFG2_IFACE_MODE_10_100;
951                 if_speed = 10;
952                 break;
953         case IFM_100_TX:
954                 cfg |= MAC_CFG2_IFACE_MODE_10_100;
955                 ifcontrol |= MAC_IFCONTROL_SPEED;
956                 if_speed = 100;
957                 break;
958         case IFM_1000_T:
959         case IFM_1000_SX:
960                 cfg |= MAC_CFG2_IFACE_MODE_1000;
961                 rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
962                 if_speed = 1000;
963                 break;
964         default:
965                 if_speed = 100;
966                 device_printf(sc->arge_dev,
967                     "Unknown media %d\n", media);
968         }
969
970         ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: if_speed=%d\n", __func__, if_speed);
971
972         switch (ar71xx_soc) {
973                 case AR71XX_SOC_AR7240:
974                 case AR71XX_SOC_AR7241:
975                 case AR71XX_SOC_AR7242:
976                 case AR71XX_SOC_AR9330:
977                 case AR71XX_SOC_AR9331:
978                 case AR71XX_SOC_AR9341:
979                 case AR71XX_SOC_AR9342:
980                 case AR71XX_SOC_AR9344:
981                         fifo_tx = 0x01f00140;
982                         break;
983                 case AR71XX_SOC_AR9130:
984                 case AR71XX_SOC_AR9132:
985                         fifo_tx = 0x00780fff;
986                         break;
987                 /* AR71xx */
988                 default:
989                         fifo_tx = 0x008001ff;
990         }
991
992         ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
993         ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
994         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
995             rx_filtmask);
996         ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx);
997
998         /* fetch PLL registers */
999         pll = ar71xx_device_get_eth_pll(sc->arge_mac_unit, if_speed);
1000         ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: pll=0x%x\n", __func__, pll);
1001
1002         /* Override if required by platform data */
1003         if (if_speed == 10 && sc->arge_pllcfg.pll_10 != 0)
1004                 pll = sc->arge_pllcfg.pll_10;
1005         else if (if_speed == 100 && sc->arge_pllcfg.pll_100 != 0)
1006                 pll = sc->arge_pllcfg.pll_100;
1007         else if (if_speed == 1000 && sc->arge_pllcfg.pll_1000 != 0)
1008                 pll = sc->arge_pllcfg.pll_1000;
1009         ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: final pll=0x%x\n", __func__, pll);
1010
1011         /* XXX ensure pll != 0 */
1012         ar71xx_device_set_pll_ge(sc->arge_mac_unit, if_speed, pll);
1013
1014         /* set MII registers */
1015         /*
1016          * This was introduced to match what the Linux ag71xx ethernet
1017          * driver does.  For the AR71xx case, it does set the port
1018          * MII speed.  However, if this is done, non-gigabit speeds
1019          * are not at all reliable when speaking via RGMII through
1020          * 'bridge' PHY port that's pretending to be a local PHY.
1021          *
1022          * Until that gets root caused, and until an AR71xx + normal
1023          * PHY board is tested, leave this disabled.
1024          */
1025 #if 0
1026         ar71xx_device_set_mii_speed(sc->arge_mac_unit, if_speed);
1027 #endif
1028 }
1029
1030
1031 static void
1032 arge_reset_dma(struct arge_softc *sc)
1033 {
1034         ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
1035         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);
1036
1037         ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
1038         ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);
1039
1040         /* Clear all possible RX interrupts */
1041         while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
1042                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
1043
1044         /*
1045          * Clear all possible TX interrupts
1046          */
1047         while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
1048                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
1049
1050         /*
1051          * Now Rx/Tx errors
1052          */
1053         ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
1054             DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
1055         ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
1056             DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
1057
1058         /*
1059          * Force a DDR flush so any pending data is properly
1060          * flushed to RAM before underlying buffers are freed.
1061          */
1062         arge_flush_ddr(sc);
1063 }
1064
1065
1066
1067 static void
1068 arge_init(void *xsc)
1069 {
1070         struct arge_softc        *sc = xsc;
1071
1072         ARGE_LOCK(sc);
1073         arge_init_locked(sc);
1074         ARGE_UNLOCK(sc);
1075 }
1076
1077 static void
1078 arge_init_locked(struct arge_softc *sc)
1079 {
1080         struct ifnet            *ifp = sc->arge_ifp;
1081         struct mii_data         *mii;
1082
1083         ARGE_LOCK_ASSERT(sc);
1084
1085         if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1086                 return;
1087
1088         /* Init circular RX list. */
1089         if (arge_rx_ring_init(sc) != 0) {
1090                 device_printf(sc->arge_dev,
1091                     "initialization failed: no memory for rx buffers\n");
1092                 arge_stop(sc);
1093                 return;
1094         }
1095
1096         /* Init tx descriptors. */
1097         arge_tx_ring_init(sc);
1098
1099         arge_reset_dma(sc);
1100
1101         if (sc->arge_miibus) {
1102                 mii = device_get_softc(sc->arge_miibus);
1103                 mii_mediachg(mii);
1104         }
1105         else {
1106                 /*
1107                  * The sun always shines over a multiPHY interface; the link is always up.
1108                  */
1109                 sc->arge_link_status = 1;
1110         }
1111
1112         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1113         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1114
1115         if (sc->arge_miibus) {
1116                 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
1117                 arge_update_link_locked(sc);
1118         }
1119
1120         ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
1121         ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));
1122
1123         /* Start listening */
1124         ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
1125
1126         /* Enable interrupts */
1127         ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1128 }
1129
1130 /*
1131  * Return whether the mbuf chain is correctly aligned
1132  * for the arge TX engine.
1133  *
1134  * The TX engine requires each fragment to be aligned to a
1135  * 4 byte boundary and the size of each fragment except
1136  * the last to be a multiple of 4 bytes.
1137  */
1138 static int
1139 arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
1140 {
1141         struct mbuf *m;
1142
1143         for (m = m0; m != NULL; m = m->m_next) {
1144                 if((mtod(m, intptr_t) & 3) != 0)
1145                         return 0;
1146                 if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
1147                         return 0;
1148         }
1149         return 1;
1150 }
1151
1152 /*
1153  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1154  * pointers to the fragment pointers.
1155  */
1156 static int
1157 arge_encap(struct arge_softc *sc, struct mbuf **m_head)
1158 {
1159         struct arge_txdesc      *txd;
1160         struct arge_desc        *desc, *prev_desc;
1161         bus_dma_segment_t       txsegs[ARGE_MAXFRAGS];
1162         int                     error, i, nsegs, prod, prev_prod;
1163         struct mbuf             *m;
1164
1165         ARGE_LOCK_ASSERT(sc);
1166
1167         /*
1168          * Fix up the mbuf chain: every fragment must be 4-byte aligned,
1169          * and every fragment except the last a multiple of 4 bytes long.
1170          */
1171         m = *m_head;
1172         if (! arge_mbuf_chain_is_tx_aligned(m)) {
1173                 sc->stats.tx_pkts_unaligned++;
1174                 m = m_defrag(*m_head, M_NOWAIT);
1175                 if (m == NULL) {
1176                         *m_head = NULL;
1177                         return (ENOBUFS);
1178                 }
1179                 *m_head = m;
1180         } else
1181                 sc->stats.tx_pkts_aligned++;
1182
1183         prod = sc->arge_cdata.arge_tx_prod;
1184         txd = &sc->arge_cdata.arge_txdesc[prod];
1185         error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
1186             txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1187
1188         if (error == EFBIG) {
1189                 panic("EFBIG");
1190         } else if (error != 0)
1191                 return (error);
1192
1193         if (nsegs == 0) {
1194                 m_freem(*m_head);
1195                 *m_head = NULL;
1196                 return (EIO);
1197         }
1198
1199         /* Check number of available descriptors. */
1200         if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
1201                 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
1202                 return (ENOBUFS);
1203         }
1204
1205         txd->tx_m = *m_head;
1206         bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
1207             BUS_DMASYNC_PREWRITE);
1208
1209         /*
1210          * Build the list of descriptors for this packet.  The DMA
1211          * engine follows the chain for as long as the MORE bit is set.
1212          */
1213         prev_prod = prod;
1214         desc = prev_desc = NULL;
1215         for (i = 0; i < nsegs; i++) {
1216                 desc = &sc->arge_rdata.arge_tx_ring[prod];
1217                 desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);
1218
1219                 if (txsegs[i].ds_addr & 3)
1220                         panic("TX packet address unaligned\n");
1221
1222                 desc->packet_addr = txsegs[i].ds_addr;
1223
1224                 /* link with previous descriptor */
1225                 if (prev_desc)
1226                         prev_desc->packet_ctrl |= ARGE_DESC_MORE;
1227
1228                 sc->arge_cdata.arge_tx_cnt++;
1229                 prev_desc = desc;
1230                 ARGE_INC(prod, ARGE_TX_RING_COUNT);
1231         }
1232
1233         /* Update producer index. */
1234         sc->arge_cdata.arge_tx_prod = prod;
1235
1236         /* Sync descriptors. */
1237         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1238             sc->arge_cdata.arge_tx_ring_map,
1239             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1240
1241         /* Start transmitting */
1242         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: setting DMA_TX_CONTROL_EN\n",
1243             __func__);
1244         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
1245         return (0);
1246 }
1247
1248 static void
1249 arge_start(struct ifnet *ifp)
1250 {
1251         struct arge_softc        *sc;
1252
1253         sc = ifp->if_softc;
1254
1255         ARGE_LOCK(sc);
1256         arge_start_locked(ifp);
1257         ARGE_UNLOCK(sc);
1258 }
1259
1260 static void
1261 arge_start_locked(struct ifnet *ifp)
1262 {
1263         struct arge_softc       *sc;
1264         struct mbuf             *m_head;
1265         int                     enq = 0;
1266
1267         sc = ifp->if_softc;
1268
1269         ARGE_LOCK_ASSERT(sc);
1270
1271         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: beginning\n", __func__);
1272
1273         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1274             IFF_DRV_RUNNING || sc->arge_link_status == 0 )
1275                 return;
1276
1277         /*
1278          * Before we go any further, check whether we're already full.
1279          * The below check errors out immediately if the ring is full
1280          * and never gets a chance to set this flag. Although it's
1281          * likely never needed, this at least avoids an unexpected
1282          * situation.
1283          */
1284         if (sc->arge_cdata.arge_tx_cnt >= ARGE_TX_RING_COUNT - 2) {
1285                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1286                 ARGEDEBUG(sc, ARGE_DBG_ERR,
1287                     "%s: tx_cnt %d >= max %d; setting IFF_DRV_OACTIVE\n",
1288                     __func__, sc->arge_cdata.arge_tx_cnt,
1289                     ARGE_TX_RING_COUNT - 2);
1290                 return;
1291         }
1292
1293         arge_flush_ddr(sc);
1294
1295         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1296             sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
1297                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1298                 if (m_head == NULL)
1299                         break;
1300
1301
1302                 /*
1303                  * Pack the data into the transmit ring.
1304                  */
1305                 if (arge_encap(sc, &m_head)) {
1306                         if (m_head == NULL)
1307                                 break;
1308                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1309                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1310                         break;
1311                 }
1312
1313                 enq++;
1314                 /*
1315                  * If there's a BPF listener, bounce a copy of this frame
1316                  * to him.
1317                  */
1318                 ETHER_BPF_MTAP(ifp, m_head);
1319         }
1320         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: finished; queued %d packets\n",
1321             __func__, enq);
1322 }
1323
1324 static void
1325 arge_stop(struct arge_softc *sc)
1326 {
1327         struct ifnet        *ifp;
1328
1329         ARGE_LOCK_ASSERT(sc);
1330
1331         ifp = sc->arge_ifp;
1332         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1333         if (sc->arge_miibus)
1334                 callout_stop(&sc->arge_stat_callout);
1335
1336         /* mask out interrupts */
1337         ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1338
1339         arge_reset_dma(sc);
1340
1341         /* Flush FIFO and free any existing mbufs */
1342         arge_flush_ddr(sc);
1343         arge_rx_ring_free(sc);
1344         arge_tx_ring_free(sc);
1345 }
1346
1347
1348 static int
1349 arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1350 {
1351         struct arge_softc               *sc = ifp->if_softc;
1352         struct ifreq            *ifr = (struct ifreq *) data;
1353         struct mii_data         *mii;
1354         int                     error;
1355 #ifdef DEVICE_POLLING
1356         int                     mask;
1357 #endif
1358
1359         switch (command) {
1360         case SIOCSIFFLAGS:
1361                 ARGE_LOCK(sc);
1362                 if ((ifp->if_flags & IFF_UP) != 0) {
1363                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1364                                 if (((ifp->if_flags ^ sc->arge_if_flags)
1365                                     & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1366                                         /* XXX: handle promisc & multi flags */
1367                                 }
1368
1369                         } else {
1370                                 if (!sc->arge_detach)
1371                                         arge_init_locked(sc);
1372                         }
1373                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1374                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1375                         arge_stop(sc);
1376                 }
1377                 sc->arge_if_flags = ifp->if_flags;
1378                 ARGE_UNLOCK(sc);
1379                 error = 0;
1380                 break;
1381         case SIOCADDMULTI:
1382         case SIOCDELMULTI:
1383                 /* XXX: implement SIOCDELMULTI */
1384                 error = 0;
1385                 break;
1386         case SIOCGIFMEDIA:
1387         case SIOCSIFMEDIA:
1388                 if (sc->arge_miibus) {
1389                         mii = device_get_softc(sc->arge_miibus);
1390                         error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
1391                             command);
1392                 }
1393                 else
1394                         error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia,
1395                             command);
1396                 break;
1397         case SIOCSIFCAP:
1398                 /* XXX: Check other capabilities */
1399 #ifdef DEVICE_POLLING
1400                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1401                 if (mask & IFCAP_POLLING) {
1402                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
1403                                 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1404                                 error = ether_poll_register(arge_poll, ifp);
1405                                 if (error)
1406                                         return error;
1407                                 ARGE_LOCK(sc);
1408                                 ifp->if_capenable |= IFCAP_POLLING;
1409                                 ARGE_UNLOCK(sc);
1410                         } else {
1411                                 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1412                                 error = ether_poll_deregister(ifp);
1413                                 ARGE_LOCK(sc);
1414                                 ifp->if_capenable &= ~IFCAP_POLLING;
1415                                 ARGE_UNLOCK(sc);
1416                         }
1417                 }
1418                 error = 0;
1419                 break;
1420 #endif
1421         default:
1422                 error = ether_ioctl(ifp, command, data);
1423                 break;
1424         }
1425
1426         return (error);
1427 }
1428
1429 /*
1430  * Set media options.
1431  */
1432 static int
1433 arge_ifmedia_upd(struct ifnet *ifp)
1434 {
1435         struct arge_softc               *sc;
1436         struct mii_data         *mii;
1437         struct mii_softc        *miisc;
1438         int                     error;
1439
1440         sc = ifp->if_softc;
1441         ARGE_LOCK(sc);
1442         mii = device_get_softc(sc->arge_miibus);
1443         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1444                 PHY_RESET(miisc);
1445         error = mii_mediachg(mii);
1446         ARGE_UNLOCK(sc);
1447
1448         return (error);
1449 }
1450
1451 /*
1452  * Report current media status.
1453  */
1454 static void
1455 arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1456 {
1457         struct arge_softc               *sc = ifp->if_softc;
1458         struct mii_data         *mii;
1459
1460         mii = device_get_softc(sc->arge_miibus);
1461         ARGE_LOCK(sc);
1462         mii_pollstat(mii);
1463         ifmr->ifm_active = mii->mii_media_active;
1464         ifmr->ifm_status = mii->mii_media_status;
1465         ARGE_UNLOCK(sc);
1466 }
1467
1468 struct arge_dmamap_arg {
1469         bus_addr_t      arge_busaddr;
1470 };
1471
1472 static void
1473 arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1474 {
1475         struct arge_dmamap_arg  *ctx;
1476
1477         if (error != 0)
1478                 return;
1479         ctx = arg;
1480         ctx->arge_busaddr = segs[0].ds_addr;
1481 }
1482
1483 static int
1484 arge_dma_alloc(struct arge_softc *sc)
1485 {
1486         struct arge_dmamap_arg  ctx;
1487         struct arge_txdesc      *txd;
1488         struct arge_rxdesc      *rxd;
1489         int                     error, i;
1490
1491         /* Create parent DMA tag. */
1492         error = bus_dma_tag_create(
1493             bus_get_dma_tag(sc->arge_dev),      /* parent */
1494             1, 0,                       /* alignment, boundary */
1495             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1496             BUS_SPACE_MAXADDR,          /* highaddr */
1497             NULL, NULL,                 /* filter, filterarg */
1498             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1499             0,                          /* nsegments */
1500             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1501             0,                          /* flags */
1502             NULL, NULL,                 /* lockfunc, lockarg */
1503             &sc->arge_cdata.arge_parent_tag);
1504         if (error != 0) {
1505                 device_printf(sc->arge_dev,
1506                     "failed to create parent DMA tag\n");
1507                 goto fail;
1508         }
1509         /* Create tag for Tx ring. */
1510         error = bus_dma_tag_create(
1511             sc->arge_cdata.arge_parent_tag,     /* parent */
1512             ARGE_RING_ALIGN, 0,         /* alignment, boundary */
1513             BUS_SPACE_MAXADDR,          /* lowaddr */
1514             BUS_SPACE_MAXADDR,          /* highaddr */
1515             NULL, NULL,                 /* filter, filterarg */
1516             ARGE_TX_DMA_SIZE,           /* maxsize */
1517             1,                          /* nsegments */
1518             ARGE_TX_DMA_SIZE,           /* maxsegsize */
1519             0,                          /* flags */
1520             NULL, NULL,                 /* lockfunc, lockarg */
1521             &sc->arge_cdata.arge_tx_ring_tag);
1522         if (error != 0) {
1523                 device_printf(sc->arge_dev,
1524                     "failed to create Tx ring DMA tag\n");
1525                 goto fail;
1526         }
1527
1528         /* Create tag for Rx ring. */
1529         error = bus_dma_tag_create(
1530             sc->arge_cdata.arge_parent_tag,     /* parent */
1531             ARGE_RING_ALIGN, 0,         /* alignment, boundary */
1532             BUS_SPACE_MAXADDR,          /* lowaddr */
1533             BUS_SPACE_MAXADDR,          /* highaddr */
1534             NULL, NULL,                 /* filter, filterarg */
1535             ARGE_RX_DMA_SIZE,           /* maxsize */
1536             1,                          /* nsegments */
1537             ARGE_RX_DMA_SIZE,           /* maxsegsize */
1538             0,                          /* flags */
1539             NULL, NULL,                 /* lockfunc, lockarg */
1540             &sc->arge_cdata.arge_rx_ring_tag);
1541         if (error != 0) {
1542                 device_printf(sc->arge_dev,
1543                     "failed to create Rx ring DMA tag\n");
1544                 goto fail;
1545         }
1546
1547         /* Create tag for Tx buffers. */
1548         error = bus_dma_tag_create(
1549             sc->arge_cdata.arge_parent_tag,     /* parent */
1550             sizeof(uint32_t), 0,        /* alignment, boundary */
1551             BUS_SPACE_MAXADDR,          /* lowaddr */
1552             BUS_SPACE_MAXADDR,          /* highaddr */
1553             NULL, NULL,                 /* filter, filterarg */
1554             MCLBYTES * ARGE_MAXFRAGS,   /* maxsize */
1555             ARGE_MAXFRAGS,              /* nsegments */
1556             MCLBYTES,                   /* maxsegsize */
1557             0,                          /* flags */
1558             NULL, NULL,                 /* lockfunc, lockarg */
1559             &sc->arge_cdata.arge_tx_tag);
1560         if (error != 0) {
1561                 device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
1562                 goto fail;
1563         }
1564
1565         /* Create tag for Rx buffers. */
1566         error = bus_dma_tag_create(
1567             sc->arge_cdata.arge_parent_tag,     /* parent */
1568             ARGE_RX_ALIGN, 0,           /* alignment, boundary */
1569             BUS_SPACE_MAXADDR,          /* lowaddr */
1570             BUS_SPACE_MAXADDR,          /* highaddr */
1571             NULL, NULL,                 /* filter, filterarg */
1572             MCLBYTES,                   /* maxsize */
1573             ARGE_MAXFRAGS,              /* nsegments */
1574             MCLBYTES,                   /* maxsegsize */
1575             0,                          /* flags */
1576             NULL, NULL,                 /* lockfunc, lockarg */
1577             &sc->arge_cdata.arge_rx_tag);
1578         if (error != 0) {
1579                 device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
1580                 goto fail;
1581         }
1582
1583         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1584         error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
1585             (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
1586             BUS_DMA_COHERENT | BUS_DMA_ZERO,
1587             &sc->arge_cdata.arge_tx_ring_map);
1588         if (error != 0) {
1589                 device_printf(sc->arge_dev,
1590                     "failed to allocate DMA'able memory for Tx ring\n");
1591                 goto fail;
1592         }
1593
1594         ctx.arge_busaddr = 0;
1595         error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
1596             sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
1597             ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1598         if (error != 0 || ctx.arge_busaddr == 0) {
1599                 device_printf(sc->arge_dev,
1600                     "failed to load DMA'able memory for Tx ring\n");
1601                 goto fail;
1602         }
1603         sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;
1604
1605         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1606         error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
1607             (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
1608             BUS_DMA_COHERENT | BUS_DMA_ZERO,
1609             &sc->arge_cdata.arge_rx_ring_map);
1610         if (error != 0) {
1611                 device_printf(sc->arge_dev,
1612                     "failed to allocate DMA'able memory for Rx ring\n");
1613                 goto fail;
1614         }
1615
1616         ctx.arge_busaddr = 0;
1617         error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
1618             sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
1619             ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1620         if (error != 0 || ctx.arge_busaddr == 0) {
1621                 device_printf(sc->arge_dev,
1622                     "failed to load DMA'able memory for Rx ring\n");
1623                 goto fail;
1624         }
1625         sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;
1626
1627         /* Create DMA maps for Tx buffers. */
1628         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1629                 txd = &sc->arge_cdata.arge_txdesc[i];
1630                 txd->tx_m = NULL;
1631                 txd->tx_dmamap = NULL;
1632                 error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
1633                     &txd->tx_dmamap);
1634                 if (error != 0) {
1635                         device_printf(sc->arge_dev,
1636                             "failed to create Tx dmamap\n");
1637                         goto fail;
1638                 }
1639         }
1640         /* Create DMA maps for Rx buffers. */
1641         if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
1642             &sc->arge_cdata.arge_rx_sparemap)) != 0) {
1643                 device_printf(sc->arge_dev,
1644                     "failed to create spare Rx dmamap\n");
1645                 goto fail;
1646         }
1647         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1648                 rxd = &sc->arge_cdata.arge_rxdesc[i];
1649                 rxd->rx_m = NULL;
1650                 rxd->rx_dmamap = NULL;
1651                 error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
1652                     &rxd->rx_dmamap);
1653                 if (error != 0) {
1654                         device_printf(sc->arge_dev,
1655                             "failed to create Rx dmamap\n");
1656                         goto fail;
1657                 }
1658         }
1659
1660 fail:
1661         return (error);
1662 }
1663
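     /*
      * Release everything arge_dma_alloc() set up: unload and free the
      * ring memory, destroy the per-slot and spare maps, then destroy
      * the tags, finishing with the parent tag.
      */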
1664 static void
1665 arge_dma_free(struct arge_softc *sc)
1666 {
1667         struct arge_txdesc      *txd;
1668         struct arge_rxdesc      *rxd;
1669         int                     i;
1670
1671         /* Tx ring. */
1672         if (sc->arge_cdata.arge_tx_ring_tag) {
1673                 if (sc->arge_cdata.arge_tx_ring_map)
1674                         bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
1675                             sc->arge_cdata.arge_tx_ring_map);
1676                 if (sc->arge_cdata.arge_tx_ring_map &&
1677                     sc->arge_rdata.arge_tx_ring)
1678                         bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
1679                             sc->arge_rdata.arge_tx_ring,
1680                             sc->arge_cdata.arge_tx_ring_map);
1681                 sc->arge_rdata.arge_tx_ring = NULL;
1682                 sc->arge_cdata.arge_tx_ring_map = NULL;
1683                 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
1684                 sc->arge_cdata.arge_tx_ring_tag = NULL;
1685         }
1686         /* Rx ring. */
1687         if (sc->arge_cdata.arge_rx_ring_tag) {
1688                 if (sc->arge_cdata.arge_rx_ring_map)
1689                         bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
1690                             sc->arge_cdata.arge_rx_ring_map);
1691                 if (sc->arge_cdata.arge_rx_ring_map &&
1692                     sc->arge_rdata.arge_rx_ring)
1693                         bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
1694                             sc->arge_rdata.arge_rx_ring,
1695                             sc->arge_cdata.arge_rx_ring_map);
1696                 sc->arge_rdata.arge_rx_ring = NULL;
1697                 sc->arge_cdata.arge_rx_ring_map = NULL;
1698                 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
1699                 sc->arge_cdata.arge_rx_ring_tag = NULL;
1700         }
1701         /* Tx buffers. */
1702         if (sc->arge_cdata.arge_tx_tag) {
1703                 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1704                         txd = &sc->arge_cdata.arge_txdesc[i];
1705                         if (txd->tx_dmamap) {
1706                                 bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
1707                                     txd->tx_dmamap);
1708                                 txd->tx_dmamap = NULL;
1709                         }
1710                 }
1711                 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
1712                 sc->arge_cdata.arge_tx_tag = NULL;
1713         }
1714         /* Rx buffers. */
1715         if (sc->arge_cdata.arge_rx_tag) {
1716                 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1717                         rxd = &sc->arge_cdata.arge_rxdesc[i];
1718                         if (rxd->rx_dmamap) {
1719                                 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1720                                     rxd->rx_dmamap);
1721                                 rxd->rx_dmamap = NULL;
1722                         }
1723                 }
1724                 if (sc->arge_cdata.arge_rx_sparemap) {
1725                         bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1726                             sc->arge_cdata.arge_rx_sparemap);
1727                         sc->arge_cdata.arge_rx_sparemap = NULL;
1728                 }
1729                 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
1730                 sc->arge_cdata.arge_rx_tag = NULL;
1731         }
1732
1733         if (sc->arge_cdata.arge_parent_tag) {
1734                 bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
1735                 sc->arge_cdata.arge_parent_tag = NULL;
1736         }
1737 }
1738
1739 /*
1740  * Initialize the transmit descriptors.
1741  */
1742 static int
1743 arge_tx_ring_init(struct arge_softc *sc)
1744 {
1745         struct arge_ring_data   *rd;
1746         struct arge_txdesc      *txd;
1747         bus_addr_t              addr;
1748         int                     i;
1749
1750         sc->arge_cdata.arge_tx_prod = 0;
1751         sc->arge_cdata.arge_tx_cons = 0;
1752         sc->arge_cdata.arge_tx_cnt = 0;
1753
1754         rd = &sc->arge_rdata;
1755         bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
1756         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1757                 if (i == ARGE_TX_RING_COUNT - 1)
1758                         addr = ARGE_TX_RING_ADDR(sc, 0);
1759                 else
1760                         addr = ARGE_TX_RING_ADDR(sc, i + 1);
1761                 rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
1762                 rd->arge_tx_ring[i].next_desc = addr;
1763                 txd = &sc->arge_cdata.arge_txdesc[i];
1764                 txd->tx_m = NULL;
1765         }
1766
1767         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1768             sc->arge_cdata.arge_tx_ring_map,
1769             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1770
1771         return (0);
1772 }
1773
1774 /*
1775  * Free the Tx ring, unload any pending dma transaction and free the mbuf.
1776  */
1777 static void
1778 arge_tx_ring_free(struct arge_softc *sc)
1779 {
1780         struct arge_txdesc      *txd;
1781         int                     i;
1782
1783         /* Free the Tx buffers. */
1784         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1785                 txd = &sc->arge_cdata.arge_txdesc[i];
1786                 if (txd->tx_dmamap) {
1787                         bus_dmamap_sync(sc->arge_cdata.arge_tx_tag,
1788                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
1789                         bus_dmamap_unload(sc->arge_cdata.arge_tx_tag,
1790                             txd->tx_dmamap);
1791                 }
1792                 if (txd->tx_m)
1793                         m_freem(txd->tx_m);
1794                 txd->tx_m = NULL;
1795         }
1796 }
1797
1798 /*
1799  * Initialize the RX descriptors and allocate mbufs for them. Note that
1800  * we arrange the descriptors in a closed ring, so that the last descriptor
1801  * points back to the first.
1802  */
1803 static int
1804 arge_rx_ring_init(struct arge_softc *sc)
1805 {
1806         struct arge_ring_data   *rd;
1807         struct arge_rxdesc      *rxd;
1808         bus_addr_t              addr;
1809         int                     i;
1810
1811         sc->arge_cdata.arge_rx_cons = 0;
1812
1813         rd = &sc->arge_rdata;
1814         bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
1815         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1816                 rxd = &sc->arge_cdata.arge_rxdesc[i];
1817                 if (rxd->rx_m != NULL) {
1818                         device_printf(sc->arge_dev,
1819                             "%s: ring[%d] rx_m wasn't free?\n",
1820                             __func__,
1821                             i);
1822                 }
1823                 rxd->rx_m = NULL;
1824                 rxd->desc = &rd->arge_rx_ring[i];
1825                 if (i == ARGE_RX_RING_COUNT - 1)
1826                         addr = ARGE_RX_RING_ADDR(sc, 0);
1827                 else
1828                         addr = ARGE_RX_RING_ADDR(sc, i + 1);
1829                 rd->arge_rx_ring[i].next_desc = addr;
1830                 if (arge_newbuf(sc, i) != 0) {
1831                         return (ENOBUFS);
1832                 }
1833         }
1834
1835         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1836             sc->arge_cdata.arge_rx_ring_map,
1837             BUS_DMASYNC_PREWRITE);
1838
1839         return (0);
1840 }
1841
1842 /*
1843  * Free all the buffers in the RX ring.
1844  *
1845  * TODO: ensure that DMA is disabled and no pending DMA
1846  * is lurking in the FIFO.
1847  */
1848 static void
1849 arge_rx_ring_free(struct arge_softc *sc)
1850 {
1851         int i;
1852         struct arge_rxdesc      *rxd;
1853
1854         ARGE_LOCK_ASSERT(sc);
1855
1856         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1857                 rxd = &sc->arge_cdata.arge_rxdesc[i];
1858                 /* Unmap the mbuf */
1859                 if (rxd->rx_m != NULL) {
1860                         bus_dmamap_unload(sc->arge_cdata.arge_rx_tag,
1861                             rxd->rx_dmamap);
1862                         m_free(rxd->rx_m);
1863                         rxd->rx_m = NULL;
1864                 }
1865         }
1866 }
1867
1868 /*
1869  * Initialize an RX descriptor and attach an MBUF cluster.
1870  */
1871 static int
1872 arge_newbuf(struct arge_softc *sc, int idx)
1873 {
1874         struct arge_desc        *desc;
1875         struct arge_rxdesc      *rxd;
1876         struct mbuf             *m;
1877         bus_dma_segment_t       segs[1];
1878         bus_dmamap_t            map;
1879         int                     nsegs;
1880
1881         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1882         if (m == NULL)
1883                 return (ENOBUFS);
1884         m->m_len = m->m_pkthdr.len = MCLBYTES;
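             /*
              * Reserve 8 bytes at the front of the cluster.  The Rx DMA
              * engine needs a 4-byte aligned buffer address (see the
              * alignment panic below), so the usual ETHER_ALIGN offset
              * cannot be applied here; arge_fixup_rx() performs the
              * equivalent 2-byte shift in software after reception.
              */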
1885         m_adj(m, sizeof(uint64_t));
1886
1887         if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
1888             sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1889                 m_freem(m);
1890                 return (ENOBUFS);
1891         }
1892         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1893
1894         rxd = &sc->arge_cdata.arge_rxdesc[idx];
1895         if (rxd->rx_m != NULL) {
1896                 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
1897         }
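             /*
              * Swap the freshly loaded spare map into this ring slot and
              * recycle the slot's old map as the new spare; a failed load
              * above therefore never disturbs the currently posted buffer.
              */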
1898         map = rxd->rx_dmamap;
1899         rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
1900         sc->arge_cdata.arge_rx_sparemap = map;
1901         rxd->rx_m = m;
1902         desc = rxd->desc;
1903         if (segs[0].ds_addr & 3)
1904                 panic("RX packet address unaligned");
1905         desc->packet_addr = segs[0].ds_addr;
1906         desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);
1907
1908         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1909             sc->arge_cdata.arge_rx_ring_map,
1910             BUS_DMASYNC_PREWRITE);
1911
1912         return (0);
1913 }
1914
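     /*
      * The Rx DMA engine has written the frame at a 4-byte aligned
      * offset, which leaves the IP header misaligned.  Copy the whole
      * frame back by ETHER_ALIGN (two bytes) into the headroom reserved
      * in arge_newbuf(), so the payload ends up 32-bit aligned for the
      * upper layers.
      */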
1915 static __inline void
1916 arge_fixup_rx(struct mbuf *m)
1917 {
1918         int             i;
1919         uint16_t        *src, *dst;
1920
1921         src = mtod(m, uint16_t *);
1922         dst = src - 1;
1923
1924         for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
1925                 *dst++ = *src++;
1926         }
1927
1928         if (m->m_len % sizeof(uint16_t))
1929                 *(uint8_t *)dst = *(uint8_t *)src;
1930
1931         m->m_data -= ETHER_ALIGN;
1932 }
1933
1934 #ifdef DEVICE_POLLING
1935 static int
1936 arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1937 {
1938         struct arge_softc *sc = ifp->if_softc;
1939         int rx_npkts = 0;
1940
1941         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1942                 ARGE_LOCK(sc);
1943                 arge_tx_locked(sc);
1944                 rx_npkts = arge_rx_locked(sc);
1945                 ARGE_UNLOCK(sc);
1946         }
1947
1948         return (rx_npkts);
1949 }
1950 #endif /* DEVICE_POLLING */
1951
1952
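     /*
      * Reclaim transmitted descriptors: walk from the consumer index
      * towards the producer, stop at the first descriptor the MAC has
      * not finished with, unload and free the associated mbufs and
      * clear IFF_DRV_OACTIVE so more frames can be queued.
      */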
1953 static void
1954 arge_tx_locked(struct arge_softc *sc)
1955 {
1956         struct arge_txdesc      *txd;
1957         struct arge_desc        *cur_tx;
1958         struct ifnet            *ifp;
1959         uint32_t                ctrl;
1960         int                     cons, prod;
1961
1962         ARGE_LOCK_ASSERT(sc);
1963
1964         cons = sc->arge_cdata.arge_tx_cons;
1965         prod = sc->arge_cdata.arge_tx_prod;
1966
1967         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: cons=%d, prod=%d\n", __func__, cons,
1968             prod);
1969
1970         if (cons == prod)
1971                 return;
1972
1973         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1974             sc->arge_cdata.arge_tx_ring_map,
1975             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1976
1977         ifp = sc->arge_ifp;
1978         /*
1979          * Go through our tx list and free mbufs for those
1980          * frames that have been transmitted.
1981          */
1982         for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
1983                 cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
1984                 ctrl = cur_tx->packet_ctrl;
1985                 /* Check if descriptor has "finished" flag */
1986                 if ((ctrl & ARGE_DESC_EMPTY) == 0)
1987                         break;
1988
1989                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
1990
1991                 sc->arge_cdata.arge_tx_cnt--;
1992                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1993
1994                 txd = &sc->arge_cdata.arge_txdesc[cons];
1995
1996                 ifp->if_opackets++;
1997
1998                 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
1999                     BUS_DMASYNC_POSTWRITE);
2000                 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
2001
2002                 /* Free only if it's the first descriptor in the list */
2003                 if (txd->tx_m)
2004                         m_freem(txd->tx_m);
2005                 txd->tx_m = NULL;
2006
2007                 /* reset descriptor */
2008                 cur_tx->packet_addr = 0;
2009         }
2010
2011         sc->arge_cdata.arge_tx_cons = cons;
2012
2013         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
2014             sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
2015 }
2016
2017
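     /*
      * Harvest received frames: pass each completed descriptor's mbuf up
      * the stack (dropping the lock around if_input), then refill the
      * ring with fresh clusters via arge_newbuf() and advance the
      * consumer index.  Returns the number of packets received, for the
      * benefit of DEVICE_POLLING.
      */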
2018 static int
2019 arge_rx_locked(struct arge_softc *sc)
2020 {
2021         struct arge_rxdesc      *rxd;
2022         struct ifnet            *ifp = sc->arge_ifp;
2023         int                     cons, prog, packet_len, i;
2024         struct arge_desc        *cur_rx;
2025         struct mbuf             *m;
2026         int                     rx_npkts = 0;
2027
2028         ARGE_LOCK_ASSERT(sc);
2029
2030         cons = sc->arge_cdata.arge_rx_cons;
2031
2032         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
2033             sc->arge_cdata.arge_rx_ring_map,
2034             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2035
2036         for (prog = 0; prog < ARGE_RX_RING_COUNT;
2037             ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
2038                 cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
2039                 rxd = &sc->arge_cdata.arge_rxdesc[cons];
2040                 m = rxd->rx_m;
2041
2042                 if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
2043                         break;
2044
2045                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
2046
2047                 prog++;
2048
2049                 packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
2050                 bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
2051                     BUS_DMASYNC_POSTREAD);
2052                 m = rxd->rx_m;
2053
2054                 arge_fixup_rx(m);
2055                 m->m_pkthdr.rcvif = ifp;
2056                 /* Skip 4 bytes of CRC */
2057                 m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
2058                 ifp->if_ipackets++;
2059                 rx_npkts++;
2060
2061                 ARGE_UNLOCK(sc);
2062                 (*ifp->if_input)(ifp, m);
2063                 ARGE_LOCK(sc);
2064                 cur_rx->packet_addr = 0;
2065         }
2066
2067         if (prog > 0) {
2068
2069                 i = sc->arge_cdata.arge_rx_cons;
2070                 for (; prog > 0 ; prog--) {
2071                         if (arge_newbuf(sc, i) != 0) {
2072                                 device_printf(sc->arge_dev,
2073                                     "Failed to allocate buffer\n");
2074                                 break;
2075                         }
2076                         ARGE_INC(i, ARGE_RX_RING_COUNT);
2077                 }
2078
2079                 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
2080                     sc->arge_cdata.arge_rx_ring_map,
2081                     BUS_DMASYNC_PREWRITE);
2082
2083                 sc->arge_cdata.arge_rx_cons = cons;
2084         }
2085
2086         return (rx_npkts);
2087 }
2088
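     /*
      * Fast interrupt filter: latch the DMA interrupt status, mask
      * further interrupts by clearing AR71XX_DMA_INTR and hand off to
      * arge_intr(), which does the real work and re-enables the
      * interrupt sources when it is done.
      */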
2089 static int
2090 arge_intr_filter(void *arg)
2091 {
2092         struct arge_softc       *sc = arg;
2093         uint32_t                status, ints;
2094
2095         status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
2096         ints = ARGE_READ(sc, AR71XX_DMA_INTR);
2097
2098         ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints,
2099             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
2100             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
2101         ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status,
2102             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
2103             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
2104
2105         if (status & DMA_INTR_ALL) {
2106                 sc->arge_intr_status |= status;
2107                 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
2108                 return (FILTER_SCHEDULE_THREAD);
2109         }
2110
2111         sc->arge_intr_status = 0;
2112         return (FILTER_STRAY);
2113 }
2114
2115 static void
2116 arge_intr(void *arg)
2117 {
2118         struct arge_softc       *sc = arg;
2119         uint32_t                status;
2120         struct ifnet            *ifp = sc->arge_ifp;
2121
2122         status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
2123         status |= sc->arge_intr_status;
2124
2125         ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status,
2126             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
2127             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
2128
2129         /*
2130          * Is it our interrupt at all?
2131          */
2132         if (status == 0)
2133                 return;
2134
2135         if (status & DMA_INTR_RX_BUS_ERROR) {
2136                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
2137                 device_printf(sc->arge_dev, "RX bus error\n");
2138                 return;
2139         }
2140
2141         if (status & DMA_INTR_TX_BUS_ERROR) {
2142                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
2143                 device_printf(sc->arge_dev, "TX bus error\n");
2144                 return;
2145         }
2146
2147         ARGE_LOCK(sc);
2148
2149         if (status & DMA_INTR_RX_PKT_RCVD)
2150                 arge_rx_locked(sc);
2151
2152         /*
2153          * RX overrun disables the receiver.
2154          * Clear indication and re-enable rx.
2155          */
2156         if (status & DMA_INTR_RX_OVERFLOW) {
2157                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
2158                 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
2159                 sc->stats.rx_overflow++;
2160         }
2161
2162         if (status & DMA_INTR_TX_PKT_SENT)
2163                 arge_tx_locked(sc);
2164         /*
2165          * Underrun turns off TX. Clear underrun indication.
2166          * If there's anything left in the ring, reactivate the tx.
2167          */
2168         if (status & DMA_INTR_TX_UNDERRUN) {
2169                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
2170                 sc->stats.tx_underflow++;
2171                 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: TX underrun; tx_cnt=%d\n",
2172                     __func__, sc->arge_cdata.arge_tx_cnt);
2173                 if (sc->arge_cdata.arge_tx_cnt > 0) {
2174                         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL,
2175                             DMA_TX_CONTROL_EN);
2176                 }
2177         }
2178
2179         /*
2180          * If we've finished TXing and there's space for more packets
2181          * to be queued for TX, do so. Otherwise we may end up in a
2182          * situation where the interface send queue was filled
2183          * whilst the hardware queue was full, then the hardware
2184          * queue was drained but the interface send queue wasn't,
2185          * and thus if_start() is never called to kick-start
2186          * the send process (and all subsequent packets are simply
2187          * discarded).
2188          *
2189          * XXX TODO: make sure that the hardware deals nicely
2190          * with the possibility of the queue being enabled above
2191          * after a TX underrun, then having the hardware queue added
2192          * to below.
2193          */
2194         if (status & (DMA_INTR_TX_PKT_SENT | DMA_INTR_TX_UNDERRUN) &&
2195             (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
2196                 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2197                         arge_start_locked(ifp);
2198         }
2199
2200         /*
2201          * We handled all bits, clear status
2202          */
2203         sc->arge_intr_status = 0;
2204         ARGE_UNLOCK(sc);
2205         /*
2206          * re-enable all interrupts
2207          */
2208         ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
2209 }
2210
2211
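     /*
      * Periodic (once a second) callout: if an MII bus is attached, let
      * mii_tick() poll the PHY for link changes and reschedule ourselves.
      */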
2212 static void
2213 arge_tick(void *xsc)
2214 {
2215         struct arge_softc       *sc = xsc;
2216         struct mii_data         *mii;
2217
2218         ARGE_LOCK_ASSERT(sc);
2219
2220         if (sc->arge_miibus) {
2221                 mii = device_get_softc(sc->arge_miibus);
2222                 mii_tick(mii);
2223                 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
2224         }
2225 }
2226
2227 int
2228 arge_multiphy_mediachange(struct ifnet *ifp)
2229 {
2230         struct arge_softc *sc = ifp->if_softc;
2231         struct ifmedia *ifm = &sc->arge_ifmedia;
2232         struct ifmedia_entry *ife = ifm->ifm_cur;
2233
2234         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2235                 return (EINVAL);
2236
2237         if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
2238                 device_printf(sc->arge_dev,
2239                     "AUTO is not supported for multiphy MAC\n");
2240                 return (EINVAL);
2241         }
2242
2243         /*
2244          * Ignore everything
2245          */
2246         return (0);
2247 }
2248
2249 void
2250 arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2251 {
2252         struct arge_softc *sc = ifp->if_softc;
2253
2254         ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
2255         ifmr->ifm_active = IFM_ETHER | sc->arge_media_type |
2256             sc->arge_duplex_mode;
2257 }
2258
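     /*
      * Optional MDIO front-end (ARGE_MDIO): argemdio maps the MAC
      * register window, resets the MAC and the MII bus, and then
      * probes/attaches any hinted children, allowing MDIO consumers
      * (e.g. the mdio/miiproxy glue) to hang off this device.
      */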
2259 #if defined(ARGE_MDIO)
2260 static int
2261 argemdio_probe(device_t dev)
2262 {
2263         device_set_desc(dev, "Atheros AR71xx built-in ethernet interface, MDIO controller");
2264         return (0);
2265 }
2266
2267 static int
2268 argemdio_attach(device_t dev)
2269 {
2270         struct arge_softc       *sc;
2271         int                     error = 0;
2272
2273         sc = device_get_softc(dev);
2274         sc->arge_dev = dev;
2275         sc->arge_mac_unit = device_get_unit(dev);
2276         sc->arge_rid = 0;
2277         sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
2278             &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE);
2279         if (sc->arge_res == NULL) {
2280                 device_printf(dev, "couldn't map memory\n");
2281                 error = ENXIO;
2282                 goto fail;
2283         }
2284
2285         /* Reset MAC - required before AR71xx MDIO accesses will work */
2286         arge_reset_mac(sc);
2287         /* Reset MII bus */
2288         arge_reset_miibus(sc);
2289
2290         bus_generic_probe(dev);
2291         bus_enumerate_hinted_children(dev);
2292         error = bus_generic_attach(dev);
2293 fail:
2294         return (error);
2295 }
2296
2297 static int
2298 argemdio_detach(device_t dev)
2299 {
2300         return (0);
2301 }
2302
2303 #endif