1 /*-
2  * Copyright (c) 2009, Oleksandr Tymoshenko
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 /*
32  * AR71XX gigabit ethernet driver
33  */
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
36 #endif
37
38 #include <sys/param.h>
39 #include <sys/endian.h>
40 #include <sys/systm.h>
41 #include <sys/sockio.h>
42 #include <sys/mbuf.h>
43 #include <sys/malloc.h>
44 #include <sys/kernel.h>
45 #include <sys/module.h>
46 #include <sys/socket.h>
47 #include <sys/taskqueue.h>
48 #include <sys/sysctl.h>
49
50 #include <net/if.h>
51 #include <net/if_arp.h>
52 #include <net/ethernet.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56
57 #include <net/bpf.h>
58
59 #include <machine/bus.h>
60 #include <machine/cache.h>
61 #include <machine/resource.h>
62 #include <vm/vm_param.h>
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65 #include <machine/pmap.h>
66 #include <sys/bus.h>
67 #include <sys/rman.h>
68
69 #include <dev/mii/mii.h>
70 #include <dev/mii/miivar.h>
71
72 #include <dev/pci/pcireg.h>
73 #include <dev/pci/pcivar.h>
74
75 MODULE_DEPEND(arge, ether, 1, 1, 1);
76 MODULE_DEPEND(arge, miibus, 1, 1, 1);
77
78 #include "miibus_if.h"
79
80 #include <mips/atheros/ar71xxreg.h>
81 #include <mips/atheros/if_argevar.h>
82 #include <mips/atheros/ar71xx_setup.h>
83 #include <mips/atheros/ar71xx_cpudef.h>
84
85 typedef enum {
86         ARGE_DBG_MII    =       0x00000001,
87         ARGE_DBG_INTR   =       0x00000002,
88         ARGE_DBG_TX     =       0x00000004,
89         ARGE_DBG_RX     =       0x00000008,
90         ARGE_DBG_ERR    =       0x00000010,
91         ARGE_DBG_RESET  =       0x00000020,
92 } arge_debug_flags;
93
94 #ifdef ARGE_DEBUG
95 #define ARGEDEBUG(_sc, _m, ...)                                         \
96         do {                                                            \
97                 if ((_m) & (_sc)->arge_debug)                           \
98                         device_printf((_sc)->arge_dev, __VA_ARGS__);    \
99         } while (0)
100 #else
101 #define ARGEDEBUG(_sc, _m, ...)
102 #endif
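/*
 * Usage sketch (hedged; assumes the standard dev.<driver>.<unit> sysctl tree
 * and unit 0): with ARGE_DEBUG compiled in, the flags above can be OR'ed
 * together and written to the per-device "debug" sysctl that
 * arge_attach_sysctl() registers, e.g. to enable MII and interrupt debugging:
 *
 *   sysctl dev.arge.0.debug=0x3
 *
 * ARGEDEBUG() then prints only messages whose mask bit is set in arge_debug.
 */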
103
104 static int arge_attach(device_t);
105 static int arge_detach(device_t);
106 static void arge_flush_ddr(struct arge_softc *);
107 static int arge_ifmedia_upd(struct ifnet *);
108 static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
109 static int arge_ioctl(struct ifnet *, u_long, caddr_t);
110 static void arge_init(void *);
111 static void arge_init_locked(struct arge_softc *);
112 static void arge_link_task(void *, int);
113 static void arge_set_pll(struct arge_softc *, int, int);
114 static int arge_miibus_readreg(device_t, int, int);
115 static void arge_miibus_statchg(device_t);
116 static int arge_miibus_writereg(device_t, int, int, int);
117 static int arge_probe(device_t);
118 static void arge_reset_dma(struct arge_softc *);
119 static int arge_resume(device_t);
120 static int arge_rx_ring_init(struct arge_softc *);
121 static int arge_tx_ring_init(struct arge_softc *);
122 #ifdef DEVICE_POLLING
123 static int arge_poll(struct ifnet *, enum poll_cmd, int);
124 #endif
125 static int arge_shutdown(device_t);
126 static void arge_start(struct ifnet *);
127 static void arge_start_locked(struct ifnet *);
128 static void arge_stop(struct arge_softc *);
129 static int arge_suspend(device_t);
130
131 static int arge_rx_locked(struct arge_softc *);
132 static void arge_tx_locked(struct arge_softc *);
133 static void arge_intr(void *);
134 static int arge_intr_filter(void *);
135 static void arge_tick(void *);
136
137 /*
138  * ifmedia callbacks for multiPHY MAC
139  */
140 void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
141 int arge_multiphy_mediachange(struct ifnet *);
142
143 static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
144 static int arge_dma_alloc(struct arge_softc *);
145 static void arge_dma_free(struct arge_softc *);
146 static int arge_newbuf(struct arge_softc *, int);
147 static __inline void arge_fixup_rx(struct mbuf *);
148
149 static device_method_t arge_methods[] = {
150         /* Device interface */
151         DEVMETHOD(device_probe,         arge_probe),
152         DEVMETHOD(device_attach,        arge_attach),
153         DEVMETHOD(device_detach,        arge_detach),
154         DEVMETHOD(device_suspend,       arge_suspend),
155         DEVMETHOD(device_resume,        arge_resume),
156         DEVMETHOD(device_shutdown,      arge_shutdown),
157
158         /* bus interface */
159         DEVMETHOD(bus_print_child,      bus_generic_print_child),
160         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
161
162         /* MII interface */
163         DEVMETHOD(miibus_readreg,       arge_miibus_readreg),
164         DEVMETHOD(miibus_writereg,      arge_miibus_writereg),
165         DEVMETHOD(miibus_statchg,       arge_miibus_statchg),
166
167         { 0, 0 }
168 };
169
170 static driver_t arge_driver = {
171         "arge",
172         arge_methods,
173         sizeof(struct arge_softc)
174 };
175
176 static devclass_t arge_devclass;
177
178 DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
179 DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);
180
181 /*
182  * RedBoot passes the MAC address to the kernel entry point as an environment
183  * variable; platform_start() parses it and stores it in this variable.
184  */
185 extern uint32_t ar711_base_mac[ETHER_ADDR_LEN];
186
187 static struct mtx miibus_mtx;
188
189 MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);
190
191
192 /*
193  * Flush all pending DDR writes for this MAC unit (GE0 or GE1).
194  */
195 static void
196 arge_flush_ddr(struct arge_softc *sc)
197 {
198         if (sc->arge_mac_unit == 0)
199                 ar71xx_device_flush_ddr_ge0();
200         else
201                 ar71xx_device_flush_ddr_ge1();
202 }
203
204 static int 
205 arge_probe(device_t dev)
206 {
207
208         device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
209         return (0);
210 }
211
212 static void
213 arge_attach_sysctl(device_t dev)
214 {
215         struct arge_softc *sc = device_get_softc(dev);
216         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
217         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
218
219 #ifdef  ARGE_DEBUG
220         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
221                 "debug", CTLFLAG_RW, &sc->arge_debug, 0,
222                 "arge interface debugging flags");
223 #endif
224
225         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
226                 "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
227                 "number of TX aligned packets");
228
229         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
230                 "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 0,
231                 "number of TX unaligned packets");
232
233 #ifdef  ARGE_DEBUG
234         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod",
235             CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, "");
236         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons",
237             CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, "");
238         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt",
239             CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, "");
240 #endif
241 }
242
243 static int
244 arge_attach(device_t dev)
245 {
246         uint8_t                 eaddr[ETHER_ADDR_LEN];
247         struct ifnet            *ifp;
248         struct arge_softc       *sc;
249         int                     error = 0, rid, phymask;
250         uint32_t                reg, rnd;
251         int                     is_base_mac_empty, i, phys_total;
252         uint32_t                hint;
253         long                    eeprom_mac_addr = 0;
254
255         sc = device_get_softc(dev);
256         sc->arge_dev = dev;
257         sc->arge_mac_unit = device_get_unit(dev);
258
259         /*
260          * Some units (e.g. the TP-Link WR-1043ND) do not have a convenient
261          * EEPROM location to read the ethernet MAC address from.
262          * OpenWRT simply snaffles it from a fixed location.
263          *
264          * Since multiple units seem to use this feature, include
265          * a method of setting the MAC address based on a flash location
266          * in CPU address space.
267          */
268         if (sc->arge_mac_unit == 0 &&
269             resource_long_value(device_get_name(dev), device_get_unit(dev), 
270             "eeprommac", &eeprom_mac_addr) == 0) {
271                 int i;
272                 const char *mac = (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
273                 device_printf(dev, "Overriding MAC from EEPROM\n");
274                 for (i = 0; i < 6; i++) {
275                         ar711_base_mac[i] = mac[i];
276                 }
277         }
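
/*
 * Example (a sketch; the hint name matches the resource_long_value() call
 * above, but the flash offset shown is board-specific and purely
 * illustrative): a board could point arge0 at a MAC address stored in flash
 * via device hints, e.g. in /boot/device.hints:
 *
 *   hint.arge.0.eeprommac=0x1f01fc00
 *
 * The value is treated as a physical address and mapped through
 * MIPS_PHYS_TO_KSEG1 before the six bytes are copied into ar711_base_mac.
 */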
278
279         KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)), 
280             ("if_arge: Only MAC0 and MAC1 supported"));
281
282         /*
283          *  Get which PHY of 5 available we should use for this unit
284          */
285         if (resource_int_value(device_get_name(dev), device_get_unit(dev), 
286             "phymask", &phymask) != 0) {
287                 /*
288                  * Use PHY 4 (the WAN port) for GE0; for the other
289                  * unit, fall back to the remaining PHYs (0-3).
290                  */
291                 if (sc->arge_mac_unit == 0)
292                         phymask = (1 << 4);
293                 else
294                         /* Use all phys up to 4 */
295                         phymask = (1 << 4) - 1;
296
297                 device_printf(dev, "No PHY specified, using mask %d\n", phymask);
298         }
299
300         /*
301          *  Get the default media & duplex mode; by default it's 100baseTX
302          *  and full duplex.
303          */
304         if (resource_int_value(device_get_name(dev), device_get_unit(dev), 
305             "media", &hint) != 0)
306                 hint = 0;
307
308         if (hint == 1000)
309                 sc->arge_media_type = IFM_1000_T;
310         else
311                 sc->arge_media_type = IFM_100_TX;
312
313         if (resource_int_value(device_get_name(dev), device_get_unit(dev), 
314             "fduplex", &hint) != 0)
315                 hint = 1;
316
317         if (hint)
318                 sc->arge_duplex_mode = IFM_FDX;
319         else
320                 sc->arge_duplex_mode = 0;
321
322         sc->arge_phymask = phymask;
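
/*
 * Example (a sketch; the hint names match the resource_int_value() calls
 * above, the values are illustrative): the PHY mask, media speed and duplex
 * can all be overridden from device hints, e.g. to force gigabit full
 * duplex on arge0 using only PHY 0:
 *
 *   hint.arge.0.phymask=0x1
 *   hint.arge.0.media=1000
 *   hint.arge.0.fduplex=1
 *
 * "media=1000" selects IFM_1000_T; any other value falls back to IFM_100_TX.
 */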
323
324         mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
325             MTX_DEF);
326         callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
327         TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);
328
329         /* Map control/status registers. */
330         sc->arge_rid = 0;
331         sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
332             &sc->arge_rid, RF_ACTIVE);
333
334         if (sc->arge_res == NULL) {
335                 device_printf(dev, "couldn't map memory\n");
336                 error = ENXIO;
337                 goto fail;
338         }
339
340         /* Allocate interrupts */
341         rid = 0;
342         sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 
343             RF_SHAREABLE | RF_ACTIVE);
344
345         if (sc->arge_irq == NULL) {
346                 device_printf(dev, "couldn't map interrupt\n");
347                 error = ENXIO;
348                 goto fail;
349         }
350
351         /* Allocate ifnet structure. */
352         ifp = sc->arge_ifp = if_alloc(IFT_ETHER);
353
354         if (ifp == NULL) {
355                 device_printf(dev, "couldn't allocate ifnet structure\n");
356                 error = ENOSPC;
357                 goto fail;
358         }
359
360         ifp->if_softc = sc;
361         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
362         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
363         ifp->if_ioctl = arge_ioctl;
364         ifp->if_start = arge_start;
365         ifp->if_init = arge_init;
366         sc->arge_if_flags = ifp->if_flags;
367
368         /* XXX: add real size */
369         IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
370         ifp->if_snd.ifq_maxlen = ifqmaxlen;
371         IFQ_SET_READY(&ifp->if_snd);
372
373         ifp->if_capenable = ifp->if_capabilities;
374 #ifdef DEVICE_POLLING
375         ifp->if_capabilities |= IFCAP_POLLING;
376 #endif
377
378         is_base_mac_empty = 1;
379         for (i = 0; i < ETHER_ADDR_LEN; i++) {
380                 eaddr[i] = ar711_base_mac[i] & 0xff;
381                 if (eaddr[i] != 0)
382                         is_base_mac_empty = 0;
383         }
384
385         if (is_base_mac_empty) {
386                 /*
387                  * No MAC address configured. Generate a random one.
388                  */
389                 if  (bootverbose)
390                         device_printf(dev, 
391                             "Generating random ethernet address.\n");
392
393                 rnd = arc4random();
394                 eaddr[0] = 'b';
395                 eaddr[1] = 's';
396                 eaddr[2] = 'd';
397                 eaddr[3] = (rnd >> 24) & 0xff;
398                 eaddr[4] = (rnd >> 16) & 0xff;
399                 eaddr[5] = (rnd >> 8) & 0xff;
400         }
401
402         if (sc->arge_mac_unit != 0)
403                 eaddr[5] +=  sc->arge_mac_unit;
404
405         if (arge_dma_alloc(sc) != 0) {
406                 error = ENXIO;
407                 goto fail;
408         }
409
410         /* Initialize the MAC block */
411         
412         /* Step 1. Soft-reset MAC */
413         ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
414         DELAY(20);
415
416         /* Step 2. Pulse the MAC core reset via the central reset register */
417         ar71xx_device_stop(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
418         DELAY(100);
419         ar71xx_device_start(sc->arge_mac_unit == 0 ? RST_RESET_GE0_MAC : RST_RESET_GE1_MAC);
420
421         /* Step 3. Reconfigure MAC block */
422         ARGE_WRITE(sc, AR71XX_MAC_CFG1, 
423                 MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
424                 MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);
425
426         reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
427         reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ;
428         ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);
429
430         ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);
431
432         /* Reset MII bus */
433         ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET);
434         DELAY(100);
435         ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_CLOCK_DIV_28);
436         DELAY(100);
437
438         /* 
439          * Program the station address registers (STA_ADDR1/STA_ADDR2)
440          * with the Ethernet address selected above.
441          */
442         ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1, 
443             (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8)  | eaddr[5]);
444         ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (eaddr[0] << 8) | eaddr[1]);
445
446         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0, 
447             FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);
448
449         switch (ar71xx_soc) {
450                 case AR71XX_SOC_AR7240:
451                 case AR71XX_SOC_AR7241:
452                 case AR71XX_SOC_AR7242:
453                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
454                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
455                         break;
456                 default:
457                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
458                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
459         }
460
461         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH, 
462             FIFO_RX_FILTMATCH_DEFAULT);
463
464         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK, 
465             FIFO_RX_FILTMASK_DEFAULT);
466
467         /* 
468          * Check if we have single-PHY MAC or multi-PHY
469          */
470         phys_total = 0;
471         for (i = 0; i < ARGE_NPHY; i++)
472                 if (phymask & (1 << i))
473                         phys_total ++;
474
475         if (phys_total == 0) {
476                 error = EINVAL;
477                 goto fail;
478         }
479
480         if (phys_total == 1) {
481                 /* Do MII setup. */
482                 error = mii_attach(dev, &sc->arge_miibus, ifp,
483                     arge_ifmedia_upd, arge_ifmedia_sts, BMSR_DEFCAPMASK,
484                     MII_PHY_ANY, MII_OFFSET_ANY, 0);
485                 if (error != 0) {
486                         device_printf(dev, "attaching PHYs failed\n");
487                         goto fail;
488                 }
489         }
490         else {
491                 ifmedia_init(&sc->arge_ifmedia, 0, 
492                     arge_multiphy_mediachange,
493                     arge_multiphy_mediastatus);
494                 ifmedia_add(&sc->arge_ifmedia,
495                     IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode, 
496                     0, NULL);
497                 ifmedia_set(&sc->arge_ifmedia,
498                     IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode);
499                 arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
500         }
501
502         /* Call MI attach routine. */
503         ether_ifattach(ifp, eaddr);
504
505         /* Hook interrupt last to avoid having to lock softc */
506         error = bus_setup_intr(dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
507             arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);
508
509         if (error) {
510                 device_printf(dev, "couldn't set up irq\n");
511                 ether_ifdetach(ifp);
512                 goto fail;
513         }
514
515         /* setup sysctl variables */
516         arge_attach_sysctl(dev);
517
518 fail:
519         if (error) 
520                 arge_detach(dev);
521
522         return (error);
523 }
524
525 static int
526 arge_detach(device_t dev)
527 {
528         struct arge_softc       *sc = device_get_softc(dev);
529         struct ifnet            *ifp = sc->arge_ifp;
530
531         KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized"));
532
533         /* These should only be active if attach succeeded */
534         if (device_is_attached(dev)) {
535                 ARGE_LOCK(sc);
536                 sc->arge_detach = 1;
537 #ifdef DEVICE_POLLING
538                 if (ifp->if_capenable & IFCAP_POLLING)
539                         ether_poll_deregister(ifp);
540 #endif
541
542                 arge_stop(sc);
543                 ARGE_UNLOCK(sc);
544                 taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
545                 ether_ifdetach(ifp);
546         }
547
548         if (sc->arge_miibus)
549                 device_delete_child(dev, sc->arge_miibus);
550
551         bus_generic_detach(dev);
552
553         if (sc->arge_intrhand)
554                 bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);
555
556         if (sc->arge_res)
557                 bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid, 
558                     sc->arge_res);
559
560         if (ifp)
561                 if_free(ifp);
562
563         arge_dma_free(sc);
564
565         mtx_destroy(&sc->arge_mtx);
566
567         return (0);
568
569 }
570
571 static int
572 arge_suspend(device_t dev)
573 {
574
575         panic("%s", __func__);
576         return 0;
577 }
578
579 static int
580 arge_resume(device_t dev)
581 {
582
583         panic("%s", __func__);
584         return 0;
585 }
586
587 static int
588 arge_shutdown(device_t dev)
589 {
590         struct arge_softc       *sc;
591
592         sc = device_get_softc(dev);
593
594         ARGE_LOCK(sc);
595         arge_stop(sc);
596         ARGE_UNLOCK(sc);
597
598         return (0);
599 }
600
601 static int
602 arge_miibus_readreg(device_t dev, int phy, int reg)
603 {
604         struct arge_softc * sc = device_get_softc(dev);
605         int i, result;
606         uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT) 
607             | (reg & MAC_MII_REG_MASK);
608
609         if ((sc->arge_phymask  & (1 << phy)) == 0)
610                 return (0);
611
612         mtx_lock(&miibus_mtx);
613         ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
614         ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
615         ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);
616
617         i = ARGE_MII_TIMEOUT;
618         while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) & 
619             MAC_MII_INDICATOR_BUSY) && (i--))
620                 DELAY(5);
621
622         if (i < 0) {
623                 mtx_unlock(&miibus_mtx);
624                 ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
625                 /* XXX: return an errno instead? */
626                 return (-1);
627         }
628
629         result = ARGE_MII_READ(AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
630         ARGE_MII_WRITE(AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
631         mtx_unlock(&miibus_mtx);
632
633         ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__, 
634                  phy, reg, addr, result);
635
636         return (result);
637 }
638
639 static int
640 arge_miibus_writereg(device_t dev, int phy, int reg, int data)
641 {
642         struct arge_softc * sc = device_get_softc(dev);
643         int i;
644         uint32_t addr = 
645             (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);
646
647
648         if ((sc->arge_phymask  & (1 << phy)) == 0)
649                 return (-1);
650
651         ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__, 
652             phy, reg, data);
653
654         mtx_lock(&miibus_mtx);
655         ARGE_MII_WRITE(AR71XX_MAC_MII_ADDR, addr);
656         ARGE_MII_WRITE(AR71XX_MAC_MII_CONTROL, data);
657
658         i = ARGE_MII_TIMEOUT;
659         while ((ARGE_MII_READ(AR71XX_MAC_MII_INDICATOR) & 
660             MAC_MII_INDICATOR_BUSY) && (i--))
661                 DELAY(5);
662
663         mtx_unlock(&miibus_mtx);
664
665         if (i < 0) {
666                 ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__);
667                 /* XXX: return an errno instead? */
668                 return (-1);
669         }
670
671         return (0);
672 }
673
674 static void
675 arge_miibus_statchg(device_t dev)
676 {
677         struct arge_softc               *sc;
678
679         sc = device_get_softc(dev);
680         taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
681 }
682
683 static void
684 arge_link_task(void *arg, int pending)
685 {
686         struct arge_softc       *sc;
687         struct mii_data         *mii;
688         struct ifnet            *ifp;
689         uint32_t                media, duplex;
690
691         sc = (struct arge_softc *)arg;
692
693         ARGE_LOCK(sc);
694         mii = device_get_softc(sc->arge_miibus);
695         ifp = sc->arge_ifp;
696         if (mii == NULL || ifp == NULL ||
697             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
698                 ARGE_UNLOCK(sc);
699                 return;
700         }
701
702         if (mii->mii_media_status & IFM_ACTIVE) {
703
704                 media = IFM_SUBTYPE(mii->mii_media_active);
705
706                 if (media != IFM_NONE) {
707                         sc->arge_link_status = 1;
708                         duplex = mii->mii_media_active & IFM_GMASK;
709                         arge_set_pll(sc, media, duplex);
710                 }
711         } else
712                 sc->arge_link_status = 0;
713
714         ARGE_UNLOCK(sc);
715 }
716
717 static void
718 arge_set_pll(struct arge_softc *sc, int media, int duplex)
719 {
720         uint32_t                cfg, ifcontrol, rx_filtmask;
721         uint32_t                fifo_tx;
722         int if_speed;
723
724         cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
725         cfg &= ~(MAC_CFG2_IFACE_MODE_1000 
726             | MAC_CFG2_IFACE_MODE_10_100 
727             | MAC_CFG2_FULL_DUPLEX);
728
729         if (duplex == IFM_FDX)
730                 cfg |= MAC_CFG2_FULL_DUPLEX;
731
732         ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
733         ifcontrol &= ~MAC_IFCONTROL_SPEED;
734         rx_filtmask = 
735             ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
736         rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;
737
738         switch(media) {
739         case IFM_10_T:
740                 cfg |= MAC_CFG2_IFACE_MODE_10_100;
741                 if_speed = 10;
742                 break;
743         case IFM_100_TX:
744                 cfg |= MAC_CFG2_IFACE_MODE_10_100;
745                 ifcontrol |= MAC_IFCONTROL_SPEED;
746                 if_speed = 100;
747                 break;
748         case IFM_1000_T:
749         case IFM_1000_SX:
750                 cfg |= MAC_CFG2_IFACE_MODE_1000;
751                 rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
752                 if_speed = 1000;
753                 break;
754         default:
755                 if_speed = 100;
756                 device_printf(sc->arge_dev, 
757                     "Unknown media %d\n", media);
758         }
759
760         switch (ar71xx_soc) {
761                 case AR71XX_SOC_AR7240:
762                 case AR71XX_SOC_AR7241:
763                 case AR71XX_SOC_AR7242:
764                         fifo_tx = 0x01f00140;
765                         break;
766                 case AR71XX_SOC_AR9130:
767                 case AR71XX_SOC_AR9132:
768                         fifo_tx = 0x00780fff;
769                         break;
770                 default:
771                         fifo_tx = 0x008001ff;
772         }
773
774         ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
775         ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
776         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK, 
777             rx_filtmask);
778         ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx);
779
780         /* set PLL registers */
781         if (sc->arge_mac_unit == 0)
782                 ar71xx_device_set_pll_ge0(if_speed);
783         else
784                 ar71xx_device_set_pll_ge1(if_speed);
785 }
786
787
788 static void
789 arge_reset_dma(struct arge_softc *sc)
790 {
791         ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
792         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);
793
794         ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
795         ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);
796
797         /* Clear all possible RX interrupts */
798         while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
799                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
800
801         /* 
802          * Clear all possible TX interrupts
803          */
804         while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
805                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
806
807         /* 
808          * Now Rx/Tx errors
809          */
810         ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, 
811             DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
812         ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, 
813             DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
814 }
815
816
817
818 static void
819 arge_init(void *xsc)
820 {
821         struct arge_softc        *sc = xsc;
822
823         ARGE_LOCK(sc);
824         arge_init_locked(sc);
825         ARGE_UNLOCK(sc);
826 }
827
828 static void
829 arge_init_locked(struct arge_softc *sc)
830 {
831         struct ifnet            *ifp = sc->arge_ifp;
832         struct mii_data         *mii;
833
834         ARGE_LOCK_ASSERT(sc);
835
836         arge_stop(sc);
837
838         /* Init circular RX list. */
839         if (arge_rx_ring_init(sc) != 0) {
840                 device_printf(sc->arge_dev,
841                     "initialization failed: no memory for rx buffers\n");
842                 arge_stop(sc);
843                 return;
844         }
845
846         /* Init tx descriptors. */
847         arge_tx_ring_init(sc);
848
849         arge_reset_dma(sc);
850
851
852         if (sc->arge_miibus) {
853                 sc->arge_link_status = 0;
854                 mii = device_get_softc(sc->arge_miibus);
855                 mii_mediachg(mii);
856         }
857         else {
858                 /*
859                  * The sun always shines over a multiPHY interface (the link is always up)
860                  */
861                 sc->arge_link_status = 1;
862         }
863
864         ifp->if_drv_flags |= IFF_DRV_RUNNING;
865         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
866
867         if (sc->arge_miibus)
868                 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
869
870         ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
871         ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));
872
873         /* Start listening */
874         ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
875
876         /* Enable interrupts */
877         ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
878 }
879
880 /*
881  * Return whether the mbuf chain is correctly aligned
882  * for the arge TX engine.
883  *
884  * The TX engine requires each fragment to be aligned to a
885  * 4 byte boundary and the size of each fragment except
886  * the last to be a multiple of 4 bytes.
887  */
888 static int
889 arge_mbuf_chain_is_tx_aligned(struct mbuf *m0)
890 {
891         struct mbuf *m;
892
893         for (m = m0; m != NULL; m = m->m_next) {
894                 if((mtod(m, intptr_t) & 3) != 0)
895                         return 0;
896                 if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0))
897                         return 0;
898         }
899         return 1;
900 }
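
/*
 * Illustrative example (hypothetical addresses): a chain of two mbufs with
 * data at 0x80100000 (len 64) and 0x80100040 (len 57) passes the check,
 * since only the last fragment may have a length that is not a multiple of
 * 4 bytes. A chain whose first mbuf data starts at 0x80100002 fails, and
 * arge_encap() below will m_defrag() it before loading the DMA map.
 */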
901
902 /*
903  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
904  * pointers to the fragment pointers.
905  */
906 static int
907 arge_encap(struct arge_softc *sc, struct mbuf **m_head)
908 {
909         struct arge_txdesc      *txd;
910         struct arge_desc        *desc, *prev_desc;
911         bus_dma_segment_t       txsegs[ARGE_MAXFRAGS];
912         int                     error, i, nsegs, prod, prev_prod;
913         struct mbuf             *m;
914
915         ARGE_LOCK_ASSERT(sc);
916
917         /*
918          * Fix up the mbuf chain: every fragment must be 4-byte aligned and,
919          * except for the last one, a multiple of 4 bytes in length.
920          */
921         m = *m_head;
922         if (! arge_mbuf_chain_is_tx_aligned(m)) {
923                 sc->stats.tx_pkts_unaligned++;
924                 m = m_defrag(*m_head, M_DONTWAIT);
925                 if (m == NULL) {
926                         *m_head = NULL;
927                         return (ENOBUFS);
928                 }
929                 *m_head = m;
930         } else
931                 sc->stats.tx_pkts_aligned++;
932
933         prod = sc->arge_cdata.arge_tx_prod;
934         txd = &sc->arge_cdata.arge_txdesc[prod];
935         error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag, 
936             txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
937
938         if (error == EFBIG) {
939                 panic("EFBIG");
940         } else if (error != 0)
941                 return (error);
942
943         if (nsegs == 0) {
944                 m_freem(*m_head);
945                 *m_head = NULL;
946                 return (EIO);
947         }
948
949         /* Check number of available descriptors. */
950         if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 1)) {
951                 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
952                 return (ENOBUFS);
953         }
954
955         txd->tx_m = *m_head;
956         bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
957             BUS_DMASYNC_PREWRITE);
958
959         /* 
960          * Make a list of descriptors for this packet. DMA controller will
961          * walk through it while arge_link is not zero.
962          */
963         prev_prod = prod;
964         desc = prev_desc = NULL;
965         for (i = 0; i < nsegs; i++) {
966                 desc = &sc->arge_rdata.arge_tx_ring[prod];
967                 desc->packet_ctrl = ARGE_DMASIZE(txsegs[i].ds_len);
968
969                 if (txsegs[i].ds_addr & 3)
970                         panic("TX packet address unaligned\n");
971
972                 desc->packet_addr = txsegs[i].ds_addr;
973                 
974                 /* link with previous descriptor */
975                 if (prev_desc)
976                         prev_desc->packet_ctrl |= ARGE_DESC_MORE;
977
978                 sc->arge_cdata.arge_tx_cnt++;
979                 prev_desc = desc;
980                 ARGE_INC(prod, ARGE_TX_RING_COUNT);
981         }
982
983         /* Update producer index. */
984         sc->arge_cdata.arge_tx_prod = prod;
985
986         /* Sync descriptors. */
987         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
988             sc->arge_cdata.arge_tx_ring_map,
989             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
990
991         /* Start transmitting */
992         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: setting DMA_TX_CONTROL_EN\n", __func__);
993         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
994         return (0);
995 }
996
997 static void
998 arge_start(struct ifnet *ifp)
999 {
1000         struct arge_softc        *sc;
1001
1002         sc = ifp->if_softc;
1003
1004         ARGE_LOCK(sc);
1005         arge_start_locked(ifp);
1006         ARGE_UNLOCK(sc);
1007 }
1008
1009 static void
1010 arge_start_locked(struct ifnet *ifp)
1011 {
1012         struct arge_softc       *sc;
1013         struct mbuf             *m_head;
1014         int                     enq = 0;
1015
1016         sc = ifp->if_softc;
1017
1018         ARGE_LOCK_ASSERT(sc);
1019
1020         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: beginning\n", __func__);
1021
1022         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1023             IFF_DRV_RUNNING || sc->arge_link_status == 0 )
1024                 return;
1025
1026         /*
1027          * Before we go any further, check whether we're already full.
1028          * The below check errors out immediately if the ring is full
1029          * and never gets a chance to set this flag. Although it's
1030          * likely never needed, this at least avoids an unexpected
1031          * situation.
1032          */
1033         if (sc->arge_cdata.arge_tx_cnt >= ARGE_TX_RING_COUNT - 2) {
1034                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1035                 ARGEDEBUG(sc, ARGE_DBG_ERR, "%s: tx_cnt %d >= max %d; setting IFF_DRV_OACTIVE\n",
1036                     __func__, sc->arge_cdata.arge_tx_cnt, ARGE_TX_RING_COUNT - 2);
1037                 return;
1038         }
1039
1040         arge_flush_ddr(sc);
1041
1042         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1043             sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
1044                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1045                 if (m_head == NULL)
1046                         break;
1047
1048
1049                 /*
1050                  * Pack the data into the transmit ring.
1051                  */
1052                 if (arge_encap(sc, &m_head)) {
1053                         if (m_head == NULL)
1054                                 break;
1055                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1056                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1057                         break;
1058                 }
1059
1060                 enq++;
1061                 /*
1062                  * If there's a BPF listener, bounce a copy of this frame
1063                  * to him.
1064                  */
1065                 ETHER_BPF_MTAP(ifp, m_head);
1066         }
1067         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: finished; queued %d packets\n", __func__, enq);
1068 }
1069
1070 static void
1071 arge_stop(struct arge_softc *sc)
1072 {
1073         struct ifnet        *ifp;
1074
1075         ARGE_LOCK_ASSERT(sc);
1076
1077         ifp = sc->arge_ifp;
1078         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1079         if (sc->arge_miibus)
1080                 callout_stop(&sc->arge_stat_callout);
1081
1082         /* mask out interrupts */
1083         ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1084
1085         arge_reset_dma(sc);
1086 }
1087
1088
1089 static int
1090 arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1091 {
1092         struct arge_softc               *sc = ifp->if_softc;
1093         struct ifreq            *ifr = (struct ifreq *) data;
1094         struct mii_data         *mii;
1095         int                     error;
1096 #ifdef DEVICE_POLLING
1097         int                     mask;
1098 #endif
1099
1100         switch (command) {
1101         case SIOCSIFFLAGS:
1102                 ARGE_LOCK(sc);
1103                 if ((ifp->if_flags & IFF_UP) != 0) {
1104                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1105                                 if (((ifp->if_flags ^ sc->arge_if_flags)
1106                                     & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1107                                         /* XXX: handle promisc & multi flags */
1108                                 }
1109                                         
1110                         } else {
1111                                 if (!sc->arge_detach)
1112                                         arge_init_locked(sc);
1113                         }
1114                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1115                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1116                         arge_stop(sc);
1117                 }
1118                 sc->arge_if_flags = ifp->if_flags;
1119                 ARGE_UNLOCK(sc);
1120                 error = 0;
1121                 break;
1122         case SIOCADDMULTI:
1123         case SIOCDELMULTI:
1124                 /* XXX: implement SIOCDELMULTI */
1125                 error = 0;
1126                 break;
1127         case SIOCGIFMEDIA:
1128         case SIOCSIFMEDIA:
1129                 if (sc->arge_miibus) {
1130                         mii = device_get_softc(sc->arge_miibus);
1131                         error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
1132                 }
1133                 else 
1134                         error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, command);
1135                 break;
1136         case SIOCSIFCAP:
1137                 /* XXX: Check other capabilities */
1138 #ifdef DEVICE_POLLING
1139                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1140                 if (mask & IFCAP_POLLING) {
1141                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
1142                                 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1143                                 error = ether_poll_register(arge_poll, ifp);
1144                                 if (error)
1145                                         return error;
1146                                 ARGE_LOCK(sc);
1147                                 ifp->if_capenable |= IFCAP_POLLING;
1148                                 ARGE_UNLOCK(sc);
1149                         } else {
1150                                 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1151                                 error = ether_poll_deregister(ifp);
1152                                 ARGE_LOCK(sc);
1153                                 ifp->if_capenable &= ~IFCAP_POLLING;
1154                                 ARGE_UNLOCK(sc);
1155                         }
1156                 }
1157                 error = 0;
1158                 break;
1159 #endif
1160         default:
1161                 error = ether_ioctl(ifp, command, data);
1162                 break;
1163         }
1164
1165         return (error);
1166 }
1167
1168 /*
1169  * Set media options.
1170  */
1171 static int
1172 arge_ifmedia_upd(struct ifnet *ifp)
1173 {
1174         struct arge_softc               *sc;
1175         struct mii_data         *mii;
1176         struct mii_softc        *miisc;
1177         int                     error;
1178
1179         sc = ifp->if_softc;
1180         ARGE_LOCK(sc);
1181         mii = device_get_softc(sc->arge_miibus);
1182         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1183                 PHY_RESET(miisc);
1184         error = mii_mediachg(mii);
1185         ARGE_UNLOCK(sc);
1186
1187         return (error);
1188 }
1189
1190 /*
1191  * Report current media status.
1192  */
1193 static void
1194 arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1195 {
1196         struct arge_softc               *sc = ifp->if_softc;
1197         struct mii_data         *mii;
1198
1199         mii = device_get_softc(sc->arge_miibus);
1200         ARGE_LOCK(sc);
1201         mii_pollstat(mii);
1202         ARGE_UNLOCK(sc);
1203         ifmr->ifm_active = mii->mii_media_active;
1204         ifmr->ifm_status = mii->mii_media_status;
1205 }
1206
1207 struct arge_dmamap_arg {
1208         bus_addr_t      arge_busaddr;
1209 };
1210
1211 static void
1212 arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1213 {
1214         struct arge_dmamap_arg  *ctx;
1215
1216         if (error != 0)
1217                 return;
1218         ctx = arg;
1219         ctx->arge_busaddr = segs[0].ds_addr;
1220 }
1221
1222 static int
1223 arge_dma_alloc(struct arge_softc *sc)
1224 {
1225         struct arge_dmamap_arg  ctx;
1226         struct arge_txdesc      *txd;
1227         struct arge_rxdesc      *rxd;
1228         int                     error, i;
1229
1230         /* Create parent DMA tag. */
1231         error = bus_dma_tag_create(
1232             bus_get_dma_tag(sc->arge_dev),      /* parent */
1233             1, 0,                       /* alignment, boundary */
1234             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1235             BUS_SPACE_MAXADDR,          /* highaddr */
1236             NULL, NULL,                 /* filter, filterarg */
1237             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1238             0,                          /* nsegments */
1239             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1240             0,                          /* flags */
1241             NULL, NULL,                 /* lockfunc, lockarg */
1242             &sc->arge_cdata.arge_parent_tag);
1243         if (error != 0) {
1244                 device_printf(sc->arge_dev, "failed to create parent DMA tag\n");
1245                 goto fail;
1246         }
1247         /* Create tag for Tx ring. */
1248         error = bus_dma_tag_create(
1249             sc->arge_cdata.arge_parent_tag,     /* parent */
1250             ARGE_RING_ALIGN, 0,         /* alignment, boundary */
1251             BUS_SPACE_MAXADDR,          /* lowaddr */
1252             BUS_SPACE_MAXADDR,          /* highaddr */
1253             NULL, NULL,                 /* filter, filterarg */
1254             ARGE_TX_DMA_SIZE,           /* maxsize */
1255             1,                          /* nsegments */
1256             ARGE_TX_DMA_SIZE,           /* maxsegsize */
1257             0,                          /* flags */
1258             NULL, NULL,                 /* lockfunc, lockarg */
1259             &sc->arge_cdata.arge_tx_ring_tag);
1260         if (error != 0) {
1261                 device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n");
1262                 goto fail;
1263         }
1264
1265         /* Create tag for Rx ring. */
1266         error = bus_dma_tag_create(
1267             sc->arge_cdata.arge_parent_tag,     /* parent */
1268             ARGE_RING_ALIGN, 0,         /* alignment, boundary */
1269             BUS_SPACE_MAXADDR,          /* lowaddr */
1270             BUS_SPACE_MAXADDR,          /* highaddr */
1271             NULL, NULL,                 /* filter, filterarg */
1272             ARGE_RX_DMA_SIZE,           /* maxsize */
1273             1,                          /* nsegments */
1274             ARGE_RX_DMA_SIZE,           /* maxsegsize */
1275             0,                          /* flags */
1276             NULL, NULL,                 /* lockfunc, lockarg */
1277             &sc->arge_cdata.arge_rx_ring_tag);
1278         if (error != 0) {
1279                 device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n");
1280                 goto fail;
1281         }
1282
1283         /* Create tag for Tx buffers. */
1284         error = bus_dma_tag_create(
1285             sc->arge_cdata.arge_parent_tag,     /* parent */
1286             sizeof(uint32_t), 0,        /* alignment, boundary */
1287             BUS_SPACE_MAXADDR,          /* lowaddr */
1288             BUS_SPACE_MAXADDR,          /* highaddr */
1289             NULL, NULL,                 /* filter, filterarg */
1290             MCLBYTES * ARGE_MAXFRAGS,   /* maxsize */
1291             ARGE_MAXFRAGS,              /* nsegments */
1292             MCLBYTES,                   /* maxsegsize */
1293             0,                          /* flags */
1294             NULL, NULL,                 /* lockfunc, lockarg */
1295             &sc->arge_cdata.arge_tx_tag);
1296         if (error != 0) {
1297                 device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
1298                 goto fail;
1299         }
1300
1301         /* Create tag for Rx buffers. */
1302         error = bus_dma_tag_create(
1303             sc->arge_cdata.arge_parent_tag,     /* parent */
1304             ARGE_RX_ALIGN, 0,           /* alignment, boundary */
1305             BUS_SPACE_MAXADDR,          /* lowaddr */
1306             BUS_SPACE_MAXADDR,          /* highaddr */
1307             NULL, NULL,                 /* filter, filterarg */
1308             MCLBYTES,                   /* maxsize */
1309             ARGE_MAXFRAGS,              /* nsegments */
1310             MCLBYTES,                   /* maxsegsize */
1311             0,                          /* flags */
1312             NULL, NULL,                 /* lockfunc, lockarg */
1313             &sc->arge_cdata.arge_rx_tag);
1314         if (error != 0) {
1315                 device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
1316                 goto fail;
1317         }
1318
1319         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1320         error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
1321             (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
1322             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map);
1323         if (error != 0) {
1324                 device_printf(sc->arge_dev,
1325                     "failed to allocate DMA'able memory for Tx ring\n");
1326                 goto fail;
1327         }
1328
1329         ctx.arge_busaddr = 0;
1330         error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
1331             sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
1332             ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1333         if (error != 0 || ctx.arge_busaddr == 0) {
1334                 device_printf(sc->arge_dev,
1335                     "failed to load DMA'able memory for Tx ring\n");
1336                 goto fail;
1337         }
1338         sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;
1339
1340         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1341         error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
1342             (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
1343             BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map);
1344         if (error != 0) {
1345                 device_printf(sc->arge_dev,
1346                     "failed to allocate DMA'able memory for Rx ring\n");
1347                 goto fail;
1348         }
1349
1350         ctx.arge_busaddr = 0;
1351         error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
1352             sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
1353             ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1354         if (error != 0 || ctx.arge_busaddr == 0) {
1355                 device_printf(sc->arge_dev,
1356                     "failed to load DMA'able memory for Rx ring\n");
1357                 goto fail;
1358         }
1359         sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;
1360
1361         /* Create DMA maps for Tx buffers. */
1362         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1363                 txd = &sc->arge_cdata.arge_txdesc[i];
1364                 txd->tx_m = NULL;
1365                 txd->tx_dmamap = NULL;
1366                 error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
1367                     &txd->tx_dmamap);
1368                 if (error != 0) {
1369                         device_printf(sc->arge_dev,
1370                             "failed to create Tx dmamap\n");
1371                         goto fail;
1372                 }
1373         }
1374         /* Create DMA maps for Rx buffers. */
1375         if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
1376             &sc->arge_cdata.arge_rx_sparemap)) != 0) {
1377                 device_printf(sc->arge_dev,
1378                     "failed to create spare Rx dmamap\n");
1379                 goto fail;
1380         }
1381         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1382                 rxd = &sc->arge_cdata.arge_rxdesc[i];
1383                 rxd->rx_m = NULL;
1384                 rxd->rx_dmamap = NULL;
1385                 error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
1386                     &rxd->rx_dmamap);
1387                 if (error != 0) {
1388                         device_printf(sc->arge_dev,
1389                             "failed to create Rx dmamap\n");
1390                         goto fail;
1391                 }
1392         }
1393
1394 fail:
1395         return (error);
1396 }
1397
1398 static void
1399 arge_dma_free(struct arge_softc *sc)
1400 {
1401         struct arge_txdesc      *txd;
1402         struct arge_rxdesc      *rxd;
1403         int                     i;
1404
1405         /* Tx ring. */
1406         if (sc->arge_cdata.arge_tx_ring_tag) {
1407                 if (sc->arge_cdata.arge_tx_ring_map)
1408                         bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
1409                             sc->arge_cdata.arge_tx_ring_map);
1410                 if (sc->arge_cdata.arge_tx_ring_map &&
1411                     sc->arge_rdata.arge_tx_ring)
1412                         bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
1413                             sc->arge_rdata.arge_tx_ring,
1414                             sc->arge_cdata.arge_tx_ring_map);
1415                 sc->arge_rdata.arge_tx_ring = NULL;
1416                 sc->arge_cdata.arge_tx_ring_map = NULL;
1417                 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
1418                 sc->arge_cdata.arge_tx_ring_tag = NULL;
1419         }
1420         /* Rx ring. */
1421         if (sc->arge_cdata.arge_rx_ring_tag) {
1422                 if (sc->arge_cdata.arge_rx_ring_map)
1423                         bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
1424                             sc->arge_cdata.arge_rx_ring_map);
1425                 if (sc->arge_cdata.arge_rx_ring_map &&
1426                     sc->arge_rdata.arge_rx_ring)
1427                         bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
1428                             sc->arge_rdata.arge_rx_ring,
1429                             sc->arge_cdata.arge_rx_ring_map);
1430                 sc->arge_rdata.arge_rx_ring = NULL;
1431                 sc->arge_cdata.arge_rx_ring_map = NULL;
1432                 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
1433                 sc->arge_cdata.arge_rx_ring_tag = NULL;
1434         }
1435         /* Tx buffers. */
1436         if (sc->arge_cdata.arge_tx_tag) {
1437                 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1438                         txd = &sc->arge_cdata.arge_txdesc[i];
1439                         if (txd->tx_dmamap) {
1440                                 bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
1441                                     txd->tx_dmamap);
1442                                 txd->tx_dmamap = NULL;
1443                         }
1444                 }
1445                 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
1446                 sc->arge_cdata.arge_tx_tag = NULL;
1447         }
1448         /* Rx buffers. */
1449         if (sc->arge_cdata.arge_rx_tag) {
1450                 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1451                         rxd = &sc->arge_cdata.arge_rxdesc[i];
1452                         if (rxd->rx_dmamap) {
1453                                 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1454                                     rxd->rx_dmamap);
1455                                 rxd->rx_dmamap = NULL;
1456                         }
1457                 }
1458                 if (sc->arge_cdata.arge_rx_sparemap) {
1459                         bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
1460                             sc->arge_cdata.arge_rx_sparemap);
1461                         sc->arge_cdata.arge_rx_sparemap = 0;
1462                 }
1463                 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
1464                 sc->arge_cdata.arge_rx_tag = NULL;
1465         }
1466
1467         if (sc->arge_cdata.arge_parent_tag) {
1468                 bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
1469                 sc->arge_cdata.arge_parent_tag = NULL;
1470         }
1471 }
1472
1473 /*
1474  * Initialize the transmit descriptors.
1475  */
1476 static int
1477 arge_tx_ring_init(struct arge_softc *sc)
1478 {
1479         struct arge_ring_data   *rd;
1480         struct arge_txdesc      *txd;
1481         bus_addr_t              addr;
1482         int                     i;
1483
1484         sc->arge_cdata.arge_tx_prod = 0;
1485         sc->arge_cdata.arge_tx_cons = 0;
1486         sc->arge_cdata.arge_tx_cnt = 0;
1487
1488         rd = &sc->arge_rdata;
1489         bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
1490         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
1491                 if (i == ARGE_TX_RING_COUNT - 1)
1492                         addr = ARGE_TX_RING_ADDR(sc, 0);
1493                 else
1494                         addr = ARGE_TX_RING_ADDR(sc, i + 1);
1495                 rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
1496                 rd->arge_tx_ring[i].next_desc = addr;
1497                 txd = &sc->arge_cdata.arge_txdesc[i];
1498                 txd->tx_m = NULL;
1499         }
1500
1501         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1502             sc->arge_cdata.arge_tx_ring_map,
1503             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1504
1505         return (0);
1506 }
1507
1508 /*
1509  * Initialize the RX descriptors and allocate mbufs for them. Note that
1510  * we arrange the descriptors in a closed ring, so that the last descriptor
1511  * points back to the first.
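 *
 * Unlike the TX ring, every RX slot is also populated with an mbuf
 * cluster up front via arge_newbuf(), so the DMA engine always has a
 * buffer to receive into; if any allocation fails, the whole init is
 * abandoned with ENOBUFS.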
1512  */
1513 static int
1514 arge_rx_ring_init(struct arge_softc *sc)
1515 {
1516         struct arge_ring_data   *rd;
1517         struct arge_rxdesc      *rxd;
1518         bus_addr_t              addr;
1519         int                     i;
1520
1521         sc->arge_cdata.arge_rx_cons = 0;
1522
1523         rd = &sc->arge_rdata;
1524         bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
1525         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
1526                 rxd = &sc->arge_cdata.arge_rxdesc[i];
1527                 rxd->rx_m = NULL;
1528                 rxd->desc = &rd->arge_rx_ring[i];
1529                 if (i == ARGE_RX_RING_COUNT - 1)
1530                         addr = ARGE_RX_RING_ADDR(sc, 0);
1531                 else
1532                         addr = ARGE_RX_RING_ADDR(sc, i + 1);
1533                 rd->arge_rx_ring[i].next_desc = addr;
1534                 if (arge_newbuf(sc, i) != 0) {
1535                         return (ENOBUFS);
1536                 }
1537         }
1538
1539         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1540             sc->arge_cdata.arge_rx_ring_map,
1541             BUS_DMASYNC_PREWRITE);
1542
1543         return (0);
1544 }
1545
1546 /*
1547  * Initialize an RX descriptor and attach an MBUF cluster.
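 *
 * The cluster is first loaded into a preallocated spare DMA map; only if
 * that succeeds is the spare swapped with the descriptor's current map,
 * so a failed allocation never leaves the slot without a mapped buffer.
 * The m_adj() by sizeof(uint64_t) keeps the buffer start word-aligned
 * for the MAC while leaving headroom for arge_fixup_rx() to shift the
 * payload back by ETHER_ALIGN later.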
1548  */
1549 static int
1550 arge_newbuf(struct arge_softc *sc, int idx)
1551 {
1552         struct arge_desc        *desc;
1553         struct arge_rxdesc      *rxd;
1554         struct mbuf             *m;
1555         bus_dma_segment_t       segs[1];
1556         bus_dmamap_t            map;
1557         int                     nsegs;
1558
1559         m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1560         if (m == NULL)
1561                 return (ENOBUFS);
1562         m->m_len = m->m_pkthdr.len = MCLBYTES;
1563         m_adj(m, sizeof(uint64_t));
1564
1565         if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
1566             sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
1567                 m_freem(m);
1568                 return (ENOBUFS);
1569         }
1570         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
1571
1572         rxd = &sc->arge_cdata.arge_rxdesc[idx];
1573         if (rxd->rx_m != NULL) {
1574                 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
1575         }
1576         map = rxd->rx_dmamap;
1577         rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
1578         sc->arge_cdata.arge_rx_sparemap = map;
1579         rxd->rx_m = m;
1580         desc = rxd->desc;
1581         if (segs[0].ds_addr & 3)
1582                 panic("RX packet address unaligned");
1583         desc->packet_addr = segs[0].ds_addr;
1584         desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);
1585
1586         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1587             sc->arge_cdata.arge_rx_ring_map,
1588             BUS_DMASYNC_PREWRITE);
1589
1590         return (0);
1591 }
1592
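/*
 * The MAC requires word-aligned receive buffers (arge_newbuf() panics on
 * a misaligned DMA address), so a frame arrives with its 14-byte Ethernet
 * header word-aligned and the IP header therefore misaligned.  Shift the
 * whole frame back by ETHER_ALIGN (2) bytes, one 16-bit word at a time
 * plus any odd trailing byte, so the payload is naturally aligned for the
 * upper layers.
 */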
1593 static __inline void
1594 arge_fixup_rx(struct mbuf *m)
1595 {
1596         int             i;
1597         uint16_t        *src, *dst;
1598
1599         src = mtod(m, uint16_t *);
1600         dst = src - 1;
1601
1602         for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
1603                 *dst++ = *src++;
1604         }
1605
1606         if (m->m_len % sizeof(uint16_t))
1607                 *(uint8_t *)dst = *(uint8_t *)src;
1608
1609         m->m_data -= ETHER_ALIGN;
1610 }
1611
1612 #ifdef DEVICE_POLLING
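/*
 * DEVICE_POLLING handler: when polling is enabled on the interface
 * (presumably registered via ether_poll_register() elsewhere in the
 * driver), the kernel calls this instead of relying on interrupts.  It
 * simply drains the TX and RX rings under the driver lock and reports
 * how many packets were received; the count budget is not used to bound
 * the work here.
 */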
1613 static int
1614 arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1615 {
1616         struct arge_softc *sc = ifp->if_softc;
1617         int rx_npkts = 0;
1618
1619         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
1620                 ARGE_LOCK(sc);
1621                 arge_tx_locked(sc);
1622                 rx_npkts = arge_rx_locked(sc);
1623                 ARGE_UNLOCK(sc);
1624         }
1625
1626         return (rx_npkts);
1627 }
1628 #endif /* DEVICE_POLLING */
1629
1630
1631 static void
1632 arge_tx_locked(struct arge_softc *sc)
1633 {
1634         struct arge_txdesc      *txd;
1635         struct arge_desc        *cur_tx;
1636         struct ifnet            *ifp;
1637         uint32_t                ctrl;
1638         int                     cons, prod;
1639
1640         ARGE_LOCK_ASSERT(sc);
1641
1642         cons = sc->arge_cdata.arge_tx_cons;
1643         prod = sc->arge_cdata.arge_tx_prod;
1644
1645         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: cons=%d, prod=%d\n", __func__, cons, prod);
1646
1647         if (cons == prod)
1648                 return;
1649
1650         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1651             sc->arge_cdata.arge_tx_ring_map,
1652             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1653
1654         ifp = sc->arge_ifp;
1655         /*
1656          * Go through our tx list and free mbufs for those
1657          * frames that have been transmitted.
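         *
         * A descriptor whose ARGE_DESC_EMPTY bit is set has been handed
         * back by the hardware; stop at the first one that is still
         * pending.  Only the head descriptor of a frame carries the mbuf
         * pointer, so tx_m may legitimately be NULL for the rest.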
1658          */
1659         for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
1660                 cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
1661                 ctrl = cur_tx->packet_ctrl;
1662                 /* Check if descriptor has "finished" flag */
1663                 if ((ctrl & ARGE_DESC_EMPTY) == 0)
1664                         break;
1665
1666                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
1667
1668                 sc->arge_cdata.arge_tx_cnt--;
1669                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1670
1671                 txd = &sc->arge_cdata.arge_txdesc[cons];
1672
1673                 ifp->if_opackets++;
1674
1675                 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
1676                     BUS_DMASYNC_POSTWRITE);
1677                 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
1678
1679                 /* Free only if it's first descriptor in list */
1680                 if (txd->tx_m)
1681                         m_freem(txd->tx_m);
1682                 txd->tx_m = NULL;
1683
1684                 /* reset descriptor */
1685                 cur_tx->packet_addr = 0;
1686         }
1687
1688         sc->arge_cdata.arge_tx_cons = cons;
1689
1690         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1691             sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
1692 }
1693
1694
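/*
 * Harvest received frames: walk the RX ring from the current consumer
 * index, hand each completed mbuf to the stack (dropping the driver lock
 * around if_input()), then refill every consumed slot with a fresh
 * cluster via arge_newbuf().  ARGE_INC() presumably just advances an
 * index modulo the ring size.
 */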
1695 static int
1696 arge_rx_locked(struct arge_softc *sc)
1697 {
1698         struct arge_rxdesc      *rxd;
1699         struct ifnet            *ifp = sc->arge_ifp;
1700         int                     cons, prog, packet_len, i;
1701         struct arge_desc        *cur_rx;
1702         struct mbuf             *m;
1703         int                     rx_npkts = 0;
1704
1705         ARGE_LOCK_ASSERT(sc);
1706
1707         cons = sc->arge_cdata.arge_rx_cons;
1708
1709         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1710             sc->arge_cdata.arge_rx_ring_map,
1711             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1712
1713         for (prog = 0; prog < ARGE_RX_RING_COUNT; 
1714             ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
1715                 cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
1716                 rxd = &sc->arge_cdata.arge_rxdesc[cons];
1717                 m = rxd->rx_m;
1718
1719                 if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
1720                         break;
1721
1722                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
1723
1724                 prog++;
1725
1726                 packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
1727                 bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
1728                     BUS_DMASYNC_POSTREAD);
1729                 m = rxd->rx_m;
1730
1731                 arge_fixup_rx(m);
1732                 m->m_pkthdr.rcvif = ifp;
1733                 /* Skip 4 bytes of CRC */
1734                 m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
1735                 ifp->if_ipackets++;
1736                 rx_npkts++;
1737
1738                 ARGE_UNLOCK(sc);
1739                 (*ifp->if_input)(ifp, m);
1740                 ARGE_LOCK(sc);
1741                 cur_rx->packet_addr = 0;
1742         }
1743
1744         if (prog > 0) {
1745
1746                 i = sc->arge_cdata.arge_rx_cons;
1747                 for (; prog > 0 ; prog--) {
1748                         if (arge_newbuf(sc, i) != 0) {
1749                                 device_printf(sc->arge_dev, 
1750                                     "Failed to allocate buffer\n");
1751                                 break;
1752                         }
1753                         ARGE_INC(i, ARGE_RX_RING_COUNT);
1754                 }
1755
1756                 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
1757                     sc->arge_cdata.arge_rx_ring_map,
1758                     BUS_DMASYNC_PREWRITE);
1759
1760                 sc->arge_cdata.arge_rx_cons = cons;
1761         }
1762
1763         return (rx_npkts);
1764 }
1765
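/*
 * Primary interrupt filter: runs in interrupt context, so it only latches
 * the pending DMA status bits into arge_intr_status, masks further DMA
 * interrupts and schedules the ithread handler (arge_intr).  Anything
 * else is reported as a stray interrupt.
 */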
1766 static int
1767 arge_intr_filter(void *arg)
1768 {
1769         struct arge_softc       *sc = arg;
1770         uint32_t                status, ints;
1771
1772         status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1773         ints = ARGE_READ(sc, AR71XX_DMA_INTR);
1774
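        /*
         * The "\20..." strings drive printf(9)'s %b conversion: the first
         * character is the numeric base (\20 == 16) and each following
         * "\<bit>NAME" pair names that (1-origin) bit when it is set.
         */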
1775         ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints,
1776             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1777             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1778         ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status, 
1779             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1780             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1781
1782         if (status & DMA_INTR_ALL) {
1783                 sc->arge_intr_status |= status;
1784                 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1785                 return (FILTER_SCHEDULE_THREAD);
1786         } 
1787
1788         sc->arge_intr_status = 0;
1789         return (FILTER_STRAY);
1790 }
1791
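/*
 * Interrupt thread handler.  Combines the live DMA status with whatever
 * arge_intr_filter() latched, handles bus errors, RX completion, RX
 * overflow, TX completion and TX underrun, restarts transmission if the
 * send queue has backed up, and finally re-enables the DMA interrupt
 * mask that the filter cleared.
 */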
1792 static void
1793 arge_intr(void *arg)
1794 {
1795         struct arge_softc       *sc = arg;
1796         uint32_t                status;
1797         struct ifnet            *ifp = sc->arge_ifp;
1798
1799         status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
1800         status |= sc->arge_intr_status;
1801
1802         ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status, 
1803             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
1804             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
1805
1806         /* 
1807          * Is it our interrupt at all? 
1808          */
1809         if (status == 0)
1810                 return;
1811
1812         if (status & DMA_INTR_RX_BUS_ERROR) {
1813                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
1814                 device_printf(sc->arge_dev, "RX bus error\n");
1815                 return;
1816         }
1817
1818         if (status & DMA_INTR_TX_BUS_ERROR) {
1819                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
1820                 device_printf(sc->arge_dev, "TX bus error\n");
1821                 return;
1822         }
1823
1824         ARGE_LOCK(sc);
1825
1826         if (status & DMA_INTR_RX_PKT_RCVD)
1827                 arge_rx_locked(sc);
1828
1829         /* 
1830          * RX overrun disables the receiver. 
1831          * Clear indication and re-enable rx. 
1832          */
1833         if (status & DMA_INTR_RX_OVERFLOW) {
1834                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
1835                 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
1836                 sc->stats.rx_overflow++;
1837         }
1838
1839         if (status & DMA_INTR_TX_PKT_SENT)
1840                 arge_tx_locked(sc);
1841         /* 
1842          * Underrun turns off TX. Clear underrun indication. 
1843          * If there's anything left in the ring, reactivate the tx. 
1844          */
1845         if (status & DMA_INTR_TX_UNDERRUN) {
1846                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
1847                 sc->stats.tx_underflow++;
1848                 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: TX underrun; tx_cnt=%d\n", __func__, sc->arge_cdata.arge_tx_cnt);
1849         if (sc->arge_cdata.arge_tx_cnt > 0) {
1850                         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 
1851                             DMA_TX_CONTROL_EN);
1852                 }
1853         }
1854
1855         /*
1856          * If we've finished TXing and there's space for more packets
1857          * to be queued for TX, do so. Otherwise we may end up in a
1858          * situation where the interface send queue was filled
1859          * whilst the hardware queue was full; the hardware queue
1860          * was then drained, but the interface send queue wasn't,
1861          * and thus if_start() is never called to kick-start
1862          * the send process (and all subsequent packets are simply
1863          * discarded).
1864          *
1865          * XXX TODO: make sure that the hardware deals nicely
1866          * with the possibility of the TX queue being enabled above
1867          * after a TX underrun, and then having more packets queued
1868          * to it below.
1869          */
1870         if (status & (DMA_INTR_TX_PKT_SENT | DMA_INTR_TX_UNDERRUN) &&
1871             (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
1872                 if (!IFQ_IS_EMPTY(&ifp->if_snd))
1873                         arge_start_locked(ifp);
1874         }
1875
1876         /*
1877          * We handled all bits, clear status
1878          */
1879         sc->arge_intr_status = 0;
1880         ARGE_UNLOCK(sc);
1881         /*
1882          * re-enable all interrupts 
1883          */
1884         ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1885 }
1886
1887
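/*
 * Periodic callout: poll the PHY state via mii_tick() and re-arm the
 * one-second timer for as long as an miibus instance is attached.
 */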
1888 static void
1889 arge_tick(void *xsc)
1890 {
1891         struct arge_softc       *sc = xsc;
1892         struct mii_data         *mii;
1893
1894         ARGE_LOCK_ASSERT(sc);
1895
1896         if (sc->arge_miibus) {
1897                 mii = device_get_softc(sc->arge_miibus);
1898                 mii_tick(mii);
1899                 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
1900         }
1901 }
1902
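/*
 * ifmedia handlers used for multi-PHY configurations, apparently in place
 * of the miibus-driven media handling.  The media is fixed by the board
 * configuration: requests other than a fixed Ethernet type are rejected
 * (AUTO in particular), and status always reports the configured media
 * type and duplex as active.
 */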
1903 int
1904 arge_multiphy_mediachange(struct ifnet *ifp)
1905 {
1906         struct arge_softc *sc = ifp->if_softc;
1907         struct ifmedia *ifm = &sc->arge_ifmedia;
1908         struct ifmedia_entry *ife = ifm->ifm_cur;
1909
1910         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1911                 return (EINVAL);
1912
1913         if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
1914                 device_printf(sc->arge_dev,
1915                     "AUTO is not supported for multiphy MAC\n");
1916                 return (EINVAL);
1917         }
1918
1919         /*
1920          * Ignore everything
1921          */
1922         return (0);
1923 }
1924
1925 void
1926 arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
1927 {
1928         struct arge_softc *sc = ifp->if_softc;
1929
1930         ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
1931         ifmr->ifm_active = IFM_ETHER | sc->arge_media_type | 
1932             sc->arge_duplex_mode;
1933 }
1934