1 /*-
2  * Copyright (c) 2009, Oleksandr Tymoshenko
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30
31 /*
32  * AR71XX gigabit ethernet driver
33  */
34 #ifdef HAVE_KERNEL_OPTION_HEADERS
35 #include "opt_device_polling.h"
36 #endif
37
38 #include "opt_arge.h"
39
40 #include <sys/param.h>
41 #include <sys/endian.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
44 #include <sys/lock.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/kernel.h>
49 #include <sys/module.h>
50 #include <sys/socket.h>
51 #include <sys/taskqueue.h>
52 #include <sys/sysctl.h>
53
54 #include <net/if.h>
55 #include <net/if_var.h>
56 #include <net/if_media.h>
57 #include <net/ethernet.h>
58 #include <net/if_types.h>
59
60 #include <net/bpf.h>
61
62 #include <machine/bus.h>
63 #include <machine/cache.h>
64 #include <machine/resource.h>
65 #include <vm/vm_param.h>
66 #include <vm/vm.h>
67 #include <vm/pmap.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70
71 #include <dev/mii/mii.h>
72 #include <dev/mii/miivar.h>
73
74 #include <dev/pci/pcireg.h>
75 #include <dev/pci/pcivar.h>
76
77 #include "opt_arge.h"
78
79 #if defined(ARGE_MDIO)
80 #include <dev/mdio/mdio.h>
81 #include <dev/etherswitch/miiproxy.h>
82 #include "mdio_if.h"
83 #endif
84
85
86 MODULE_DEPEND(arge, ether, 1, 1, 1);
87 MODULE_DEPEND(arge, miibus, 1, 1, 1);
88 MODULE_VERSION(arge, 1);
89
90 #include "miibus_if.h"
91
92 #include <net/ethernet.h>
93
94 #include <mips/atheros/ar71xxreg.h>
95 #include <mips/atheros/ar934xreg.h>     /* XXX tsk! */
96 #include <mips/atheros/qca953xreg.h>    /* XXX tsk! */
97 #include <mips/atheros/qca955xreg.h>    /* XXX tsk! */
98 #include <mips/atheros/if_argevar.h>
99 #include <mips/atheros/ar71xx_setup.h>
100 #include <mips/atheros/ar71xx_cpudef.h>
101 #include <mips/atheros/ar71xx_macaddr.h>
102
103 typedef enum {
104         ARGE_DBG_MII    =       0x00000001,
105         ARGE_DBG_INTR   =       0x00000002,
106         ARGE_DBG_TX     =       0x00000004,
107         ARGE_DBG_RX     =       0x00000008,
108         ARGE_DBG_ERR    =       0x00000010,
109         ARGE_DBG_RESET  =       0x00000020,
110         ARGE_DBG_PLL    =       0x00000040,
111 } arge_debug_flags;
112
113 static const char * arge_miicfg_str[] = {
114         "NONE",
115         "GMII",
116         "MII",
117         "RGMII",
118         "RMII",
119         "SGMII"
120 };
121
122 #ifdef ARGE_DEBUG
123 #define ARGEDEBUG(_sc, _m, ...)                                         \
124         do {                                                            \
125                 if ((_m) & (_sc)->arge_debug)                           \
126                         device_printf((_sc)->arge_dev, __VA_ARGS__);    \
127         } while (0)
128 #else
129 #define ARGEDEBUG(_sc, _m, ...)
130 #endif
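
/*
 * Usage sketch for the macro above (illustrative, not called here): with
 * ARGE_DEBUG compiled in, output is gated by the per-device "debug" sysctl
 * bitmask (dev.arge.N.debug), so e.g.
 *
 *      ARGEDEBUG(sc, ARGE_DBG_INTR, "%s: status=0x%08x\n", __func__, status);
 *
 * only prints when the ARGE_DBG_INTR bit (0x2) is set in that sysctl.
 */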
131
132 static int arge_attach(device_t);
133 static int arge_detach(device_t);
134 static void arge_flush_ddr(struct arge_softc *);
135 static int arge_ifmedia_upd(struct ifnet *);
136 static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
137 static int arge_ioctl(struct ifnet *, u_long, caddr_t);
138 static void arge_init(void *);
139 static void arge_init_locked(struct arge_softc *);
140 static void arge_link_task(void *, int);
141 static void arge_update_link_locked(struct arge_softc *sc);
142 static void arge_set_pll(struct arge_softc *, int, int);
143 static int arge_miibus_readreg(device_t, int, int);
144 static void arge_miibus_statchg(device_t);
145 static int arge_miibus_writereg(device_t, int, int, int);
146 static int arge_probe(device_t);
147 static void arge_reset_dma(struct arge_softc *);
148 static int arge_resume(device_t);
149 static int arge_rx_ring_init(struct arge_softc *);
150 static void arge_rx_ring_free(struct arge_softc *sc);
151 static int arge_tx_ring_init(struct arge_softc *);
152 static void arge_tx_ring_free(struct arge_softc *);
153 #ifdef DEVICE_POLLING
154 static int arge_poll(struct ifnet *, enum poll_cmd, int);
155 #endif
156 static int arge_shutdown(device_t);
157 static void arge_start(struct ifnet *);
158 static void arge_start_locked(struct ifnet *);
159 static void arge_stop(struct arge_softc *);
160 static int arge_suspend(device_t);
161
162 static int arge_rx_locked(struct arge_softc *);
163 static void arge_tx_locked(struct arge_softc *);
164 static void arge_intr(void *);
165 static int arge_intr_filter(void *);
166 static void arge_tick(void *);
167
168 static void arge_hinted_child(device_t bus, const char *dname, int dunit);
169
170 /*
171  * ifmedia callbacks for multiPHY MAC
172  */
173 void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *);
174 int arge_multiphy_mediachange(struct ifnet *);
175
176 static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
177 static int arge_dma_alloc(struct arge_softc *);
178 static void arge_dma_free(struct arge_softc *);
179 static int arge_newbuf(struct arge_softc *, int);
180 static __inline void arge_fixup_rx(struct mbuf *);
181
182 static device_method_t arge_methods[] = {
183         /* Device interface */
184         DEVMETHOD(device_probe,         arge_probe),
185         DEVMETHOD(device_attach,        arge_attach),
186         DEVMETHOD(device_detach,        arge_detach),
187         DEVMETHOD(device_suspend,       arge_suspend),
188         DEVMETHOD(device_resume,        arge_resume),
189         DEVMETHOD(device_shutdown,      arge_shutdown),
190
191         /* MII interface */
192         DEVMETHOD(miibus_readreg,       arge_miibus_readreg),
193         DEVMETHOD(miibus_writereg,      arge_miibus_writereg),
194         DEVMETHOD(miibus_statchg,       arge_miibus_statchg),
195
196         /* bus interface */
197         DEVMETHOD(bus_add_child,        device_add_child_ordered),
198         DEVMETHOD(bus_hinted_child,     arge_hinted_child),
199
200         DEVMETHOD_END
201 };
202
203 static driver_t arge_driver = {
204         "arge",
205         arge_methods,
206         sizeof(struct arge_softc)
207 };
208
209 static devclass_t arge_devclass;
210
211 DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0);
212 DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0);
213
214 #if defined(ARGE_MDIO)
215 static int argemdio_probe(device_t);
216 static int argemdio_attach(device_t);
217 static int argemdio_detach(device_t);
218
219 /*
220  * Declare an additional, separate driver for accessing the MDIO bus.
221  */
222 static device_method_t argemdio_methods[] = {
223         /* Device interface */
224         DEVMETHOD(device_probe,         argemdio_probe),
225         DEVMETHOD(device_attach,        argemdio_attach),
226         DEVMETHOD(device_detach,        argemdio_detach),
227
228         /* bus interface */
229         DEVMETHOD(bus_add_child,        device_add_child_ordered),
230         
231         /* MDIO access */
232         DEVMETHOD(mdio_readreg,         arge_miibus_readreg),
233         DEVMETHOD(mdio_writereg,        arge_miibus_writereg),
        DEVMETHOD_END
234 };
235
236 DEFINE_CLASS_0(argemdio, argemdio_driver, argemdio_methods,
237     sizeof(struct arge_softc));
238 static devclass_t argemdio_devclass;
239
240 DRIVER_MODULE(miiproxy, arge, miiproxy_driver, miiproxy_devclass, 0, 0);
241 DRIVER_MODULE(argemdio, nexus, argemdio_driver, argemdio_devclass, 0, 0);
242 DRIVER_MODULE(mdio, argemdio, mdio_driver, mdio_devclass, 0, 0);
243 #endif
244
245 static struct mtx miibus_mtx;
246
247 MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF);
248
249 /*
250  * Flushes all pending DDR writes for the given MAC unit.
251  *
252  * XXX this needs to be done at interrupt time! Grr!
253  */
254 static void
255 arge_flush_ddr(struct arge_softc *sc)
256 {
257         switch (sc->arge_mac_unit) {
258         case 0:
259                 ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_GE0);
260                 break;
261         case 1:
262                 ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_GE1);
263                 break;
264         default:
265                 device_printf(sc->arge_dev, "%s: unknown unit (%d)\n",
266                     __func__,
267                     sc->arge_mac_unit);
268                 break;
269         }
270 }
271
272 static int
273 arge_probe(device_t dev)
274 {
275
276         device_set_desc(dev, "Atheros AR71xx built-in ethernet interface");
277         return (BUS_PROBE_NOWILDCARD);
278 }
279
280 #ifdef  ARGE_DEBUG
281 static void
282 arge_attach_intr_sysctl(device_t dev, struct sysctl_oid_list *parent)
283 {
284         struct arge_softc *sc = device_get_softc(dev);
285         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
286         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
287         struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
288         char sn[8];
289         int i;
290
291         tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "intr",
292             CTLFLAG_RD, NULL, "Interrupt statistics");
293         child = SYSCTL_CHILDREN(tree);
294         for (i = 0; i < 32; i++) {
295                 snprintf(sn, sizeof(sn), "%d", i);
296                 SYSCTL_ADD_UINT(ctx, child, OID_AUTO, sn, CTLFLAG_RD,
297                     &sc->intr_stats.count[i], 0, "");
298         }
299 }
300 #endif
301
302 static void
303 arge_attach_sysctl(device_t dev)
304 {
305         struct arge_softc *sc = device_get_softc(dev);
306         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
307         struct sysctl_oid *tree = device_get_sysctl_tree(dev);
308
309 #ifdef  ARGE_DEBUG
310         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
311                 "debug", CTLFLAG_RW, &sc->arge_debug, 0,
312                 "arge interface debugging flags");
313         arge_attach_intr_sysctl(dev, SYSCTL_CHILDREN(tree));
314 #endif
315
316         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
317                 "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0,
318                 "number of TX aligned packets");
319
320         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
321                 "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned,
322                 0, "number of TX unaligned packets");
323
324         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
325                 "tx_pkts_unaligned_start", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned_start,
326                 0, "number of TX unaligned packets (start)");
327
328         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
329                 "tx_pkts_unaligned_len", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned_len,
330                 0, "number of TX unaligned packets (len)");
331
332         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
333                 "tx_pkts_nosegs", CTLFLAG_RW, &sc->stats.tx_pkts_nosegs,
334                 0, "number of TX packets failed due to no free ring slots");
335
336         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
337                 "intr_stray_filter", CTLFLAG_RW, &sc->stats.intr_stray,
338                 0, "number of stray interrupts (filter)");
339
340         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
341                 "intr_stray_intr", CTLFLAG_RW, &sc->stats.intr_stray2,
342                 0, "number of stray interrupts (intr)");
343
344         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
345                 "intr_ok", CTLFLAG_RW, &sc->stats.intr_ok,
346                 0, "number of OK interrupts");
347 #ifdef  ARGE_DEBUG
348         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod",
349             CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, "");
350         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons",
351             CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, "");
352         SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt",
353             CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, "");
354 #endif
355 }
356
357 static void
358 arge_reset_mac(struct arge_softc *sc)
359 {
360         uint32_t reg;
361         uint32_t reset_reg;
362
363         ARGEDEBUG(sc, ARGE_DBG_RESET, "%s called\n", __func__);
364
365         /* Step 1. Soft-reset MAC */
366         ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET);
367         DELAY(20);
368
369         /* Step 2. Punt the MAC core from the central reset register */
370         /*
371          * XXX TODO: migrate this (and other) chip specific stuff into
372          * a chipdef method.
373          */
374         if (sc->arge_mac_unit == 0) {
375                 reset_reg = RST_RESET_GE0_MAC;
376         } else {
377                 reset_reg = RST_RESET_GE1_MAC;
378         }
379
380         /*
381          * AR934x (and later) also needs the MDIO block reset.
382          * XXX should methodize this!
383          */
384         if (ar71xx_soc == AR71XX_SOC_AR9341 ||
385            ar71xx_soc == AR71XX_SOC_AR9342 ||
386            ar71xx_soc == AR71XX_SOC_AR9344) {
387                 if (sc->arge_mac_unit == 0) {
388                         reset_reg |= AR934X_RESET_GE0_MDIO;
389                 } else {
390                         reset_reg |= AR934X_RESET_GE1_MDIO;
391                 }
392         }
393
394         if (ar71xx_soc == AR71XX_SOC_QCA9556 ||
395            ar71xx_soc == AR71XX_SOC_QCA9558) {
396                 if (sc->arge_mac_unit == 0) {
397                         reset_reg |= QCA955X_RESET_GE0_MDIO;
398                 } else {
399                         reset_reg |= QCA955X_RESET_GE1_MDIO;
400                 }
401         }
402
403         if (ar71xx_soc == AR71XX_SOC_QCA9533 ||
404            ar71xx_soc == AR71XX_SOC_QCA9533_V2) {
405                 if (sc->arge_mac_unit == 0) {
406                         reset_reg |= QCA953X_RESET_GE0_MDIO;
407                 } else {
408                         reset_reg |= QCA953X_RESET_GE1_MDIO;
409                 }
410         }
411
412         ar71xx_device_stop(reset_reg);
413         DELAY(100);
414         ar71xx_device_start(reset_reg);
415
416         /* Step 3. Reconfigure MAC block */
417         ARGE_WRITE(sc, AR71XX_MAC_CFG1,
418                 MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE |
419                 MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE);
420
421         reg = ARGE_READ(sc, AR71XX_MAC_CFG2);
422         reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD;
423         ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg);
424
425         ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536);
426 }
427
428 /*
429  * These values map to the divisor values programmed into
430  * AR71XX_MAC_MII_CFG.
431  *
432  * The index of each value corresponds to the divisor section
433  * value in AR71XX_MAC_MII_CFG (ie, table[0] means '0' in
434  * AR71XX_MAC_MII_CFG, table[1] means '1', etc.)
435  */
436 static const uint32_t ar71xx_mdio_div_table[] = {
437         4, 4, 6, 8, 10, 14, 20, 28,
438 };
439
440 static const uint32_t ar7240_mdio_div_table[] = {
441         2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96,
442 };
443
444 static const uint32_t ar933x_mdio_div_table[] = {
445         4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98,
446 };
447
448 /*
449  * Lookup the divisor to use based on the given frequency.
450  *
451  * Returns the divisor table index to use, or a negative errno on error.
452  */
453 static int
454 arge_mdio_get_divider(struct arge_softc *sc, unsigned long mdio_clock)
455 {
456         unsigned long ref_clock, t;
457         const uint32_t *table;
458         int ndivs;
459         int i;
460
461         /*
462          * This is the base MDIO frequency on the SoC.
463          * The dividers .. well, divide. Duh.
464          */
465         ref_clock = ar71xx_mdio_freq();
466
467         /*
468          * If either clock is undefined, just tell the
469          * caller to fall through to the defaults.
470          */
471         if (ref_clock == 0 || mdio_clock == 0)
472                 return (-EINVAL);
473
474         /*
475          * Pick the correct table!
476          */
477         switch (ar71xx_soc) {
478         case AR71XX_SOC_AR9330:
479         case AR71XX_SOC_AR9331:
480         case AR71XX_SOC_AR9341:
481         case AR71XX_SOC_AR9342:
482         case AR71XX_SOC_AR9344:
483         case AR71XX_SOC_QCA9533:
484         case AR71XX_SOC_QCA9533_V2:
485         case AR71XX_SOC_QCA9556:
486         case AR71XX_SOC_QCA9558:
487                 table = ar933x_mdio_div_table;
488                 ndivs = nitems(ar933x_mdio_div_table);
489                 break;
490
491         case AR71XX_SOC_AR7240:
492         case AR71XX_SOC_AR7241:
493         case AR71XX_SOC_AR7242:
494                 table = ar7240_mdio_div_table;
495                 ndivs = nitems(ar7240_mdio_div_table);
496                 break;
497
498         default:
499                 table = ar71xx_mdio_div_table;
500                 ndivs = nitems(ar71xx_mdio_div_table);
501         }
502
503         /*
504          * Now, walk through the list and find the first divisor
505          * that falls under the target MDIO frequency.
506          *
507          * The divisors go up, but the corresponding frequencies
508          * are actually decreasing.
509          */
510         for (i = 0; i < ndivs; i++) {
511                 t = ref_clock / table[i];
512                 if (t <= mdio_clock) {
513                         return (i);
514                 }
515         }
516
517         ARGEDEBUG(sc, ARGE_DBG_RESET,
518             "No divider found; MDIO=%lu Hz; target=%lu Hz\n",
519                 ref_clock, mdio_clock);
520         return (-ENOENT);
521 }
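
/*
 * Worked example for the lookup above (illustrative; assumes a hypothetical
 * 100 MHz MDIO reference clock from ar71xx_mdio_freq()): asking for a 2 MHz
 * MDIO clock on an AR933x-class SoC walks ar933x_mdio_div_table until
 * ref_clock / table[i] <= mdio_clock:
 *
 *      100 MHz / 42 (index 9)  ~= 2.38 MHz   -> still too fast
 *      100 MHz / 50 (index 10)  = 2.00 MHz   -> <= target, return index 10
 *
 * Index 10 is then what ends up in the divider field of AR71XX_MAC_MII_CFG.
 */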
522
523 /*
524  * Fetch the MDIO bus clock rate.
525  *
526  * For now, the default is DIV_28 for everything
527  * except AR934x and later SoCs, which use DIV_58.
528  *
529  * It will definitely need updating to take into account
530  * the MDIO bus core clock rate and the target clock
531  * rate for the chip.
532  */
533 static uint32_t
534 arge_fetch_mdiobus_clock_rate(struct arge_softc *sc)
535 {
536         int mdio_freq, div;
537
538         /*
539          * Is the MDIO frequency defined? If so, find a divisor that
540          * makes reasonable sense.  Don't overshoot the frequency.
541          */
542         if (resource_int_value(device_get_name(sc->arge_dev),
543             device_get_unit(sc->arge_dev),
544             "mdio_freq",
545             &mdio_freq) == 0) {
546                 sc->arge_mdiofreq = mdio_freq;
547                 div = arge_mdio_get_divider(sc, sc->arge_mdiofreq);
548                 if (bootverbose)
549                         device_printf(sc->arge_dev,
550                             "%s: mdio ref freq=%llu Hz, target freq=%llu Hz,"
551                             " divisor index=%d\n",
552                             __func__,
553                             (unsigned long long) ar71xx_mdio_freq(),
554                             (unsigned long long) mdio_freq,
555                             div);
556                 if (div >= 0)
557                         return (div);
558         }
559
560         /*
561          * Default value(s).
562          *
563          * XXX obviously these need .. fixing.
564          *
565          * From Linux/OpenWRT:
566          *
567          * + 7240? DIV_6
568          * + Builtin-switch port and not 934x? DIV_10
569          * + Not built-in switch port and 934x? DIV_58
570          * + .. else DIV_28.
571          */
572         switch (ar71xx_soc) {
573         case AR71XX_SOC_AR9341:
574         case AR71XX_SOC_AR9342:
575         case AR71XX_SOC_AR9344:
576         case AR71XX_SOC_QCA9533:
577         case AR71XX_SOC_QCA9533_V2:
578         case AR71XX_SOC_QCA9556:
579         case AR71XX_SOC_QCA9558:
580                 return (MAC_MII_CFG_CLOCK_DIV_58);
581                 break;
582         default:
583                 return (MAC_MII_CFG_CLOCK_DIV_28);
584         }
585 }
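
/*
 * Example (illustrative only): the target MDIO frequency used above can be
 * supplied as a per-unit device hint, e.g. in /boot/device.hints:
 *
 *      hint.arge.0.mdio_freq="2000000"
 *
 * The value is in Hz; if the hint is absent, or no suitable divider is
 * found, the per-SoC defaults above (DIV_58 or DIV_28) are used instead.
 */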
586
587 static void
588 arge_reset_miibus(struct arge_softc *sc)
589 {
590         uint32_t mdio_div;
591
592         mdio_div = arge_fetch_mdiobus_clock_rate(sc);
593
594         /*
595          * XXX AR934x and later; should we be also resetting the
596          * MDIO block(s) using the reset register block?
597          */
598
599         /* Reset MII bus; program in the default divisor */
600         ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET | mdio_div);
601         DELAY(100);
602         ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, mdio_div);
603         DELAY(100);
604 }
605
606 static void
607 arge_fetch_pll_config(struct arge_softc *sc)
608 {
609         long int val;
610
611         if (resource_long_value(device_get_name(sc->arge_dev),
612             device_get_unit(sc->arge_dev),
613             "pll_10", &val) == 0) {
614                 sc->arge_pllcfg.pll_10 = val;
615                 device_printf(sc->arge_dev, "%s: pll_10 = 0x%x\n",
616                     __func__, (int) val);
617         }
618         if (resource_long_value(device_get_name(sc->arge_dev),
619             device_get_unit(sc->arge_dev),
620             "pll_100", &val) == 0) {
621                 sc->arge_pllcfg.pll_100 = val;
622                 device_printf(sc->arge_dev, "%s: pll_100 = 0x%x\n",
623                     __func__, (int) val);
624         }
625         if (resource_long_value(device_get_name(sc->arge_dev),
626             device_get_unit(sc->arge_dev),
627             "pll_1000", &val) == 0) {
628                 sc->arge_pllcfg.pll_1000 = val;
629                 device_printf(sc->arge_dev, "%s: pll_1000 = 0x%x\n",
630                     __func__, (int) val);
631         }
632 }
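
/*
 * Example (illustrative only; the value below is a placeholder, not a
 * recommendation): the PLL overrides fetched above are per-unit hints, e.g.
 *
 *      hint.arge.0.pll_1000="0x1a000000"
 *
 * Any of pll_10, pll_100 and pll_1000 may be set; a value of 0 (or a
 * missing hint) leaves the ar71xx_device_get_eth_pll() value in use.
 */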
633
634 static int
635 arge_attach(device_t dev)
636 {
637         struct ifnet            *ifp;
638         struct arge_softc       *sc;
639         int                     error = 0, rid, i;
640         uint32_t                hint;
641         long                    eeprom_mac_addr = 0;
642         int                     miicfg = 0;
643         int                     readascii = 0;
644         int                     local_mac = 0;
645         uint8_t                 local_macaddr[ETHER_ADDR_LEN];
646         char *                  local_macstr;
647         char                    devid_str[32];
648         int                     count;
649
650         sc = device_get_softc(dev);
651         sc->arge_dev = dev;
652         sc->arge_mac_unit = device_get_unit(dev);
653
654         /*
655          * See if there's a "board" MAC address hint available for
656          * this particular device.
657          *
658          * This is in the kernel environment - it'd be nice to use the
659          * resource_*() routines, but at the point this runs during boot the
660          * resource hints still use the 'static' map and don't pull from kenv.
661          */
662         snprintf(devid_str, 32, "hint.%s.%d.macaddr",
663             device_get_name(dev),
664             device_get_unit(dev));
665         if ((local_macstr = kern_getenv(devid_str)) != NULL) {
666                 uint32_t tmpmac[ETHER_ADDR_LEN];
667
668                 /* Have a MAC address; should use it */
669                 device_printf(dev, "Overriding MAC address from environment: '%s'\n",
670                     local_macstr);
671
672                 /* Extract out the MAC address */
673                 /* XXX this should all be a generic method */
674                 count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
675                     &tmpmac[0], &tmpmac[1],
676                     &tmpmac[2], &tmpmac[3],
677                     &tmpmac[4], &tmpmac[5]);
678                 if (count == 6) {
679                         /* Valid! */
680                         local_mac = 1;
681                         for (i = 0; i < ETHER_ADDR_LEN; i++)
682                                 local_macaddr[i] = tmpmac[i];
683                 }
684                 /* Done! */
685                 freeenv(local_macstr);
686                 local_macstr = NULL;
687         }
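
        /*
         * Example (illustrative only; the MAC value shown is made up): the
         * override parsed above is a plain kenv variable, settable from the
         * loader, e.g.:
         *
         *      hint.arge.0.macaddr="00:11:22:33:44:55"
         *
         * Any single separator character between the six hex octets is
         * accepted, since the sscanf() format skips it with "%*c".
         */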
688
689         /*
690          * Hardware workarounds.
691          */
692         switch (ar71xx_soc) {
693         case AR71XX_SOC_AR9330:
694         case AR71XX_SOC_AR9331:
695         case AR71XX_SOC_AR9341:
696         case AR71XX_SOC_AR9342:
697         case AR71XX_SOC_AR9344:
698         case AR71XX_SOC_QCA9533:
699         case AR71XX_SOC_QCA9533_V2:
700         case AR71XX_SOC_QCA9556:
701         case AR71XX_SOC_QCA9558:
702                 /* Arbitrary alignment */
703                 sc->arge_hw_flags |= ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE;
704                 sc->arge_hw_flags |= ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE;
705                 break;
706         default:
707                 sc->arge_hw_flags |= ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE;
708                 sc->arge_hw_flags |= ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE;
709                 break;
710         }
711
712         /*
713          * Some units (eg the TP-Link WR-1043ND) do not have a convenient
714          * EEPROM location to read the ethernet MAC address from.
715          * OpenWRT simply snaffles it from a fixed location.
716          *
717          * Since multiple units seem to use this feature, include
718          * a method of setting the MAC address based on a flash location
719          * in CPU address space.
720          *
721          * Some vendors have decided to store the MAC address as a literal
722          * ASCII string in xx:xx:xx:xx:xx:xx format instead of an array of
723          * bytes.  Expose a hint ("readascii") to turn on this conversion,
724          * which parses the string via strtol().
725          */
726          if (local_mac == 0 && resource_long_value(device_get_name(dev),
727              device_get_unit(dev), "eeprommac", &eeprom_mac_addr) == 0) {
728                 local_mac = 1;
729                 int i;
730                 const char *mac =
731                     (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr);
732                 device_printf(dev, "Overriding MAC from EEPROM\n");
733                 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
734                         "readascii", &readascii) == 0) {
735                         device_printf(dev, "Vendor stores MAC in ASCII format\n");
736                         for (i = 0; i < 6; i++) {
737                                 local_macaddr[i] = strtol(&(mac[i*3]), NULL, 16);
738                         }
739                 } else {
740                         for (i = 0; i < 6; i++) {
741                                 local_macaddr[i] = mac[i];
742                         }
743                 }
744         }
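
        /*
         * Example (illustrative only; the flash address is hypothetical):
         *
         *      hint.arge.0.eeprommac="0x1fff1000"
         *      hint.arge.0.readascii="1"
         *
         * eeprommac is the physical address of the stored MAC (mapped
         * uncached via MIPS_PHYS_TO_KSEG1() above); readascii selects the
         * "xx:xx:xx:xx:xx:xx" text format instead of raw bytes.
         */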
745
746         KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)),
747             ("if_arge: Only MAC0 and MAC1 supported"));
748
749         /*
750          * Fetch the PLL configuration.
751          */
752         arge_fetch_pll_config(sc);
753
754         /*
755          * Get the MII configuration, if applicable.
756          */
757         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
758             "miimode", &miicfg) == 0) {
759                 /* XXX bounds check? */
760                 device_printf(dev, "%s: overriding MII mode to '%s'\n",
761                     __func__, arge_miicfg_str[miicfg]);
762                 sc->arge_miicfg = miicfg;
763         }
764
765         /*
766          *  Get which PHY of 5 available we should use for this unit
767          */
768         if (resource_int_value(device_get_name(dev), device_get_unit(dev), 
769             "phymask", &sc->arge_phymask) != 0) {
770                 /*
771                  * Use PHY 4 (the WAN port) for GE0.  For any other
772                  * unit, use all PHYs below 4 (the LAN ports).
773                  */
774                 if (sc->arge_mac_unit == 0)
775                         sc->arge_phymask = (1 << 4);
776                 else
777                         /* Use all phys up to 4 */
778                         sc->arge_phymask = (1 << 4) - 1;
779
780                 device_printf(dev, "No PHY specified, using mask 0x%x\n", sc->arge_phymask);
781         }
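
        /*
         * Example (illustrative only): phymask is a bitmap of PHY addresses
         * to probe, e.g.:
         *
         *      hint.arge.0.phymask="0x10"      (PHY 4 only, the GE0 default)
         *      hint.arge.1.phymask="0x0f"      (PHYs 0-3)
         */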
782
783         /*
784          * Get default/hard-coded media & duplex mode.
785          */
786         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
787             "media", &hint) != 0)
788                 hint = 0;
789
790         if (hint == 1000)
791                 sc->arge_media_type = IFM_1000_T;
792         else if (hint == 100)
793                 sc->arge_media_type = IFM_100_TX;
794         else if (hint == 10)
795                 sc->arge_media_type = IFM_10_T;
796         else
797                 sc->arge_media_type = 0;
798
799         if (resource_int_value(device_get_name(dev), device_get_unit(dev),
800             "fduplex", &hint) != 0)
801                 hint = 1;
802
803         if (hint)
804                 sc->arge_duplex_mode = IFM_FDX;
805         else
806                 sc->arge_duplex_mode = 0;
807
808         mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
809             MTX_DEF);
810         callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0);
811         TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc);
812
813         /* Map control/status registers. */
814         sc->arge_rid = 0;
815         sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
816             &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE);
817
818         if (sc->arge_res == NULL) {
819                 device_printf(dev, "couldn't map memory\n");
820                 error = ENXIO;
821                 goto fail;
822         }
823
824         /* Allocate interrupts */
825         rid = 0;
826         sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
827             RF_SHAREABLE | RF_ACTIVE);
828
829         if (sc->arge_irq == NULL) {
830                 device_printf(dev, "couldn't map interrupt\n");
831                 error = ENXIO;
832                 goto fail;
833         }
834
835         /* Allocate ifnet structure. */
836         ifp = sc->arge_ifp = if_alloc(IFT_ETHER);
837
838         if (ifp == NULL) {
839                 device_printf(dev, "couldn't allocate ifnet structure\n");
840                 error = ENOSPC;
841                 goto fail;
842         }
843
844         ifp->if_softc = sc;
845         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
846         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
847         ifp->if_ioctl = arge_ioctl;
848         ifp->if_start = arge_start;
849         ifp->if_init = arge_init;
850         sc->arge_if_flags = ifp->if_flags;
851
852         /* XXX: add real size */
853         IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
854         ifp->if_snd.ifq_maxlen = ifqmaxlen;
855         IFQ_SET_READY(&ifp->if_snd);
856
857         /* Tell the upper layer(s) we support long frames. */
858         ifp->if_capabilities |= IFCAP_VLAN_MTU;
859
860         ifp->if_capenable = ifp->if_capabilities;
861 #ifdef DEVICE_POLLING
862         ifp->if_capabilities |= IFCAP_POLLING;
863 #endif
864
865         /* If there's a local mac defined, copy that in */
866         if (local_mac == 1) {
867                 (void) ar71xx_mac_addr_init(sc->arge_eaddr,
868                     local_macaddr, 0, 0);
869         } else {
870                 /*
871                  * No MAC address configured. Generate a random one.
872                  */
873                 if (bootverbose)
874                         device_printf(dev,
875                             "Generating random ethernet address.\n");
876                 (void) ar71xx_mac_addr_random_init(sc->arge_eaddr);
877         }
878
879         if (arge_dma_alloc(sc) != 0) {
880                 error = ENXIO;
881                 goto fail;
882         }
883
884         /*
885          * Don't do this for the MDIO bus case - it's already done
886          * as part of the MDIO bus attachment.
887          *
888          * XXX TODO: if we don't do this, we don't ever release the MAC
889          * from reset and we can't use the port.  Now, if we define ARGE_MDIO
890          * but we /don't/ define two MDIO busses, then we can't actually
891          * use both MACs.
892          */
893 #if !defined(ARGE_MDIO)
894         /* Initialize the MAC block */
895         arge_reset_mac(sc);
896         arge_reset_miibus(sc);
897 #endif
898
899         /* Configure MII mode, just for convenience */
900         if (sc->arge_miicfg != 0)
901                 ar71xx_device_set_mii_if(sc->arge_mac_unit, sc->arge_miicfg);
902
903         /*
904          * Program the station address registers: STA_ADDR1 takes MAC
905          * address bytes 2..5, STA_ADDR2 takes bytes 0..1.
906          */
907         ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1, (sc->arge_eaddr[2] << 24)
908             | (sc->arge_eaddr[3] << 16) | (sc->arge_eaddr[4] << 8)
909             | sc->arge_eaddr[5]);
910         ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (sc->arge_eaddr[0] << 8)
911             | sc->arge_eaddr[1]);
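
        /*
         * Illustrative example: for a (hypothetical) MAC of 00:11:22:33:44:55
         * the two writes above program STA_ADDR1 = 0x22334455 and
         * STA_ADDR2 = 0x0011.
         */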
912
913         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0,
914             FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT);
915
916         /*
917          * SoC specific bits.
918          */
919         switch (ar71xx_soc) {
920                 case AR71XX_SOC_AR7240:
921                 case AR71XX_SOC_AR7241:
922                 case AR71XX_SOC_AR7242:
923                 case AR71XX_SOC_AR9330:
924                 case AR71XX_SOC_AR9331:
925                 case AR71XX_SOC_AR9341:
926                 case AR71XX_SOC_AR9342:
927                 case AR71XX_SOC_AR9344:
928                 case AR71XX_SOC_QCA9533:
929                 case AR71XX_SOC_QCA9533_V2:
930                 case AR71XX_SOC_QCA9556:
931                 case AR71XX_SOC_QCA9558:
932                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff);
933                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa);
934                         break;
935                 /* AR71xx, AR913x */
936                 default:
937                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000);
938                         ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff);
939         }
940
941         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH,
942             FIFO_RX_FILTMATCH_DEFAULT);
943
944         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
945             FIFO_RX_FILTMASK_DEFAULT);
946
947 #if defined(ARGE_MDIO)
948         sc->arge_miiproxy = mii_attach_proxy(sc->arge_dev);
949 #endif
950
951         device_printf(sc->arge_dev, "finishing attachment, phymask %04x"
952             ", proxy %s \n", sc->arge_phymask, sc->arge_miiproxy == NULL ?
953             "null" : "set");
954         for (i = 0; i < ARGE_NPHY; i++) {
955                 if (((1 << i) & sc->arge_phymask) != 0) {
956                         error = mii_attach(sc->arge_miiproxy != NULL ?
957                             sc->arge_miiproxy : sc->arge_dev,
958                             &sc->arge_miibus, sc->arge_ifp,
959                             arge_ifmedia_upd, arge_ifmedia_sts,
960                             BMSR_DEFCAPMASK, i, MII_OFFSET_ANY, 0);
961                         if (error != 0) {
962                                 device_printf(sc->arge_dev, "unable to attach"
963                                     " PHY %d: %d\n", i, error);
964                                 goto fail;
965                         }
966                 }
967         }
968
969         if (sc->arge_miibus == NULL) {
970                 /* no PHY, so use hard-coded values */
971                 ifmedia_init(&sc->arge_ifmedia, 0,
972                     arge_multiphy_mediachange,
973                     arge_multiphy_mediastatus);
974                 ifmedia_add(&sc->arge_ifmedia,
975                     IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode,
976                     0, NULL);
977                 ifmedia_set(&sc->arge_ifmedia,
978                     IFM_ETHER | sc->arge_media_type  | sc->arge_duplex_mode);
979                 arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
980         }
981
982         /* Call MI attach routine. */
983         ether_ifattach(sc->arge_ifp, sc->arge_eaddr);
984
985         /* Hook interrupt last to avoid having to lock softc */
986         error = bus_setup_intr(sc->arge_dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE,
987             arge_intr_filter, arge_intr, sc, &sc->arge_intrhand);
988
989         if (error) {
990                 device_printf(sc->arge_dev, "couldn't set up irq\n");
991                 ether_ifdetach(sc->arge_ifp);
992                 goto fail;
993         }
994
995         /* setup sysctl variables */
996         arge_attach_sysctl(sc->arge_dev);
997
998 fail:
999         if (error) 
1000                 arge_detach(dev);
1001
1002         return (error);
1003 }
1004
1005 static int
1006 arge_detach(device_t dev)
1007 {
1008         struct arge_softc       *sc = device_get_softc(dev);
1009         struct ifnet            *ifp = sc->arge_ifp;
1010
1011         KASSERT(mtx_initialized(&sc->arge_mtx),
1012             ("arge mutex not initialized"));
1013
1014         /* These should only be active if attach succeeded */
1015         if (device_is_attached(dev)) {
1016                 ARGE_LOCK(sc);
1017                 sc->arge_detach = 1;
1018 #ifdef DEVICE_POLLING
1019                 if (ifp->if_capenable & IFCAP_POLLING)
1020                         ether_poll_deregister(ifp);
1021 #endif
1022
1023                 arge_stop(sc);
1024                 ARGE_UNLOCK(sc);
1025                 taskqueue_drain(taskqueue_swi, &sc->arge_link_task);
1026                 ether_ifdetach(ifp);
1027         }
1028
1029         if (sc->arge_miibus)
1030                 device_delete_child(dev, sc->arge_miibus);
1031
1032         if (sc->arge_miiproxy)
1033                 device_delete_child(dev, sc->arge_miiproxy);
1034
1035         bus_generic_detach(dev);
1036
1037         if (sc->arge_intrhand)
1038                 bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand);
1039
1040         if (sc->arge_res)
1041                 bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid,
1042                     sc->arge_res);
1043
1044         if (ifp)
1045                 if_free(ifp);
1046
1047         arge_dma_free(sc);
1048
1049         mtx_destroy(&sc->arge_mtx);
1050
1051         return (0);
1052
1053 }
1054
1055 static int
1056 arge_suspend(device_t dev)
1057 {
1058
1059         panic("%s", __func__);
1060         return 0;
1061 }
1062
1063 static int
1064 arge_resume(device_t dev)
1065 {
1066
1067         panic("%s", __func__);
1068         return 0;
1069 }
1070
1071 static int
1072 arge_shutdown(device_t dev)
1073 {
1074         struct arge_softc       *sc;
1075
1076         sc = device_get_softc(dev);
1077
1078         ARGE_LOCK(sc);
1079         arge_stop(sc);
1080         ARGE_UNLOCK(sc);
1081
1082         return (0);
1083 }
1084
1085 static void
1086 arge_hinted_child(device_t bus, const char *dname, int dunit)
1087 {
1088         BUS_ADD_CHILD(bus, 0, dname, dunit);
1089         device_printf(bus, "hinted child %s%d\n", dname, dunit);
1090 }
1091
1092 static int
1093 arge_mdio_busy(struct arge_softc *sc)
1094 {
1095         int i, result;
1096
1097         for (i = 0; i < ARGE_MII_TIMEOUT; i++) {
1098                 DELAY(5);
1099                 ARGE_MDIO_BARRIER_READ(sc);
1100                 result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_INDICATOR);
1101                 if (! result)
1102                         return (0);
1103                 DELAY(5);
1104         }
1105         return (-1);
1106 }
1107
1108 static int
1109 arge_miibus_readreg(device_t dev, int phy, int reg)
1110 {
1111         struct arge_softc * sc = device_get_softc(dev);
1112         int result;
1113         uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT)
1114             | (reg & MAC_MII_REG_MASK);
1115
1116         mtx_lock(&miibus_mtx);
1117         ARGE_MDIO_BARRIER_RW(sc);
1118         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
1119         ARGE_MDIO_BARRIER_WRITE(sc);
1120         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
1121         ARGE_MDIO_BARRIER_WRITE(sc);
1122         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ);
1123
1124         if (arge_mdio_busy(sc) != 0) {
1125                 mtx_unlock(&miibus_mtx);
1126                 ARGEDEBUG(sc, ARGE_DBG_MII, "%s timed out\n", __func__);
1127                 /* XXX: return an errno instead? */
1128                 return (-1);
1129         }
1130
1131         ARGE_MDIO_BARRIER_READ(sc);
1132         result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK;
1133         ARGE_MDIO_BARRIER_RW(sc);
1134         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE);
1135         mtx_unlock(&miibus_mtx);
1136
1137         ARGEDEBUG(sc, ARGE_DBG_MII,
1138             "%s: phy=%d, reg=%02x, value[%08x]=%04x\n",
1139             __func__, phy, reg, addr, result);
1140
1141         return (result);
1142 }
1143
1144 static int
1145 arge_miibus_writereg(device_t dev, int phy, int reg, int data)
1146 {
1147         struct arge_softc * sc = device_get_softc(dev);
1148         uint32_t addr =
1149             (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK);
1150
1151         ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__, 
1152             phy, reg, data);
1153
1154         mtx_lock(&miibus_mtx);
1155         ARGE_MDIO_BARRIER_RW(sc);
1156         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr);
1157         ARGE_MDIO_BARRIER_WRITE(sc);
1158         ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CONTROL, data);
1159         ARGE_MDIO_BARRIER_WRITE(sc);
1160
1161         if (arge_mdio_busy(sc) != 0) {
1162                 mtx_unlock(&miibus_mtx);
1163                 ARGEDEBUG(sc, ARGE_DBG_MII, "%s timed out\n", __func__);
1164                 /* XXX: return an errno instead? */
1165                 return (-1);
1166         }
1167
1168         mtx_unlock(&miibus_mtx);
1169         return (0);
1170 }
1171
1172 static void
1173 arge_miibus_statchg(device_t dev)
1174 {
1175         struct arge_softc       *sc;
1176
1177         sc = device_get_softc(dev);
1178         taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task);
1179 }
1180
1181 static void
1182 arge_link_task(void *arg, int pending)
1183 {
1184         struct arge_softc       *sc;
1185         sc = (struct arge_softc *)arg;
1186
1187         ARGE_LOCK(sc);
1188         arge_update_link_locked(sc);
1189         ARGE_UNLOCK(sc);
1190 }
1191
1192 static void
1193 arge_update_link_locked(struct arge_softc *sc)
1194 {
1195         struct mii_data         *mii;
1196         struct ifnet            *ifp;
1197         uint32_t                media, duplex;
1198
1199         mii = device_get_softc(sc->arge_miibus);
1200         ifp = sc->arge_ifp;
1201         if (mii == NULL || ifp == NULL ||
1202             (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1203                 return;
1204         }
1205
1206         /*
1207          * If we have a static media type configured, then
1208          * use that.  Some PHY configurations (eg QCA955x -> AR8327)
1209          * use a static speed/duplex between the SoC and switch,
1210          * even though the front-facing PHY speed changes.
1211          */
1212         if (sc->arge_media_type != 0) {
1213                 ARGEDEBUG(sc, ARGE_DBG_MII, "%s: fixed; media=%d, duplex=%d\n",
1214                     __func__,
1215                     sc->arge_media_type,
1216                     sc->arge_duplex_mode);
1217                 if (mii->mii_media_status & IFM_ACTIVE) {
1218                         sc->arge_link_status = 1;
1219                 } else {
1220                         sc->arge_link_status = 0;
1221                 }
1222                 arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode);
1223         }
1224
1225         if (mii->mii_media_status & IFM_ACTIVE) {
1226
1227                 media = IFM_SUBTYPE(mii->mii_media_active);
1228                 if (media != IFM_NONE) {
1229                         sc->arge_link_status = 1;
1230                         duplex = mii->mii_media_active & IFM_GMASK;
1231                         ARGEDEBUG(sc, ARGE_DBG_MII, "%s: media=%d, duplex=%d\n",
1232                             __func__,
1233                             media,
1234                             duplex);
1235                         arge_set_pll(sc, media, duplex);
1236                 }
1237         } else {
1238                 sc->arge_link_status = 0;
1239         }
1240 }
1241
1242 static void
1243 arge_set_pll(struct arge_softc *sc, int media, int duplex)
1244 {
1245         uint32_t                cfg, ifcontrol, rx_filtmask;
1246         uint32_t                fifo_tx, pll;
1247         int if_speed;
1248
1249         /*
1250          * XXX Verify - is this valid for all chips?
1251          * QCA955x (and likely some of the earlier chips!) define
1252          * this as nibble mode and byte mode, and those have to do
1253          * with the interface type (MII/SMII versus GMII/RGMII.)
1254          */
1255         ARGEDEBUG(sc, ARGE_DBG_PLL, "set_pll(%04x, %s)\n", media,
1256             duplex == IFM_FDX ? "full" : "half");
1257         cfg = ARGE_READ(sc, AR71XX_MAC_CFG2);
1258         cfg &= ~(MAC_CFG2_IFACE_MODE_1000
1259             | MAC_CFG2_IFACE_MODE_10_100
1260             | MAC_CFG2_FULL_DUPLEX);
1261
1262         if (duplex == IFM_FDX)
1263                 cfg |= MAC_CFG2_FULL_DUPLEX;
1264
1265         ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL);
1266         ifcontrol &= ~MAC_IFCONTROL_SPEED;
1267         rx_filtmask =
1268             ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK);
1269         rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE;
1270
1271         switch(media) {
1272         case IFM_10_T:
1273                 cfg |= MAC_CFG2_IFACE_MODE_10_100;
1274                 if_speed = 10;
1275                 break;
1276         case IFM_100_TX:
1277                 cfg |= MAC_CFG2_IFACE_MODE_10_100;
1278                 ifcontrol |= MAC_IFCONTROL_SPEED;
1279                 if_speed = 100;
1280                 break;
1281         case IFM_1000_T:
1282         case IFM_1000_SX:
1283                 cfg |= MAC_CFG2_IFACE_MODE_1000;
1284                 rx_filtmask |= FIFO_RX_MASK_BYTE_MODE;
1285                 if_speed = 1000;
1286                 break;
1287         default:
1288                 if_speed = 100;
1289                 device_printf(sc->arge_dev,
1290                     "Unknown media %d\n", media);
1291         }
1292
1293         ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: if_speed=%d\n", __func__, if_speed);
1294
1295         switch (ar71xx_soc) {
1296                 case AR71XX_SOC_AR7240:
1297                 case AR71XX_SOC_AR7241:
1298                 case AR71XX_SOC_AR7242:
1299                 case AR71XX_SOC_AR9330:
1300                 case AR71XX_SOC_AR9331:
1301                 case AR71XX_SOC_AR9341:
1302                 case AR71XX_SOC_AR9342:
1303                 case AR71XX_SOC_AR9344:
1304                 case AR71XX_SOC_QCA9533:
1305                 case AR71XX_SOC_QCA9533_V2:
1306                 case AR71XX_SOC_QCA9556:
1307                 case AR71XX_SOC_QCA9558:
1308                         fifo_tx = 0x01f00140;
1309                         break;
1310                 case AR71XX_SOC_AR9130:
1311                 case AR71XX_SOC_AR9132:
1312                         fifo_tx = 0x00780fff;
1313                         break;
1314                 /* AR71xx */
1315                 default:
1316                         fifo_tx = 0x008001ff;
1317         }
1318
1319         ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg);
1320         ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol);
1321         ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK,
1322             rx_filtmask);
1323         ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx);
1324
1325         /* fetch PLL registers */
1326         pll = ar71xx_device_get_eth_pll(sc->arge_mac_unit, if_speed);
1327         ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: pll=0x%x\n", __func__, pll);
1328
1329         /* Override if required by platform data */
1330         if (if_speed == 10 && sc->arge_pllcfg.pll_10 != 0)
1331                 pll = sc->arge_pllcfg.pll_10;
1332         else if (if_speed == 100 && sc->arge_pllcfg.pll_100 != 0)
1333                 pll = sc->arge_pllcfg.pll_100;
1334         else if (if_speed == 1000 && sc->arge_pllcfg.pll_1000 != 0)
1335                 pll = sc->arge_pllcfg.pll_1000;
1336         ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: final pll=0x%x\n", __func__, pll);
1337
1338         /* XXX ensure pll != 0 */
1339         ar71xx_device_set_pll_ge(sc->arge_mac_unit, if_speed, pll);
1340
1341         /* set MII registers */
1342         /*
1343          * This was introduced to match what the Linux ag71xx ethernet
1344          * driver does.  For the AR71xx case, it does set the port
1345          * MII speed.  However, if this is done, non-gigabit speeds
1346          * are not at all reliable when speaking via RGMII through
1347          * 'bridge' PHY port that's pretending to be a local PHY.
1348          *
1349          * Until that gets root caused, and until an AR71xx + normal
1350          * PHY board is tested, leave this disabled.
1351          */
1352 #if 0
1353         ar71xx_device_set_mii_speed(sc->arge_mac_unit, if_speed);
1354 #endif
1355 }
1356
1357
1358 static void
1359 arge_reset_dma(struct arge_softc *sc)
1360 {
1361
1362         ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: called\n", __func__);
1363
1364         ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0);
1365         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0);
1366
1367         ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0);
1368         ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0);
1369
1370         /* Clear all possible RX interrupts */
1371         while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD)
1372                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
1373
1374         /*
1375          * Clear all possible TX interrupts
1376          */
1377         while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT)
1378                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
1379
1380         /*
1381          * Now Rx/Tx errors
1382          */
1383         ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS,
1384             DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW);
1385         ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS,
1386             DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN);
1387
1388         /*
1389          * Force a DDR flush so any pending data is properly
1390          * flushed to RAM before underlying buffers are freed.
1391          */
1392         arge_flush_ddr(sc);
1393 }
1394
1395 static void
1396 arge_init(void *xsc)
1397 {
1398         struct arge_softc        *sc = xsc;
1399
1400         ARGE_LOCK(sc);
1401         arge_init_locked(sc);
1402         ARGE_UNLOCK(sc);
1403 }
1404
1405 static void
1406 arge_init_locked(struct arge_softc *sc)
1407 {
1408         struct ifnet            *ifp = sc->arge_ifp;
1409         struct mii_data         *mii;
1410
1411         ARGE_LOCK_ASSERT(sc);
1412
1413         if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
1414                 return;
1415
1416         /* Init circular RX list. */
1417         if (arge_rx_ring_init(sc) != 0) {
1418                 device_printf(sc->arge_dev,
1419                     "initialization failed: no memory for rx buffers\n");
1420                 arge_stop(sc);
1421                 return;
1422         }
1423
1424         /* Init tx descriptors. */
1425         arge_tx_ring_init(sc);
1426
1427         arge_reset_dma(sc);
1428
1429         if (sc->arge_miibus) {
1430                 mii = device_get_softc(sc->arge_miibus);
1431                 mii_mediachg(mii);
1432         }
1433         else {
1434                 /*
1435                  * Sun always shines over multiPHY interface
1436                  */
1437                 sc->arge_link_status = 1;
1438         }
1439
1440         ifp->if_drv_flags |= IFF_DRV_RUNNING;
1441         ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1442
1443         if (sc->arge_miibus) {
1444                 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
1445                 arge_update_link_locked(sc);
1446         }
1447
1448         ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0));
1449         ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0));
1450
1451         /* Start listening */
1452         ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
1453
1454         /* Enable interrupts */
1455         ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1456 }
1457
1458 /*
1459  * Return whether the mbuf chain is correctly aligned
1460  * for the arge TX engine.
1461  *
1462  * All the MACs have a length requirement: any non-final
1463  * fragment (ie, descriptor with MORE bit set) needs to have
1464  * a length divisible by 4.
1465  *
1466  * The AR71xx, AR913x require the start address also be
1467  * DWORD aligned.  The later MACs don't.
1468  */
1469 static int
1470 arge_mbuf_chain_is_tx_aligned(struct arge_softc *sc, struct mbuf *m0)
1471 {
1472         struct mbuf *m;
1473
1474         for (m = m0; m != NULL; m = m->m_next) {
1475                 /*
1476                  * Only do this for chips that require it.
1477                  */
1478                 if ((sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE) &&
1479                     (mtod(m, intptr_t) & 3) != 0) {
1480                         sc->stats.tx_pkts_unaligned_start++;
1481                         return 0;
1482                 }
1483
1484                 /*
1485                  * All chips have this requirement for length.
1486                  */
1487                 if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0)) {
1488                         sc->stats.tx_pkts_unaligned_len++;
1489                         return 0;
1490                 }
1491         }
1492         return 1;
1493 }
1494
1495 /*
1496  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1497  * pointers to the fragment pointers.
1498  */
1499 static int
1500 arge_encap(struct arge_softc *sc, struct mbuf **m_head)
1501 {
1502         struct arge_txdesc      *txd;
1503         struct arge_desc        *desc, *prev_desc;
1504         bus_dma_segment_t       txsegs[ARGE_MAXFRAGS];
1505         int                     error, i, nsegs, prod, prev_prod;
1506         struct mbuf             *m;
1507
1508         ARGE_LOCK_ASSERT(sc);
1509
1510         /*
1511          * Fix mbuf chain based on hardware alignment constraints.
1512          */
1513         m = *m_head;
1514         if (! arge_mbuf_chain_is_tx_aligned(sc, m)) {
1515                 sc->stats.tx_pkts_unaligned++;
1516                 m = m_defrag(*m_head, M_NOWAIT);
1517                 if (m == NULL) {
1518                         *m_head = NULL;
1519                         return (ENOBUFS);
1520                 }
1521                 *m_head = m;
1522         } else
1523                 sc->stats.tx_pkts_aligned++;
1524
1525         prod = sc->arge_cdata.arge_tx_prod;
1526         txd = &sc->arge_cdata.arge_txdesc[prod];
1527         error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag,
1528             txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
1529
1530         if (error == EFBIG) {
1531                 panic("EFBIG");
1532         } else if (error != 0)
1533                 return (error);
1534
1535         if (nsegs == 0) {
1536                 m_freem(*m_head);
1537                 *m_head = NULL;
1538                 return (EIO);
1539         }
1540
1541         /* Check number of available descriptors. */
1542         if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 2)) {
1543                 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
1544                 sc->stats.tx_pkts_nosegs++;
1545                 return (ENOBUFS);
1546         }
1547
1548         txd->tx_m = *m_head;
1549         bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
1550             BUS_DMASYNC_PREWRITE);
1551
1552         /*
1553          * Make a list of descriptors for this packet. The DMA controller
1554          * will walk through it while arge_link is not zero.
1555          *
1556          * Since we're in an endless circular buffer, ensure that
1557          * the first descriptor of a multi-descriptor chain stays
1558          * set to EMPTY, then undo that once we're done populating.
1559          */
1560         prev_prod = prod;
1561         desc = prev_desc = NULL;
1562         for (i = 0; i < nsegs; i++) {
1563                 uint32_t tmp;
1564
1565                 desc = &sc->arge_rdata.arge_tx_ring[prod];
1566
1567                 /*
1568                  * Set DESC_EMPTY so the hardware (hopefully) stops at this
1569                  * point.  We don't want it to start transmitting descriptors
1570                  * before we've finished fleshing this out.
1571                  */
1572                 tmp = ARGE_DMASIZE(txsegs[i].ds_len);
1573                 if (i == 0)
1574                         tmp |= ARGE_DESC_EMPTY;
1575                 desc->packet_ctrl = tmp;
1576
1577                 /* XXX Note: only relevant for older MACs; but check length! */
1578                 if ((sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE) &&
1579                     (txsegs[i].ds_addr & 3))
1580                         panic("TX packet address unaligned\n");
1581
1582                 desc->packet_addr = txsegs[i].ds_addr;
1583
1584                 /* link with previous descriptor */
1585                 if (prev_desc)
1586                         prev_desc->packet_ctrl |= ARGE_DESC_MORE;
1587
1588                 sc->arge_cdata.arge_tx_cnt++;
1589                 prev_desc = desc;
1590                 ARGE_INC(prod, ARGE_TX_RING_COUNT);
1591         }
1592
1593         /* Update producer index. */
1594         sc->arge_cdata.arge_tx_prod = prod;
1595
1596         /*
1597          * The descriptors are updated, so enable the first one.
1598          */
1599         desc = &sc->arge_rdata.arge_tx_ring[prev_prod];
1600         desc->packet_ctrl &= ~ARGE_DESC_EMPTY;
1601
1602         /* Sync descriptors. */
1603         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
1604             sc->arge_cdata.arge_tx_ring_map,
1605             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1606
1607         /* Flush writes */
1608         ARGE_BARRIER_WRITE(sc);
1609
1610         /* Start transmitting */
1611         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: setting DMA_TX_CONTROL_EN\n",
1612             __func__);
1613         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN);
1614         return (0);
1615 }
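
/*
 * A sketch of the ordering arge_encap() relies on (indices arbitrary):
 * for a two-segment packet starting at prod = 5, descriptor 5 is first
 * written as EMPTY | len0, descriptor 6 as len1, descriptor 5 then gains
 * the MORE bit, and only as the final step is EMPTY cleared on
 * descriptor 5 before the ring is synced, the write barrier issued and
 * DMA_TX_CONTROL_EN written.  The DMA engine therefore never observes a
 * partially built chain, even if it is already walking the ring.
 */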
1616
1617 static void
1618 arge_start(struct ifnet *ifp)
1619 {
1620         struct arge_softc        *sc;
1621
1622         sc = ifp->if_softc;
1623
1624         ARGE_LOCK(sc);
1625         arge_start_locked(ifp);
1626         ARGE_UNLOCK(sc);
1627 }
1628
1629 static void
1630 arge_start_locked(struct ifnet *ifp)
1631 {
1632         struct arge_softc       *sc;
1633         struct mbuf             *m_head;
1634         int                     enq = 0;
1635
1636         sc = ifp->if_softc;
1637
1638         ARGE_LOCK_ASSERT(sc);
1639
1640         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: beginning\n", __func__);
1641
1642         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1643             IFF_DRV_RUNNING || sc->arge_link_status == 0)
1644                 return;
1645
1646         /*
1647          * The loop below simply exits if the ring is full and never
1648          * gets a chance to set IFF_DRV_OACTIVE. Although it's
1649          * and never gets a chance to set this flag. Although it's
1650          * likely never needed, this at least avoids an unexpected
1651          * situation.
1652          */
1653         if (sc->arge_cdata.arge_tx_cnt >= ARGE_TX_RING_COUNT - 2) {
1654                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1655                 ARGEDEBUG(sc, ARGE_DBG_ERR,
1656                     "%s: tx_cnt %d >= max %d; setting IFF_DRV_OACTIVE\n",
1657                     __func__, sc->arge_cdata.arge_tx_cnt,
1658                     ARGE_TX_RING_COUNT - 2);
1659                 return;
1660         }
1661
1662         arge_flush_ddr(sc);
1663
1664         for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1665             sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) {
1666                 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
1667                 if (m_head == NULL)
1668                         break;
1669
1670
1671                 /*
1672                  * Pack the data into the transmit ring.
1673                  */
1674                 if (arge_encap(sc, &m_head)) {
1675                         if (m_head == NULL)
1676                                 break;
1677                         IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
1678                         ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1679                         break;
1680                 }
1681
1682                 enq++;
1683                 /*
1684                  * If there's a BPF listener, bounce a copy of this frame
1685                  * to him.
1686                  */
1687                 ETHER_BPF_MTAP(ifp, m_head);
1688         }
1689         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: finished; queued %d packets\n",
1690             __func__, enq);
1691 }
1692
1693 static void
1694 arge_stop(struct arge_softc *sc)
1695 {
1696         struct ifnet        *ifp;
1697
1698         ARGE_LOCK_ASSERT(sc);
1699
1700         ifp = sc->arge_ifp;
1701         ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1702         if (sc->arge_miibus)
1703                 callout_stop(&sc->arge_stat_callout);
1704
1705         /* mask out interrupts */
1706         ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1707
1708         arge_reset_dma(sc);
1709
1710         /* Flush FIFO and free any existing mbufs */
1711         arge_flush_ddr(sc);
1712         arge_rx_ring_free(sc);
1713         arge_tx_ring_free(sc);
1714 }
1715
1716
1717 static int
1718 arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1719 {
1720         struct arge_softc               *sc = ifp->if_softc;
1721         struct ifreq            *ifr = (struct ifreq *) data;
1722         struct mii_data         *mii;
1723         int                     error;
1724 #ifdef DEVICE_POLLING
1725         int                     mask;
1726 #endif
1727
1728         switch (command) {
1729         case SIOCSIFFLAGS:
1730                 ARGE_LOCK(sc);
1731                 if ((ifp->if_flags & IFF_UP) != 0) {
1732                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1733                                 if (((ifp->if_flags ^ sc->arge_if_flags)
1734                                     & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
1735                                         /* XXX: handle promisc & multi flags */
1736                                 }
1737
1738                         } else {
1739                                 if (!sc->arge_detach)
1740                                         arge_init_locked(sc);
1741                         }
1742                 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
1743                         ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1744                         arge_stop(sc);
1745                 }
1746                 sc->arge_if_flags = ifp->if_flags;
1747                 ARGE_UNLOCK(sc);
1748                 error = 0;
1749                 break;
1750         case SIOCADDMULTI:
1751         case SIOCDELMULTI:
1752                 /* XXX: implement SIOCDELMULTI */
1753                 error = 0;
1754                 break;
1755         case SIOCGIFMEDIA:
1756         case SIOCSIFMEDIA:
1757                 if (sc->arge_miibus) {
1758                         mii = device_get_softc(sc->arge_miibus);
1759                         error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
1760                             command);
1761                 }
1762                 else
1763                         error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia,
1764                             command);
1765                 break;
1766         case SIOCSIFCAP:
1767                 /* XXX: Check other capabilities */
1768 #ifdef DEVICE_POLLING
1769                 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
1770                 if (mask & IFCAP_POLLING) {
1771                         if (ifr->ifr_reqcap & IFCAP_POLLING) {
1772                                 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
1773                                 error = ether_poll_register(arge_poll, ifp);
1774                                 if (error)
1775                                         return error;
1776                                 ARGE_LOCK(sc);
1777                                 ifp->if_capenable |= IFCAP_POLLING;
1778                                 ARGE_UNLOCK(sc);
1779                         } else {
1780                                 ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
1781                                 error = ether_poll_deregister(ifp);
1782                                 ARGE_LOCK(sc);
1783                                 ifp->if_capenable &= ~IFCAP_POLLING;
1784                                 ARGE_UNLOCK(sc);
1785                         }
1786                 }
1787                 error = 0;
1788                 break;
1789 #endif
1790         default:
1791                 error = ether_ioctl(ifp, command, data);
1792                 break;
1793         }
1794
1795         return (error);
1796 }
1797
1798 /*
1799  * Set media options.
1800  */
1801 static int
1802 arge_ifmedia_upd(struct ifnet *ifp)
1803 {
1804         struct arge_softc               *sc;
1805         struct mii_data         *mii;
1806         struct mii_softc        *miisc;
1807         int                     error;
1808
1809         sc = ifp->if_softc;
1810         ARGE_LOCK(sc);
1811         mii = device_get_softc(sc->arge_miibus);
1812         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1813                 PHY_RESET(miisc);
1814         error = mii_mediachg(mii);
1815         ARGE_UNLOCK(sc);
1816
1817         return (error);
1818 }
1819
1820 /*
1821  * Report current media status.
1822  */
1823 static void
1824 arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1825 {
1826         struct arge_softc               *sc = ifp->if_softc;
1827         struct mii_data         *mii;
1828
1829         mii = device_get_softc(sc->arge_miibus);
1830         ARGE_LOCK(sc);
1831         mii_pollstat(mii);
1832         ifmr->ifm_active = mii->mii_media_active;
1833         ifmr->ifm_status = mii->mii_media_status;
1834         ARGE_UNLOCK(sc);
1835 }
1836
1837 struct arge_dmamap_arg {
1838         bus_addr_t      arge_busaddr;
1839 };
1840
1841 static void
1842 arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1843 {
1844         struct arge_dmamap_arg  *ctx;
1845
1846         if (error != 0)
1847                 return;
1848         ctx = arg;
1849         ctx->arge_busaddr = segs[0].ds_addr;
1850 }
1851
1852 static int
1853 arge_dma_alloc(struct arge_softc *sc)
1854 {
1855         struct arge_dmamap_arg  ctx;
1856         struct arge_txdesc      *txd;
1857         struct arge_rxdesc      *rxd;
1858         int                     error, i;
1859         int                     arge_tx_align, arge_rx_align;
1860
1861         /* Assume 4 byte alignment by default */
1862         arge_tx_align = 4;
1863         arge_rx_align = 4;
1864
1865         if (sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE)
1866                 arge_tx_align = 1;
1867         if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE)
1868                 arge_rx_align = 1;
1869
1870         /* Create parent DMA tag. */
1871         error = bus_dma_tag_create(
1872             bus_get_dma_tag(sc->arge_dev),      /* parent */
1873             1, 0,                       /* alignment, boundary */
1874             BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1875             BUS_SPACE_MAXADDR,          /* highaddr */
1876             NULL, NULL,                 /* filter, filterarg */
1877             BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1878             0,                          /* nsegments */
1879             BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1880             0,                          /* flags */
1881             NULL, NULL,                 /* lockfunc, lockarg */
1882             &sc->arge_cdata.arge_parent_tag);
1883         if (error != 0) {
1884                 device_printf(sc->arge_dev,
1885                     "failed to create parent DMA tag\n");
1886                 goto fail;
1887         }
1888         /* Create tag for Tx ring. */
1889         error = bus_dma_tag_create(
1890             sc->arge_cdata.arge_parent_tag,     /* parent */
1891             ARGE_RING_ALIGN, 0,         /* alignment, boundary */
1892             BUS_SPACE_MAXADDR,          /* lowaddr */
1893             BUS_SPACE_MAXADDR,          /* highaddr */
1894             NULL, NULL,                 /* filter, filterarg */
1895             ARGE_TX_DMA_SIZE,           /* maxsize */
1896             1,                          /* nsegments */
1897             ARGE_TX_DMA_SIZE,           /* maxsegsize */
1898             0,                          /* flags */
1899             NULL, NULL,                 /* lockfunc, lockarg */
1900             &sc->arge_cdata.arge_tx_ring_tag);
1901         if (error != 0) {
1902                 device_printf(sc->arge_dev,
1903                     "failed to create Tx ring DMA tag\n");
1904                 goto fail;
1905         }
1906
1907         /* Create tag for Rx ring. */
1908         error = bus_dma_tag_create(
1909             sc->arge_cdata.arge_parent_tag,     /* parent */
1910             ARGE_RING_ALIGN, 0,         /* alignment, boundary */
1911             BUS_SPACE_MAXADDR,          /* lowaddr */
1912             BUS_SPACE_MAXADDR,          /* highaddr */
1913             NULL, NULL,                 /* filter, filterarg */
1914             ARGE_RX_DMA_SIZE,           /* maxsize */
1915             1,                          /* nsegments */
1916             ARGE_RX_DMA_SIZE,           /* maxsegsize */
1917             0,                          /* flags */
1918             NULL, NULL,                 /* lockfunc, lockarg */
1919             &sc->arge_cdata.arge_rx_ring_tag);
1920         if (error != 0) {
1921                 device_printf(sc->arge_dev,
1922                     "failed to create Rx ring DMA tag\n");
1923                 goto fail;
1924         }
1925
1926         /* Create tag for Tx buffers. */
1927         error = bus_dma_tag_create(
1928             sc->arge_cdata.arge_parent_tag,     /* parent */
1929             arge_tx_align, 0,           /* alignment, boundary */
1930             BUS_SPACE_MAXADDR,          /* lowaddr */
1931             BUS_SPACE_MAXADDR,          /* highaddr */
1932             NULL, NULL,                 /* filter, filterarg */
1933             MCLBYTES * ARGE_MAXFRAGS,   /* maxsize */
1934             ARGE_MAXFRAGS,              /* nsegments */
1935             MCLBYTES,                   /* maxsegsize */
1936             0,                          /* flags */
1937             NULL, NULL,                 /* lockfunc, lockarg */
1938             &sc->arge_cdata.arge_tx_tag);
1939         if (error != 0) {
1940                 device_printf(sc->arge_dev, "failed to create Tx DMA tag\n");
1941                 goto fail;
1942         }
1943
1944         /* Create tag for Rx buffers. */
1945         error = bus_dma_tag_create(
1946             sc->arge_cdata.arge_parent_tag,     /* parent */
1947             arge_rx_align, 0,           /* alignment, boundary */
1948             BUS_SPACE_MAXADDR,          /* lowaddr */
1949             BUS_SPACE_MAXADDR,          /* highaddr */
1950             NULL, NULL,                 /* filter, filterarg */
1951             MCLBYTES,                   /* maxsize */
1952             ARGE_MAXFRAGS,              /* nsegments */
1953             MCLBYTES,                   /* maxsegsize */
1954             0,                          /* flags */
1955             NULL, NULL,                 /* lockfunc, lockarg */
1956             &sc->arge_cdata.arge_rx_tag);
1957         if (error != 0) {
1958                 device_printf(sc->arge_dev, "failed to create Rx DMA tag\n");
1959                 goto fail;
1960         }
1961
1962         /* Allocate DMA'able memory and load the DMA map for Tx ring. */
1963         error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag,
1964             (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK |
1965             BUS_DMA_COHERENT | BUS_DMA_ZERO,
1966             &sc->arge_cdata.arge_tx_ring_map);
1967         if (error != 0) {
1968                 device_printf(sc->arge_dev,
1969                     "failed to allocate DMA'able memory for Tx ring\n");
1970                 goto fail;
1971         }
1972
1973         ctx.arge_busaddr = 0;
1974         error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag,
1975             sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring,
1976             ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1977         if (error != 0 || ctx.arge_busaddr == 0) {
1978                 device_printf(sc->arge_dev,
1979                     "failed to load DMA'able memory for Tx ring\n");
1980                 goto fail;
1981         }
1982         sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr;
1983
1984         /* Allocate DMA'able memory and load the DMA map for Rx ring. */
1985         error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag,
1986             (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK |
1987             BUS_DMA_COHERENT | BUS_DMA_ZERO,
1988             &sc->arge_cdata.arge_rx_ring_map);
1989         if (error != 0) {
1990                 device_printf(sc->arge_dev,
1991                     "failed to allocate DMA'able memory for Rx ring\n");
1992                 goto fail;
1993         }
1994
1995         ctx.arge_busaddr = 0;
1996         error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag,
1997             sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring,
1998             ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0);
1999         if (error != 0 || ctx.arge_busaddr == 0) {
2000                 device_printf(sc->arge_dev,
2001                     "failed to load DMA'able memory for Rx ring\n");
2002                 goto fail;
2003         }
2004         sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr;
2005
2006         /* Create DMA maps for Tx buffers. */
2007         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
2008                 txd = &sc->arge_cdata.arge_txdesc[i];
2009                 txd->tx_m = NULL;
2010                 txd->tx_dmamap = NULL;
2011                 error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0,
2012                     &txd->tx_dmamap);
2013                 if (error != 0) {
2014                         device_printf(sc->arge_dev,
2015                             "failed to create Tx dmamap\n");
2016                         goto fail;
2017                 }
2018         }
2019         /* Create DMA maps for Rx buffers. */
2020         if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
2021             &sc->arge_cdata.arge_rx_sparemap)) != 0) {
2022                 device_printf(sc->arge_dev,
2023                     "failed to create spare Rx dmamap\n");
2024                 goto fail;
2025         }
2026         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
2027                 rxd = &sc->arge_cdata.arge_rxdesc[i];
2028                 rxd->rx_m = NULL;
2029                 rxd->rx_dmamap = NULL;
2030                 error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0,
2031                     &rxd->rx_dmamap);
2032                 if (error != 0) {
2033                         device_printf(sc->arge_dev,
2034                             "failed to create Rx dmamap\n");
2035                         goto fail;
2036                 }
2037         }
2038
2039 fail:
2040         return (error);
2041 }
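
/*
 * A summary of the tag hierarchy built above: the parent tag only
 * restricts DMA addresses to the low 32 bits; the Tx/Rx ring tags hang
 * off it with ARGE_RING_ALIGN alignment and a single segment each; and
 * the Tx/Rx buffer tags use 4-byte alignment unless the MAC advertises
 * ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE / ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE, in
 * which case mbufs may be mapped at any address.
 */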
2042
2043 static void
2044 arge_dma_free(struct arge_softc *sc)
2045 {
2046         struct arge_txdesc      *txd;
2047         struct arge_rxdesc      *rxd;
2048         int                     i;
2049
2050         /* Tx ring. */
2051         if (sc->arge_cdata.arge_tx_ring_tag) {
2052                 if (sc->arge_rdata.arge_tx_ring_paddr)
2053                         bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag,
2054                             sc->arge_cdata.arge_tx_ring_map);
2055                 if (sc->arge_rdata.arge_tx_ring)
2056                         bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag,
2057                             sc->arge_rdata.arge_tx_ring,
2058                             sc->arge_cdata.arge_tx_ring_map);
2059                 sc->arge_rdata.arge_tx_ring = NULL;
2060                 sc->arge_rdata.arge_tx_ring_paddr = 0;
2061                 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag);
2062                 sc->arge_cdata.arge_tx_ring_tag = NULL;
2063         }
2064         /* Rx ring. */
2065         if (sc->arge_cdata.arge_rx_ring_tag) {
2066                 if (sc->arge_rdata.arge_rx_ring_paddr)
2067                         bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag,
2068                             sc->arge_cdata.arge_rx_ring_map);
2069                 if (sc->arge_rdata.arge_rx_ring)
2070                         bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag,
2071                             sc->arge_rdata.arge_rx_ring,
2072                             sc->arge_cdata.arge_rx_ring_map);
2073                 sc->arge_rdata.arge_rx_ring = NULL;
2074                 sc->arge_rdata.arge_rx_ring_paddr = 0;
2075                 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag);
2076                 sc->arge_cdata.arge_rx_ring_tag = NULL;
2077         }
2078         /* Tx buffers. */
2079         if (sc->arge_cdata.arge_tx_tag) {
2080                 for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
2081                         txd = &sc->arge_cdata.arge_txdesc[i];
2082                         if (txd->tx_dmamap) {
2083                                 bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag,
2084                                     txd->tx_dmamap);
2085                                 txd->tx_dmamap = NULL;
2086                         }
2087                 }
2088                 bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag);
2089                 sc->arge_cdata.arge_tx_tag = NULL;
2090         }
2091         /* Rx buffers. */
2092         if (sc->arge_cdata.arge_rx_tag) {
2093                 for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
2094                         rxd = &sc->arge_cdata.arge_rxdesc[i];
2095                         if (rxd->rx_dmamap) {
2096                                 bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
2097                                     rxd->rx_dmamap);
2098                                 rxd->rx_dmamap = NULL;
2099                         }
2100                 }
2101                 if (sc->arge_cdata.arge_rx_sparemap) {
2102                         bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag,
2103                             sc->arge_cdata.arge_rx_sparemap);
2104                         sc->arge_cdata.arge_rx_sparemap = 0;
2105                 }
2106                 bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag);
2107                 sc->arge_cdata.arge_rx_tag = NULL;
2108         }
2109
2110         if (sc->arge_cdata.arge_parent_tag) {
2111                 bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag);
2112                 sc->arge_cdata.arge_parent_tag = NULL;
2113         }
2114 }
2115
2116 /*
2117  * Initialize the transmit descriptors.
2118  */
2119 static int
2120 arge_tx_ring_init(struct arge_softc *sc)
2121 {
2122         struct arge_ring_data   *rd;
2123         struct arge_txdesc      *txd;
2124         bus_addr_t              addr;
2125         int                     i;
2126
2127         sc->arge_cdata.arge_tx_prod = 0;
2128         sc->arge_cdata.arge_tx_cons = 0;
2129         sc->arge_cdata.arge_tx_cnt = 0;
2130
2131         rd = &sc->arge_rdata;
2132         bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring));
2133         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
2134                 if (i == ARGE_TX_RING_COUNT - 1)
2135                         addr = ARGE_TX_RING_ADDR(sc, 0);
2136                 else
2137                         addr = ARGE_TX_RING_ADDR(sc, i + 1);
2138                 rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY;
2139                 rd->arge_tx_ring[i].next_desc = addr;
2140                 txd = &sc->arge_cdata.arge_txdesc[i];
2141                 txd->tx_m = NULL;
2142         }
2143
2144         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
2145             sc->arge_cdata.arge_tx_ring_map,
2146             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2147
2148         return (0);
2149 }
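
/*
 * A sketch of the ring this produces, with N = ARGE_TX_RING_COUNT:
 *
 *      desc[0] -> desc[1] -> ... -> desc[N-1] -> desc[0]
 *
 * Every descriptor starts out EMPTY, so the DMA engine has nothing to
 * transmit until arge_encap() fills a slot and clears that flag.
 */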
2150
2151 /*
2152  * Free the Tx ring: unload any pending DMA transactions and free the mbufs.
2153  */
2154 static void
2155 arge_tx_ring_free(struct arge_softc *sc)
2156 {
2157         struct arge_txdesc      *txd;
2158         int                     i;
2159
2160         /* Free the Tx buffers. */
2161         for (i = 0; i < ARGE_TX_RING_COUNT; i++) {
2162                 txd = &sc->arge_cdata.arge_txdesc[i];
2163                 if (txd->tx_dmamap) {
2164                         bus_dmamap_sync(sc->arge_cdata.arge_tx_tag,
2165                             txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
2166                         bus_dmamap_unload(sc->arge_cdata.arge_tx_tag,
2167                             txd->tx_dmamap);
2168                 }
2169                 if (txd->tx_m)
2170                         m_freem(txd->tx_m);
2171                 txd->tx_m = NULL;
2172         }
2173 }
2174
2175 /*
2176  * Initialize the RX descriptors and allocate mbufs for them. Note that
2177  * we arrange the descriptors in a closed ring, so that the last descriptor
2178  * points back to the first.
2179  */
2180 static int
2181 arge_rx_ring_init(struct arge_softc *sc)
2182 {
2183         struct arge_ring_data   *rd;
2184         struct arge_rxdesc      *rxd;
2185         bus_addr_t              addr;
2186         int                     i;
2187
2188         sc->arge_cdata.arge_rx_cons = 0;
2189
2190         rd = &sc->arge_rdata;
2191         bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring));
2192         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
2193                 rxd = &sc->arge_cdata.arge_rxdesc[i];
2194                 if (rxd->rx_m != NULL) {
2195                         device_printf(sc->arge_dev,
2196                             "%s: ring[%d] rx_m wasn't free?\n",
2197                             __func__,
2198                             i);
2199                 }
2200                 rxd->rx_m = NULL;
2201                 rxd->desc = &rd->arge_rx_ring[i];
2202                 if (i == ARGE_RX_RING_COUNT - 1)
2203                         addr = ARGE_RX_RING_ADDR(sc, 0);
2204                 else
2205                         addr = ARGE_RX_RING_ADDR(sc, i + 1);
2206                 rd->arge_rx_ring[i].next_desc = addr;
2207                 if (arge_newbuf(sc, i) != 0) {
2208                         return (ENOBUFS);
2209                 }
2210         }
2211
2212         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
2213             sc->arge_cdata.arge_rx_ring_map,
2214             BUS_DMASYNC_PREWRITE);
2215
2216         return (0);
2217 }
2218
2219 /*
2220  * Free all the buffers in the RX ring.
2221  *
2222  * TODO: ensure that DMA is disabled and no pending DMA
2223  * is lurking in the FIFO.
2224  */
2225 static void
2226 arge_rx_ring_free(struct arge_softc *sc)
2227 {
2228         int i;
2229         struct arge_rxdesc      *rxd;
2230
2231         ARGE_LOCK_ASSERT(sc);
2232
2233         for (i = 0; i < ARGE_RX_RING_COUNT; i++) {
2234                 rxd = &sc->arge_cdata.arge_rxdesc[i];
2235                 /* Unmap the mbuf */
2236                 if (rxd->rx_m != NULL) {
2237                         bus_dmamap_unload(sc->arge_cdata.arge_rx_tag,
2238                             rxd->rx_dmamap);
2239                         m_free(rxd->rx_m);
2240                         rxd->rx_m = NULL;
2241                 }
2242         }
2243 }
2244
2245 /*
2246  * Initialize an RX descriptor and attach an MBUF cluster.
2247  */
2248 static int
2249 arge_newbuf(struct arge_softc *sc, int idx)
2250 {
2251         struct arge_desc                *desc;
2252         struct arge_rxdesc      *rxd;
2253         struct mbuf             *m;
2254         bus_dma_segment_t       segs[1];
2255         bus_dmamap_t            map;
2256         int                     nsegs;
2257
2258         /* XXX TODO: should just allocate an explicit 2KiB buffer */
2259         m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2260         if (m == NULL)
2261                 return (ENOBUFS);
2262         m->m_len = m->m_pkthdr.len = MCLBYTES;
2263
2264         /*
2265          * Leave extra space at the front so the packet can later be
2266          * "adjusted" (copied) backwards to align the IPv4/IPv6 headers.
2267          */
2268         if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE)
2269                 m_adj(m, sizeof(uint64_t));
2270         /*
2271          * If it's a 1-byte aligned buffer, then just offset it two bytes;
2272          * that should give us a correctly DWORD-aligned L3 payload
2273          * and we won't have to undo it afterwards.
2274          */
2275         else if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE)
2276                 m_adj(m, sizeof(uint16_t));
2277
2278         if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag,
2279             sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
2280                 m_freem(m);
2281                 return (ENOBUFS);
2282         }
2283         KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
2284
2285         rxd = &sc->arge_cdata.arge_rxdesc[idx];
2286         if (rxd->rx_m != NULL) {
2287                 bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap);
2288         }
2289         map = rxd->rx_dmamap;
2290         rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap;
2291         sc->arge_cdata.arge_rx_sparemap = map;
2292         rxd->rx_m = m;
2293         desc = rxd->desc;
2294         if ((sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE) &&
2295             segs[0].ds_addr & 3)
2296                 panic("RX packet address unaligned");
2297         desc->packet_addr = segs[0].ds_addr;
2298         desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len);
2299
2300         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
2301             sc->arge_cdata.arge_rx_ring_map,
2302             BUS_DMASYNC_PREWRITE);
2303
2304         return (0);
2305 }
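
/*
 * The spare-map shuffle above follows the usual bus_dma pattern: the
 * fresh mbuf is loaded into arge_rx_sparemap first, so a failed load
 * leaves the ring slot and its old mapping untouched; only on success
 * is the slot's previous map unloaded and the two maps swapped, the
 * just-released map becoming the next spare.
 */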
2306
2307 /*
2308  * Move the data backwards 16 bits to (hopefully!) ensure the
2309  * IPv4/IPv6 payload is aligned.
2310  *
2311  * This is required for earlier hardware where the RX path
2312  * requires DWORD aligned buffers.
2313  */
2314 static __inline void
2315 arge_fixup_rx(struct mbuf *m)
2316 {
2317         int             i;
2318         uint16_t        *src, *dst;
2319
2320         src = mtod(m, uint16_t *);
2321         dst = src - 1;
2322
2323         for (i = 0; i < m->m_len / sizeof(uint16_t); i++) {
2324                 *dst++ = *src++;
2325         }
2326
2327         if (m->m_len % sizeof(uint16_t))
2328                 *(uint8_t *)dst = *(uint8_t *)src;
2329
2330         m->m_data -= ETHER_ALIGN;
2331 }
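
/*
 * A worked example of the offsets involved: the hardware stores the
 * frame at a 4-byte aligned address, so the 14-byte Ethernet header
 * leaves the IPv4/IPv6 header 2 bytes past a 4-byte boundary.  Copying
 * the data back by one uint16_t at a time and moving m_data back by
 * ETHER_ALIGN (2) restores that alignment.  MACs that accept 1-byte
 * aligned RX buffers avoid the copy entirely: arge_newbuf() simply
 * starts the buffer sizeof(uint16_t) bytes in.
 */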
2332
2333 #ifdef DEVICE_POLLING
2334 static int
2335 arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
2336 {
2337         struct arge_softc *sc = ifp->if_softc;
2338         int rx_npkts = 0;
2339
2340         if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
2341                 ARGE_LOCK(sc);
2342                 arge_tx_locked(sc);
2343                 rx_npkts = arge_rx_locked(sc);
2344                 ARGE_UNLOCK(sc);
2345         }
2346
2347         return (rx_npkts);
2348 }
2349 #endif /* DEVICE_POLLING */
2350
2351
2352 static void
2353 arge_tx_locked(struct arge_softc *sc)
2354 {
2355         struct arge_txdesc      *txd;
2356         struct arge_desc        *cur_tx;
2357         struct ifnet            *ifp;
2358         uint32_t                ctrl;
2359         int                     cons, prod;
2360
2361         ARGE_LOCK_ASSERT(sc);
2362
2363         cons = sc->arge_cdata.arge_tx_cons;
2364         prod = sc->arge_cdata.arge_tx_prod;
2365
2366         ARGEDEBUG(sc, ARGE_DBG_TX, "%s: cons=%d, prod=%d\n", __func__, cons,
2367             prod);
2368
2369         if (cons == prod)
2370                 return;
2371
2372         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
2373             sc->arge_cdata.arge_tx_ring_map,
2374             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2375
2376         ifp = sc->arge_ifp;
2377         /*
2378          * Go through our tx list and free mbufs for those
2379          * frames that have been transmitted.
2380          */
2381         for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) {
2382                 cur_tx = &sc->arge_rdata.arge_tx_ring[cons];
2383                 ctrl = cur_tx->packet_ctrl;
2384                 /* Check if descriptor has "finished" flag */
2385                 if ((ctrl & ARGE_DESC_EMPTY) == 0)
2386                         break;
2387
2388                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT);
2389
2390                 sc->arge_cdata.arge_tx_cnt--;
2391                 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2392
2393                 txd = &sc->arge_cdata.arge_txdesc[cons];
2394
2395                 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
2396
2397                 bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap,
2398                     BUS_DMASYNC_POSTWRITE);
2399                 bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap);
2400
2401                 /* Free only if it's the first descriptor in the list */
2402                 if (txd->tx_m)
2403                         m_freem(txd->tx_m);
2404                 txd->tx_m = NULL;
2405
2406                 /* reset descriptor */
2407                 cur_tx->packet_addr = 0;
2408         }
2409
2410         sc->arge_cdata.arge_tx_cons = cons;
2411
2412         bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag,
2413             sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE);
2414 }
2415
2416
2417 static int
2418 arge_rx_locked(struct arge_softc *sc)
2419 {
2420         struct arge_rxdesc      *rxd;
2421         struct ifnet            *ifp = sc->arge_ifp;
2422         int                     cons, prog, packet_len, i;
2423         struct arge_desc        *cur_rx;
2424         struct mbuf             *m;
2425         int                     rx_npkts = 0;
2426
2427         ARGE_LOCK_ASSERT(sc);
2428
2429         cons = sc->arge_cdata.arge_rx_cons;
2430
2431         bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
2432             sc->arge_cdata.arge_rx_ring_map,
2433             BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2434
2435         for (prog = 0; prog < ARGE_RX_RING_COUNT;
2436             ARGE_INC(cons, ARGE_RX_RING_COUNT)) {
2437                 cur_rx = &sc->arge_rdata.arge_rx_ring[cons];
2438                 rxd = &sc->arge_cdata.arge_rxdesc[cons];
2439                 m = rxd->rx_m;
2440
2441                 if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0)
2442                        break;
2443
2444                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD);
2445
2446                 prog++;
2447
2448                 packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl);
2449                 bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap,
2450                     BUS_DMASYNC_POSTREAD);
2451                 m = rxd->rx_m;
2452
2453                 /*
2454                  * If the MAC requires 4 byte alignment then the RX setup
2455                  * routine will have pre-offset things, so un-offset it here.
2456                  */
2457                 if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE)
2458                         arge_fixup_rx(m);
2459
2460                 m->m_pkthdr.rcvif = ifp;
2461                 /* Skip 4 bytes of CRC */
2462                 m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN;
2463                 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2464                 rx_npkts++;
2465
2466                 ARGE_UNLOCK(sc);
2467                 (*ifp->if_input)(ifp, m);
2468                 ARGE_LOCK(sc);
2469                 cur_rx->packet_addr = 0;
2470         }
2471
2472         if (prog > 0) {
2473
2474                 i = sc->arge_cdata.arge_rx_cons;
2475                 for (; prog > 0 ; prog--) {
2476                         if (arge_newbuf(sc, i) != 0) {
2477                                 device_printf(sc->arge_dev,
2478                                     "Failed to allocate buffer\n");
2479                                 break;
2480                         }
2481                         ARGE_INC(i, ARGE_RX_RING_COUNT);
2482                 }
2483
2484                 bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag,
2485                     sc->arge_cdata.arge_rx_ring_map,
2486                     BUS_DMASYNC_PREWRITE);
2487
2488                 sc->arge_cdata.arge_rx_cons = cons;
2489         }
2490
2491         return (rx_npkts);
2492 }
2493
2494 static int
2495 arge_intr_filter(void *arg)
2496 {
2497         struct arge_softc       *sc = arg;
2498         uint32_t                status, ints;
2499
2500         status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
2501         ints = ARGE_READ(sc, AR71XX_DMA_INTR);
2502
2503         ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints,
2504             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
2505             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
2506         ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status,
2507             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
2508             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
2509
2510         if (status & DMA_INTR_ALL) {
2511                 sc->arge_intr_status |= status;
2512                 ARGE_WRITE(sc, AR71XX_DMA_INTR, 0);
2513                 sc->stats.intr_ok++;
2514                 return (FILTER_SCHEDULE_THREAD);
2515         }
2516
2517         sc->arge_intr_status = 0;
2518         sc->stats.intr_stray++;
2519         return (FILTER_STRAY);
2520 }
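
/*
 * The handoff between the filter above and arge_intr() below: the
 * filter latches the raw status bits in sc->arge_intr_status and masks
 * the block's interrupts before scheduling the ithread; arge_intr()
 * ORs the latched bits back in, services them under the driver lock
 * and re-enables DMA_INTR_ALL on its way out.
 */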
2521
2522 static void
2523 arge_intr(void *arg)
2524 {
2525         struct arge_softc       *sc = arg;
2526         uint32_t                status;
2527         struct ifnet            *ifp = sc->arge_ifp;
2528 #ifdef  ARGE_DEBUG
2529         int i;
2530 #endif
2531
2532         status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS);
2533         status |= sc->arge_intr_status;
2534
2535         ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status,
2536             "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD"
2537             "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT");
2538
2539         /*
2540          * Is it our interrupt at all?
2541          */
2542         if (status == 0) {
2543                 sc->stats.intr_stray2++;
2544                 return;
2545         }
2546
2547 #ifdef  ARGE_DEBUG
2548         for (i = 0; i < 32; i++) {
2549                 if (status & (1U << i)) {
2550                         sc->intr_stats.count[i]++;
2551                 }
2552         }
2553 #endif
2554
2555         if (status & DMA_INTR_RX_BUS_ERROR) {
2556                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR);
2557                 device_printf(sc->arge_dev, "RX bus error\n");
2558                 return;
2559         }
2560
2561         if (status & DMA_INTR_TX_BUS_ERROR) {
2562                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR);
2563                 device_printf(sc->arge_dev, "TX bus error\n");
2564                 return;
2565         }
2566
2567         ARGE_LOCK(sc);
2568         arge_flush_ddr(sc);
2569
2570         if (status & DMA_INTR_RX_PKT_RCVD)
2571                 arge_rx_locked(sc);
2572
2573         /*
2574          * RX overrun disables the receiver.
2575          * Clear indication and re-enable rx.
2576          */
2577         if (status & DMA_INTR_RX_OVERFLOW) {
2578                 ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW);
2579                 ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN);
2580                 sc->stats.rx_overflow++;
2581         }
2582
2583         if (status & DMA_INTR_TX_PKT_SENT)
2584                 arge_tx_locked(sc);
2585         /*
2586          * Underrun turns off TX. Clear underrun indication.
2587          * If there's anything left in the ring, reactivate the tx.
2588          */
2589         if (status & DMA_INTR_TX_UNDERRUN) {
2590                 ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN);
2591                 sc->stats.tx_underflow++;
2592                 ARGEDEBUG(sc, ARGE_DBG_TX, "%s: TX underrun; tx_cnt=%d\n",
2593                     __func__, sc->arge_cdata.arge_tx_cnt);
2594                 if (sc->arge_cdata.arge_tx_cnt > 0) {
2595                         ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL,
2596                             DMA_TX_CONTROL_EN);
2597                 }
2598         }
2599
2600         /*
2601          * If we've finished TXing and there's space for more packets
2602          * to be queued for TX, do so. Otherwise we may end up in a
2603          * situation where the interface send queue was filled
2604          * whilst the hardware queue was full, then the hardware
2605          * queue was drained but the interface send queue wasn't,
2606          * and thus if_start() is never called to kick-start
2607          * the send process (and all subsequent packets are simply
2608          * discarded).
2609          *
2610          * XXX TODO: make sure that the hardware deals nicely
2611          * with the possibility of the queue being enabled above
2612          * after a TX underrun, then having the hardware queue added
2613          * to below.
2614          */
2615         if (status & (DMA_INTR_TX_PKT_SENT | DMA_INTR_TX_UNDERRUN) &&
2616             (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
2617                 if (!IFQ_IS_EMPTY(&ifp->if_snd))
2618                         arge_start_locked(ifp);
2619         }
2620
2621         /*
2622          * We handled all bits, clear status
2623          */
2624         sc->arge_intr_status = 0;
2625         ARGE_UNLOCK(sc);
2626         /*
2627          * re-enable all interrupts
2628          */
2629         ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL);
2630 }
2631
2632
2633 static void
2634 arge_tick(void *xsc)
2635 {
2636         struct arge_softc       *sc = xsc;
2637         struct mii_data         *mii;
2638
2639         ARGE_LOCK_ASSERT(sc);
2640
2641         if (sc->arge_miibus) {
2642                 mii = device_get_softc(sc->arge_miibus);
2643                 mii_tick(mii);
2644                 callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc);
2645         }
2646 }
2647
2648 int
2649 arge_multiphy_mediachange(struct ifnet *ifp)
2650 {
2651         struct arge_softc *sc = ifp->if_softc;
2652         struct ifmedia *ifm = &sc->arge_ifmedia;
2653         struct ifmedia_entry *ife = ifm->ifm_cur;
2654
2655         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2656                 return (EINVAL);
2657
2658         if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
2659                 device_printf(sc->arge_dev,
2660                     "AUTO is not supported for multiphy MAC\n");
2661                 return (EINVAL);
2662         }
2663
2664         /*
2665          * Accept but otherwise ignore the media change request.
2666          */
2667         return (0);
2668 }
2669
2670 void
2671 arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2672 {
2673         struct arge_softc *sc = ifp->if_softc;
2674
2675         ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
2676         ifmr->ifm_active = IFM_ETHER | sc->arge_media_type |
2677             sc->arge_duplex_mode;
2678 }
2679
2680 #if defined(ARGE_MDIO)
2681 static int
2682 argemdio_probe(device_t dev)
2683 {
2684         device_set_desc(dev, "Atheros AR71xx built-in ethernet interface, MDIO controller");
2685         return (0);
2686 }
2687
2688 static int
2689 argemdio_attach(device_t dev)
2690 {
2691         struct arge_softc       *sc;
2692         int                     error = 0;
2693
2694         sc = device_get_softc(dev);
2695         sc->arge_dev = dev;
2696         sc->arge_mac_unit = device_get_unit(dev);
2697         sc->arge_rid = 0;
2698         sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
2699             &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE);
2700         if (sc->arge_res == NULL) {
2701                 device_printf(dev, "couldn't map memory\n");
2702                 error = ENXIO;
2703                 goto fail;
2704         }
2705
2706         /* Reset MAC - required for AR71xx MDIO accesses to succeed */
2707         arge_reset_mac(sc);
2708         /* Reset MII bus */
2709         arge_reset_miibus(sc);
2710
2711         bus_generic_probe(dev);
2712         bus_enumerate_hinted_children(dev);
2713         error = bus_generic_attach(dev);
2714 fail:
2715         return (error);
2716 }
2717
2718 static int
2719 argemdio_detach(device_t dev)
2720 {
2721         return (0);
2722 }
2723
2724 #endif