/*-
 * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <netinet/in.h>
#include <net/if_vlan_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>

#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#endif

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <al_hal_common.h>
#include <al_hal_plat_services.h>
#include <al_hal_udma_config.h>
#include <al_hal_udma_iofic.h>
#include <al_hal_udma_debug.h>
#include <al_hal_eth.h>

#include "al_eth.h"
#include "al_init_eth_lm.h"
#include "arm/annapurna/alpine/alpine_serdes.h"

#include "miibus_if.h"

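/*
 * Debug-only printf wrapper: expands to a device_printf() guarded by the
 * compile-time AL_DBG_LEVEL knob, so debug chatter costs nothing in normal
 * builds.  AL_DBG_LOCK()/AL_DBG_UNLOCK() (presumably from the HAL platform
 * services layer) keep concurrent debug output from interleaving.
 */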
#define device_printf_dbg(fmt, ...) do {                                \
        if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK();          \
            device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();}          \
        } while (0)

MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");

/* move out to some pci header file */
#define PCI_VENDOR_ID_ANNAPURNA_LABS    0x1c36
#define PCI_DEVICE_ID_AL_ETH            0x0001
#define PCI_DEVICE_ID_AL_ETH_ADVANCED   0x0002
#define PCI_DEVICE_ID_AL_ETH_NIC        0x0003
#define PCI_DEVICE_ID_AL_ETH_FPGA_NIC   0x0030
#define PCI_DEVICE_ID_AL_CRYPTO         0x0011
#define PCI_DEVICE_ID_AL_CRYPTO_VF      0x8011
#define PCI_DEVICE_ID_AL_RAID_DMA       0x0021
#define PCI_DEVICE_ID_AL_RAID_DMA_VF    0x8021
#define PCI_DEVICE_ID_AL_USB            0x0041

#define MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]

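/*
 * Layout of the HW forwarding MAC table, as implied by the indices below:
 * unicast filters occupy entries 0..3, the all-multicast entry follows them,
 * and the last two entries of the AL_ETH_FWD_MAC_NUM-sized table are reserved
 * for broadcast and for the drop/promiscuous rule, respectively.
 */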
#define AL_ETH_MAC_TABLE_UNICAST_IDX_BASE       0
#define AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT      4
#define AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX      (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
                                                 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)

#define AL_ETH_MAC_TABLE_DROP_IDX               (AL_ETH_FWD_MAC_NUM - 1)
#define AL_ETH_MAC_TABLE_BROADCAST_IDX          (AL_ETH_MAC_TABLE_DROP_IDX - 1)

#define AL_ETH_THASH_UDMA_SHIFT         0
#define AL_ETH_THASH_UDMA_MASK          (0xF << AL_ETH_THASH_UDMA_SHIFT)

#define AL_ETH_THASH_Q_SHIFT            4
#define AL_ETH_THASH_Q_MASK             (0x3 << AL_ETH_THASH_Q_SHIFT)

/* the following defines should be moved to hal */
#define AL_ETH_FSM_ENTRY_IPV4_TCP               0
#define AL_ETH_FSM_ENTRY_IPV4_UDP               1
#define AL_ETH_FSM_ENTRY_IPV6_TCP               2
#define AL_ETH_FSM_ENTRY_IPV6_UDP               3
#define AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP        4
#define AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP        5

/* FSM DATA format */
#define AL_ETH_FSM_DATA_OUTER_2_TUPLE   0
#define AL_ETH_FSM_DATA_OUTER_4_TUPLE   1
#define AL_ETH_FSM_DATA_INNER_2_TUPLE   2
#define AL_ETH_FSM_DATA_INNER_4_TUPLE   3

#define AL_ETH_FSM_DATA_HASH_SEL        (1 << 2)

#define AL_ETH_FSM_DATA_DEFAULT_Q       0
#define AL_ETH_FSM_DATA_DEFAULT_UDMA    0

#define AL_BR_SIZE      512
#define AL_TSO_SIZE     65500
#define AL_DEFAULT_MTU  1500

#define CSUM_OFFLOAD            (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)

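/*
 * Receive buffers are posted 2 bytes past the start of the mbuf so that the
 * IP header following the 14-byte Ethernet header lands on a 4-byte boundary.
 */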
#define AL_IP_ALIGNMENT_OFFSET  2

#define SFP_I2C_ADDR            0x50

#define AL_MASK_GROUP_A_INT     0x7
#define AL_MASK_GROUP_B_INT     0xF
#define AL_MASK_GROUP_C_INT     0xF
#define AL_MASK_GROUP_D_INT     0xFFFFFFFF

#define AL_REG_OFFSET_FORWARD_INTR      (0x1800000 + 0x1210)
#define AL_EN_FORWARD_INTR      0x1FFFF
#define AL_DIS_FORWARD_INTR     0

#define AL_M2S_MASK_INIT        0x480
#define AL_S2M_MASK_INIT        0x1E0
#define AL_M2S_S2M_MASK_NOT_INT (0x3f << 25)

#define AL_10BASE_T_SPEED       10
#define AL_100BASE_TX_SPEED     100
#define AL_1000BASE_T_SPEED     1000

static devclass_t al_devclass;

#define AL_RX_LOCK_INIT(_sc)    mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
#define AL_RX_LOCK(_sc)         mtx_lock(&((_sc)->if_rx_lock))
#define AL_RX_UNLOCK(_sc)       mtx_unlock(&((_sc)->if_rx_lock))

/* helper functions */
static int al_is_device_supported(device_t);

static void al_eth_init_rings(struct al_eth_adapter *);
static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
int al_eth_fpga_write_pci_config(void *, int, uint32_t);
int al_eth_read_pci_config(void *, int, uint32_t *);
int al_eth_write_pci_config(void *, int, uint32_t);
void al_eth_irq_config(uint32_t *, uint32_t);
void al_eth_forward_int_config(uint32_t *, uint32_t);
static void al_eth_start_xmit(void *, int);
static void al_eth_rx_recv_work(void *, int);
static int al_eth_up(struct al_eth_adapter *);
static void al_eth_down(struct al_eth_adapter *);
static void al_eth_interrupts_unmask(struct al_eth_adapter *);
static void al_eth_interrupts_mask(struct al_eth_adapter *);
static int al_eth_check_mtu(struct al_eth_adapter *, int);
static uint64_t al_get_counter(struct ifnet *, ift_counter);
static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
static int al_eth_board_params_init(struct al_eth_adapter *);
static int al_media_update(struct ifnet *);
static void al_media_status(struct ifnet *, struct ifmediareq *);
static int al_eth_function_reset(struct al_eth_adapter *);
static int al_eth_hw_init_adapter(struct al_eth_adapter *);
static void al_eth_serdes_init(struct al_eth_adapter *);
static void al_eth_lm_config(struct al_eth_adapter *);
static int al_eth_hw_init(struct al_eth_adapter *);

static void al_tick_stats(void *);

/* ifnet entry points */
static void al_init(void *);
static int al_mq_start(struct ifnet *, struct mbuf *);
static void al_qflush(struct ifnet *);
static int al_ioctl(struct ifnet *, u_long, caddr_t);

/* bus entry points */
static int al_probe(device_t);
static int al_attach(device_t);
static int al_detach(device_t);
static int al_shutdown(device_t);

/* mii bus support routines */
static int al_miibus_readreg(device_t, int, int);
static int al_miibus_writereg(device_t, int, int, int);
static void al_miibus_statchg(device_t);
static void al_miibus_linkchg(device_t);

struct al_eth_adapter *g_adapters[16];
uint32_t g_adapters_count;

/* flag for napi-like mbuf processing, controlled from sysctl */
static int napi = 0;

static device_method_t al_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         al_probe),
        DEVMETHOD(device_attach,        al_attach),
        DEVMETHOD(device_detach,        al_detach),
        DEVMETHOD(device_shutdown,      al_shutdown),

        DEVMETHOD(miibus_readreg,       al_miibus_readreg),
        DEVMETHOD(miibus_writereg,      al_miibus_writereg),
        DEVMETHOD(miibus_statchg,       al_miibus_statchg),
        DEVMETHOD(miibus_linkchg,       al_miibus_linkchg),
        { 0, 0 }
};

static driver_t al_driver = {
        "al",
        al_methods,
        sizeof(struct al_eth_adapter),
};

DRIVER_MODULE(al, pci, al_driver, al_devclass, 0, 0);
DRIVER_MODULE(miibus, al, miibus_driver, miibus_devclass, 0, 0);

static int
al_probe(device_t dev)
{
        if ((al_is_device_supported(dev)) != 0) {
                device_set_desc(dev, "al");
                return (BUS_PROBE_DEFAULT);
        }
        return (ENXIO);
}

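/*
 * Attach: map the UDMA, MAC and EC register BARs, set up the ifnet with its
 * capabilities and callbacks, read the board parameters, reset and initialize
 * the HAL adapter and the rings, and finally attach the Ethernet layer (plus
 * the MII PHY bus when the MAC runs in RGMII mode).
 */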
static int
al_attach(device_t dev)
{
        struct al_eth_adapter *adapter;
        struct sysctl_oid_list *child;
        struct sysctl_ctx_list *ctx;
        struct sysctl_oid *tree;
        struct ifnet *ifp;
        uint32_t dev_id;
        uint32_t rev_id;
        int bar_udma;
        int bar_mac;
        int bar_ec;
        int err;

        err = 0;
        ifp = NULL;
        dev_id = rev_id = 0;
        ctx = device_get_sysctl_ctx(dev);
        tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
        child = SYSCTL_CHILDREN(tree);

        if (g_adapters_count == 0) {
                SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi",
                    CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism");
        }
        adapter = device_get_softc(dev);
        adapter->dev = dev;
        adapter->board_type = ALPINE_INTEGRATED;
        snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
            device_get_nameunit(dev));
        AL_RX_LOCK_INIT(adapter);

        g_adapters[g_adapters_count] = adapter;

        bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
        adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &bar_udma, RF_ACTIVE);
        if (adapter->udma_res == NULL) {
                device_printf(adapter->dev,
                    "could not allocate memory resources for DMA.\n");
                err = ENOMEM;
                goto err_res_dma;
        }
        adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
            rman_get_bushandle(adapter->udma_res));
        bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
        adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &bar_mac, RF_ACTIVE);
        if (adapter->mac_res == NULL) {
                device_printf(adapter->dev,
                    "could not allocate memory resources for MAC.\n");
                err = ENOMEM;
                goto err_res_mac;
        }
        adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
            rman_get_bushandle(adapter->mac_res));

        bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
        adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
            RF_ACTIVE);
        if (adapter->ec_res == NULL) {
                device_printf(adapter->dev,
                    "could not allocate memory resources for EC.\n");
                err = ENOMEM;
                goto err_res_ec;
        }
        adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
            rman_get_bushandle(adapter->ec_res));

        adapter->netdev = ifp = if_alloc(IFT_ETHER);

        adapter->netdev->if_link_state = LINK_STATE_DOWN;

        ifp->if_softc = adapter;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        ifp->if_flags = ifp->if_drv_flags;
        ifp->if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI;
        ifp->if_transmit = al_mq_start;
        ifp->if_qflush = al_qflush;
        ifp->if_ioctl = al_ioctl;
        ifp->if_init = al_init;
        ifp->if_get_counter = al_get_counter;
        ifp->if_mtu = AL_DEFAULT_MTU;

        adapter->if_flags = ifp->if_flags;

        ifp->if_capabilities = ifp->if_capenable = 0;

        ifp->if_capabilities |= IFCAP_HWCSUM |
            IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
            IFCAP_LRO | IFCAP_JUMBO_MTU;

        ifp->if_capenable = ifp->if_capabilities;

        adapter->id_number = g_adapters_count;

        if (adapter->board_type == ALPINE_INTEGRATED) {
                dev_id = pci_get_device(adapter->dev);
                rev_id = pci_get_revid(adapter->dev);
        } else {
                al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
                    PCIR_DEVICE, &dev_id);
                al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
                    PCIR_REVID, &rev_id);
        }

        adapter->dev_id = dev_id;
        adapter->rev_id = rev_id;

        /* set default ring sizes */
        adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
        adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
        adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
        adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;

        adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
        adapter->num_rx_queues = AL_ETH_NUM_QUEUES;

        adapter->small_copy_len = AL_ETH_DEFAULT_SMALL_PACKET_LEN;
        adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
        adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;

        al_eth_req_rx_buff_size(adapter, adapter->netdev->if_mtu);

        adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;

        err = al_eth_board_params_init(adapter);
        if (err != 0)
                goto err;

        if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
                ifmedia_init(&adapter->media, IFM_IMASK,
                    al_media_update, al_media_status);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
                ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
                ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
        }

        al_eth_function_reset(adapter);

        err = al_eth_hw_init_adapter(adapter);
        if (err != 0)
                goto err;

        al_eth_init_rings(adapter);
        g_adapters_count++;

        al_eth_lm_config(adapter);
        mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
        mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
        callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
        callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);

        ether_ifattach(ifp, adapter->mac_addr);
        ifp->if_mtu = AL_DEFAULT_MTU;

        if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
                al_eth_hw_init(adapter);

                /* Attach PHY(s) */
                err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
                    al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
                    MII_OFFSET_ANY, 0);
                if (err != 0) {
                        device_printf(adapter->dev, "attaching PHYs failed\n");
                        return (err);
                }

                adapter->mii = device_get_softc(adapter->miibus);
        }

        return (err);

err:
        bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
err_res_ec:
        bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
err_res_mac:
        bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
err_res_dma:
        return (err);
}

static int
al_detach(device_t dev)
{
        struct al_eth_adapter *adapter;

        adapter = device_get_softc(dev);
        ether_ifdetach(adapter->netdev);

        mtx_destroy(&adapter->stats_mtx);
        mtx_destroy(&adapter->wd_mtx);

        al_eth_down(adapter);

        bus_release_resource(dev, SYS_RES_IRQ,    0, adapter->irq_res);
        bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
        bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res);
        bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);

        return (0);
}

int
al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
{

        /* handle is the base address of the adapter */
        *val = al_reg_read32((void*)((u_long)handle + where));

        return (0);
}

int
al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
{

        /* handle is the base address of the adapter */
        al_reg_write32((void*)((u_long)handle + where), val);
        return (0);
}

int
al_eth_read_pci_config(void *handle, int where, uint32_t *val)
{

        /* handle is a pci_dev */
        *val = pci_read_config((device_t)handle, where, sizeof(*val));
        return (0);
}

int
al_eth_write_pci_config(void *handle, int where, uint32_t val)
{

        /* handle is a pci_dev */
        pci_write_config((device_t)handle, where, val, sizeof(val));
        return (0);
}

void
al_eth_irq_config(uint32_t *offset, uint32_t value)
{

        al_reg_write32_relaxed(offset, value);
}

void
al_eth_forward_int_config(uint32_t *offset, uint32_t value)
{

        al_reg_write32(offset, value);
}

static void
al_eth_serdes_init(struct al_eth_adapter *adapter)
{
        void __iomem    *serdes_base;

        adapter->serdes_init = false;

        serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
        if (serdes_base == NULL) {
                device_printf(adapter->dev, "serdes_base get failed!\n");
                return;
        }

        serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);

        al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
            &adapter->serdes_obj);

        adapter->serdes_init = true;
}

static void
al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *paddr;

        paddr = arg;
        *paddr = segs->ds_addr;
}

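/*
 * Coherent DMA allocation follows the usual busdma triple: create a tag
 * sized to whole pages, allocate zeroed DMA-able memory, then load the map;
 * al_dma_map_addr() above is the load callback that hands the resulting bus
 * address back through *baddr.
 */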
static int
al_dma_alloc_coherent(struct device *dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
    bus_addr_t *baddr, void **vaddr, uint32_t size)
{
        int ret;
        uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;

        ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
        if (ret != 0) {
                device_printf(dev,
                    "failed to create bus tag, ret = %d\n", ret);
                return (ret);
        }

        ret = bus_dmamem_alloc(*tag, vaddr,
            BUS_DMA_COHERENT | BUS_DMA_ZERO, map);
        if (ret != 0) {
                device_printf(dev,
                    "failed to allocate dmamem, ret = %d\n", ret);
                return (ret);
        }

        ret = bus_dmamap_load(*tag, *map, *vaddr,
            size, al_dma_map_addr, baddr, 0);
        if (ret != 0) {
                device_printf(dev,
                    "failed to load bus_dmamap, ret = %d\n", ret);
                return (ret);
        }

        return (0);
}

static void
al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr)
{

        bus_dmamap_unload(tag, map);
        bus_dmamem_free(tag, vaddr, map);
        bus_dma_tag_destroy(tag);
}

static void
al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter,
    uint8_t idx, uint8_t *addr, uint8_t udma_mask)
{
        struct al_eth_fwd_mac_table_entry entry = { { 0 } };

        /*
         * Note: the addr argument is currently unused; the entry is always
         * programmed with the adapter's own MAC address.
         */
        memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr));

        memset(entry.mask, 0xff, sizeof(entry.mask));
        entry.rx_valid = true;
        entry.tx_valid = false;
        entry.udma_mask = udma_mask;
        entry.filter = false;

        device_printf_dbg(adapter->dev,
            "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
            __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

        al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

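/*
 * Match every multicast frame with a single entry: only the group bit
 * (bit 0 of the first address octet) is set in both the address and the
 * mask, so any frame with the multicast bit set hits this entry.
 */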
static void
al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx,
    uint8_t udma_mask)
{
        struct al_eth_fwd_mac_table_entry entry = { { 0 } };

        memset(entry.addr, 0x00, sizeof(entry.addr));
        memset(entry.mask, 0x00, sizeof(entry.mask));
        entry.mask[0] |= 1;
        entry.addr[0] |= 1;

        entry.rx_valid = true;
        entry.tx_valid = false;
        entry.udma_mask = udma_mask;
        entry.filter = false;

        device_printf_dbg(adapter->dev,
            "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
            __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

        al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static void
al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter,
    uint8_t idx, uint8_t udma_mask)
{
        struct al_eth_fwd_mac_table_entry entry = { { 0 } };

        memset(entry.addr, 0xff, sizeof(entry.addr));
        memset(entry.mask, 0xff, sizeof(entry.mask));

        entry.rx_valid = true;
        entry.tx_valid = false;
        entry.udma_mask = udma_mask;
        entry.filter = false;

        device_printf_dbg(adapter->dev,
            "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n",
            __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask));

        al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

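/*
 * The entry at AL_ETH_MAC_TABLE_DROP_IDX uses an all-zero mask, so it
 * matches any frame that reaches it: in promiscuous mode it forwards
 * everything to UDMA 0, otherwise it marks unmatched frames for filtering.
 */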
static void
al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter,
    boolean_t promiscuous)
{
        struct al_eth_fwd_mac_table_entry entry = { { 0 } };

        memset(entry.addr, 0x00, sizeof(entry.addr));
        memset(entry.mask, 0x00, sizeof(entry.mask));

        entry.rx_valid = true;
        entry.tx_valid = false;
        entry.udma_mask = (promiscuous) ? 1 : 0;
        entry.filter = (promiscuous) ? false : true;

        device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n",
            __func__, (promiscuous) ? "enter" : "exit");

        al_eth_fwd_mac_table_set(&adapter->hal_adapter,
            AL_ETH_MAC_TABLE_DROP_IDX, &entry);
}

static void
al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx,
    uint8_t udma, uint32_t queue)
{

        if (udma != 0)
                panic("only UDMA0 is supported");

        if (queue >= AL_ETH_NUM_QUEUES)
                panic("invalid queue number");

        al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
}

/*
 * Initialize the RX FSM table; no tunneling is supported yet.  If the packet
 * is TCP/UDP over IPv4/IPv6, use a 4-tuple hash; other IP packets hash on
 * the 2-tuple only, and everything else goes to the default queue and UDMA.
 */
static void
al_eth_fsm_table_init(struct al_eth_adapter *adapter)
{
        uint32_t val;
        int i;

        for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) {
                uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i);
                switch (outer_type) {
                case AL_ETH_FSM_ENTRY_IPV4_TCP:
                case AL_ETH_FSM_ENTRY_IPV4_UDP:
                case AL_ETH_FSM_ENTRY_IPV6_TCP:
                case AL_ETH_FSM_ENTRY_IPV6_UDP:
                        val = AL_ETH_FSM_DATA_OUTER_4_TUPLE |
                            AL_ETH_FSM_DATA_HASH_SEL;
                        break;
                case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP:
                case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP:
                        val = AL_ETH_FSM_DATA_OUTER_2_TUPLE |
                            AL_ETH_FSM_DATA_HASH_SEL;
                        break;
                default:
                        val = AL_ETH_FSM_DATA_DEFAULT_Q |
                            AL_ETH_FSM_DATA_DEFAULT_UDMA;
                }
                al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
        }
}

static void
al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter,
    uint8_t idx)
{
        struct al_eth_fwd_mac_table_entry entry = { { 0 } };

        device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);

        al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

static int
al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
{
        struct al_eth_adapter_params *params = &adapter->eth_hal_params;
        int rc;

        /* params->dev_id = adapter->dev_id; */
        params->rev_id = adapter->rev_id;
        params->udma_id = 0;
        params->enable_rx_parser = 1; /* enable rx epe parser */
        params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
        params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
        params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
        params->name = adapter->name;
        params->serdes_lane = adapter->serdes_lane;

        rc = al_eth_adapter_init(&adapter->hal_adapter, params);
        if (rc != 0)
                device_printf(adapter->dev, "%s failed at hal init!\n",
                    __func__);

        if ((adapter->board_type == ALPINE_NIC) ||
            (adapter->board_type == ALPINE_FPGA_NIC)) {
                /* in pcie NIC mode, force eth UDMA to access PCIE0 using the vmid */
                struct al_udma_gen_tgtid_conf conf;
                int i;
                for (i = 0; i < DMA_MAX_Q; i++) {
                        conf.tx_q_conf[i].queue_en = AL_TRUE;
                        conf.tx_q_conf[i].desc_en = AL_FALSE;
                        conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
                        conf.rx_q_conf[i].queue_en = AL_TRUE;
                        conf.rx_q_conf[i].desc_en = AL_FALSE;
                        conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
                }
                al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
        }

        return (rc);
}

static void
al_eth_lm_config(struct al_eth_adapter *adapter)
{
        struct al_eth_lm_init_params params = {0};

        params.adapter = &adapter->hal_adapter;
        params.serdes_obj = &adapter->serdes_obj;
        params.lane = adapter->serdes_lane;
        params.sfp_detection = adapter->sfp_detection_needed;
        if (adapter->sfp_detection_needed == true) {
                params.sfp_bus_id = adapter->i2c_adapter_id;
                params.sfp_i2c_addr = SFP_I2C_ADDR;
        }

        if (adapter->sfp_detection_needed == false) {
                switch (adapter->mac_mode) {
                case AL_ETH_MAC_MODE_10GbE_Serial:
                        if ((adapter->lt_en != 0) && (adapter->an_en != 0))
                                params.default_mode = AL_ETH_LM_MODE_10G_DA;
                        else
                                params.default_mode = AL_ETH_LM_MODE_10G_OPTIC;
                        break;
                case AL_ETH_MAC_MODE_SGMII:
                        params.default_mode = AL_ETH_LM_MODE_1G;
                        break;
                default:
                        params.default_mode = AL_ETH_LM_MODE_10G_DA;
                }
        } else
                params.default_mode = AL_ETH_LM_MODE_10G_DA;

        params.link_training = adapter->lt_en;
        params.rx_equal = true;
        params.static_values = !adapter->dont_override_serdes;
        params.i2c_context = adapter;
        params.kr_fec_enable = false;

        params.retimer_exist = adapter->retimer.exist;
        params.retimer_bus_id = adapter->retimer.bus_id;
        params.retimer_i2c_addr = adapter->retimer.i2c_addr;
        params.retimer_channel = adapter->retimer.channel;

        al_eth_lm_init(&adapter->lm_context, &params);
}

static int
al_eth_board_params_init(struct al_eth_adapter *adapter)
{

        if (adapter->board_type == ALPINE_NIC) {
                adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
                adapter->sfp_detection_needed = false;
                adapter->phy_exist = false;
                adapter->an_en = false;
                adapter->lt_en = false;
                adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
                adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
        } else if (adapter->board_type == ALPINE_FPGA_NIC) {
                adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
                adapter->sfp_detection_needed = false;
                adapter->phy_exist = false;
                adapter->an_en = false;
                adapter->lt_en = false;
                adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ;
                adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
        } else {
                struct al_eth_board_params params;
                int rc;

                adapter->auto_speed = false;

                rc = al_eth_board_params_get(adapter->mac_base, &params);
                if (rc != 0) {
                        device_printf(adapter->dev,
                            "board info not available\n");
                        return (-1);
                }

                adapter->phy_exist = params.phy_exist == TRUE;
                adapter->phy_addr = params.phy_mdio_addr;
                adapter->an_en = params.autoneg_enable;
                adapter->lt_en = params.kr_lt_enable;
                adapter->serdes_grp = params.serdes_grp;
                adapter->serdes_lane = params.serdes_lane;
                adapter->sfp_detection_needed = params.sfp_plus_module_exist;
                adapter->i2c_adapter_id = params.i2c_adapter_id;
                adapter->ref_clk_freq = params.ref_clk_freq;
                adapter->dont_override_serdes = params.dont_override_serdes;
                adapter->link_config.active_duplex = !params.half_duplex;
                adapter->link_config.autoneg = !params.an_disable;
                adapter->link_config.force_1000_base_x = params.force_1000_base_x;
                adapter->retimer.exist = params.retimer_exist;
                adapter->retimer.bus_id = params.retimer_bus_id;
                adapter->retimer.i2c_addr = params.retimer_i2c_addr;
                adapter->retimer.channel = params.retimer_channel;

                switch (params.speed) {
                default:
                        device_printf(adapter->dev,
                            "%s: invalid speed (%d)\n", __func__, params.speed);
                        /* FALLTHROUGH: fall back to 1000 Mbps */
                case AL_ETH_BOARD_1G_SPEED_1000M:
                        adapter->link_config.active_speed = 1000;
                        break;
                case AL_ETH_BOARD_1G_SPEED_100M:
                        adapter->link_config.active_speed = 100;
                        break;
                case AL_ETH_BOARD_1G_SPEED_10M:
                        adapter->link_config.active_speed = 10;
                        break;
                }

                switch (params.mdio_freq) {
                default:
                        device_printf(adapter->dev,
                            "%s: invalid mdio freq (%d)\n", __func__,
                            params.mdio_freq);
                        /* FALLTHROUGH: fall back to the default frequency */
                case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ:
                        adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ;
                        break;
                case AL_ETH_BOARD_MDIO_FREQ_1_MHZ:
                        adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ;
                        break;
                }

                switch (params.media_type) {
                case AL_ETH_BOARD_MEDIA_TYPE_RGMII:
                        if (params.sfp_plus_module_exist == TRUE)
                                /* Backward compatibility */
                                adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
                        else
                                adapter->mac_mode = AL_ETH_MAC_MODE_RGMII;

                        adapter->use_lm = false;
                        break;
                case AL_ETH_BOARD_MEDIA_TYPE_SGMII:
                        adapter->mac_mode = AL_ETH_MAC_MODE_SGMII;
                        adapter->use_lm = true;
                        break;
                case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR:
                        adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
                        adapter->use_lm = true;
                        break;
                case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT:
                        adapter->sfp_detection_needed = TRUE;
                        adapter->auto_speed = false;
                        adapter->use_lm = true;
                        break;
                case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED:
                        adapter->sfp_detection_needed = TRUE;
                        adapter->auto_speed = true;
                        adapter->mac_mode_set = false;
                        adapter->use_lm = true;

                        adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial;
                        break;
                default:
                        device_printf(adapter->dev,
                            "%s: unsupported media type %d\n",
                            __func__, params.media_type);
                        return (-1);
                }

                device_printf(adapter->dev,
                    "Board info: phy exist %s. phy addr %d. mdio freq %u KHz. "
                    "SFP connected %s. media %d\n",
                    params.phy_exist == TRUE ? "Yes" : "No",
                    params.phy_mdio_addr, adapter->mdio_freq,
                    params.sfp_plus_module_exist == TRUE ? "Yes" : "No",
                    params.media_type);
        }

        al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);

        return (0);
}

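/*
 * A function-level reset (al_eth_flr_rmn()) clobbers the board parameters
 * and the MAC address stashed in on-chip scratch registers, so both are
 * saved here and written back right after the reset completes.
 */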
static int
al_eth_function_reset(struct al_eth_adapter *adapter)
{
        struct al_eth_board_params params;
        int rc;

        /* save board params so we can restore them after reset */
        al_eth_board_params_get(adapter->mac_base, &params);
        al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr);
        if (adapter->board_type == ALPINE_INTEGRATED)
                rc = al_eth_flr_rmn(&al_eth_read_pci_config,
                    &al_eth_write_pci_config,
                    adapter->dev, adapter->mac_base);
        else
                rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config,
                    &al_eth_fpga_write_pci_config,
                    adapter->internal_pcie_base, adapter->mac_base);

        /* restore params */
        al_eth_board_params_set(adapter->mac_base, &params);
        al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr);

        return (rc);
}

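/*
 * Per-ring bookkeeping: besides its HAL queue handle and descriptor counts,
 * each ring caches the IOFIC unmask register offset and a value with only
 * its own bit cleared, so the completion handlers can cheaply re-enable
 * just their own interrupt (TX rings live in group C, RX rings in group B).
 */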
static void
al_eth_init_rings(struct al_eth_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct al_eth_ring *ring = &adapter->tx_ring[i];

                ring->ring_id = i;
                ring->dev = adapter->dev;
                ring->adapter = adapter;
                ring->netdev = adapter->netdev;
                al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
                    &ring->dma_q);
                ring->sw_count = adapter->tx_ring_count;
                ring->hw_count = adapter->tx_descs_count;
                ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
                    (struct unit_regs *)adapter->udma_base,
                    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
                ring->unmask_val = ~(1 << i);
        }

        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct al_eth_ring *ring = &adapter->rx_ring[i];

                ring->ring_id = i;
                ring->dev = adapter->dev;
                ring->adapter = adapter;
                ring->netdev = adapter->netdev;
                al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
                ring->sw_count = adapter->rx_ring_count;
                ring->hw_count = adapter->rx_descs_count;
                ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
                    (struct unit_regs *)adapter->udma_base,
                    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
                ring->unmask_val = ~(1 << i);
        }
}

static void
al_init_locked(void *arg)
{
        struct al_eth_adapter *adapter = arg;
        if_t ifp = adapter->netdev;
        int rc = 0;

        al_eth_down(adapter);
        rc = al_eth_up(adapter);

        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        if (rc == 0)
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
}

static void
al_init(void *arg)
{
        struct al_eth_adapter *adapter = arg;

        al_init_locked(adapter);
}

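/*
 * Allocate one receive buffer: grab a jumbo cluster mbuf sized to the
 * current RX buffer size, DMA-map it, and describe it to the HAL shifted
 * by AL_IP_ALIGNMENT_OFFSET so the IP header ends up 4-byte aligned.
 */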
static inline int
al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring,
    struct al_eth_rx_buffer *rx_info)
{
        struct al_buf *al_buf;
        bus_dma_segment_t segs[2];
        int error;
        int nsegs;

        if (rx_info->m != NULL)
                return (0);

        rx_info->data_size = adapter->rx_mbuf_sz;

        AL_RX_LOCK(adapter);

        /* Get mbuf using UMA allocator */
        rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
            rx_info->data_size);
        AL_RX_UNLOCK(adapter);

        if (rx_info->m == NULL)
                return (ENOMEM);

        rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;

        /* Map packets for DMA */
        error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
            rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
        if (__predict_false(error)) {
                device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
                    error);
                m_freem(rx_info->m);
                rx_info->m = NULL;
                return (EFAULT);
        }

        al_buf = &rx_info->al_buf;
        al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
        al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;

        return (0);
}

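/*
 * Refill up to num descriptors on the given RX queue; allocation or posting
 * failures end the loop early.  Buffers are handed to the hardware in one
 * batch via al_eth_rx_buffer_action() once the loop is done.
 */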
static int
al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
    unsigned int num)
{
        struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
        uint16_t next_to_use;
        unsigned int i;

        next_to_use = rx_ring->next_to_use;

        for (i = 0; i < num; i++) {
                int rc;
                struct al_eth_rx_buffer *rx_info =
                    &rx_ring->rx_buffer_info[next_to_use];

                if (__predict_false(al_eth_alloc_rx_buf(adapter,
                    rx_ring, rx_info) < 0)) {
                        device_printf(adapter->dev,
                            "failed to alloc buffer for rx queue %d\n", qid);
                        break;
                }

                rc = al_eth_rx_buffer_add(rx_ring->dma_q,
                    &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
                if (__predict_false(rc)) {
                        device_printf(adapter->dev,
                            "failed to add buffer for rx queue %d\n", qid);
                        break;
                }

                next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
        }

        if (__predict_false(i < num))
                device_printf(adapter->dev,
                    "refilled rx queue %d with %d buffers only - available %d\n",
                    qid, i, al_udma_available_get(rx_ring->dma_q));

        if (__predict_true(i))
                al_eth_rx_buffer_action(rx_ring->dma_q, i);

        rx_ring->next_to_use = next_to_use;

        return (i);
}

/*
 * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void
al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
}

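/*
 * TX completion: al_eth_comp_tx_get() returns the number of completed
 * descriptors; a packet's mbuf is unmapped and freed only once all of the
 * descriptors it occupies (tx_info->tx_descs) are accounted for.
 */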
static void
al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
{
        unsigned int total_done;
        uint16_t next_to_clean;
        int qid = tx_ring->ring_id;

        total_done = al_eth_comp_tx_get(tx_ring->dma_q);
        device_printf_dbg(tx_ring->dev,
            "tx_poll: q %d total completed descs %x\n", qid, total_done);
        next_to_clean = tx_ring->next_to_clean;

        while (total_done != 0) {
                struct al_eth_tx_buffer *tx_info;
                struct mbuf *mbuf;

                tx_info = &tx_ring->tx_buffer_info[next_to_clean];
                /* stop if not all descriptors of the packet are completed */
                if (tx_info->tx_descs > total_done)
                        break;

                mbuf = tx_info->m;

                tx_info->m = NULL;

                device_printf_dbg(tx_ring->dev,
                    "tx_poll: q %d mbuf %p completed\n", qid, mbuf);

                /* map is no longer required */
                bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map);

                m_freem(mbuf);
                total_done -= tx_info->tx_descs;
                next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean);
        }

        tx_ring->next_to_clean = next_to_clean;

        device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n",
            qid, next_to_clean);

        /*
         * Make the ring index update visible to al_eth_start_xmit()
         * before it checks whether the queue is stalled.
         */
        al_smp_data_memory_barrier();
}

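/*
 * Fill the HAL packet's checksum/TSO metadata from the mbuf: pick TSO or
 * (partial) L4 checksum flags, locate the L3/L4 headers past any VLAN tag,
 * and record the header lengths and MSS in the per-ring meta descriptor.
 */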
static void
al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info,
    struct al_eth_pkt *hal_pkt, struct mbuf *m)
{
        uint32_t mss = m->m_pkthdr.tso_segsz;
        struct ether_vlan_header *eh;
        uint16_t etype;
#ifdef INET
        struct ip *ip;
#endif
#ifdef INET6
        struct ip6_hdr *ip6;
#endif
        struct tcphdr *th = NULL;
        int ehdrlen, ip_hlen = 0;
        uint8_t ipproto = 0;
        uint32_t offload = 0;

        if (mss != 0)
                offload = 1;

        if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0)
                offload = 1;

        if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0)
                offload = 1;

        if (offload != 0) {
                struct al_eth_meta_data *meta = &tx_ring->hal_meta;

                if (mss != 0)
                        hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO |
                            AL_ETH_TX_FLAGS_L4_CSUM);
                else
                        hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM |
                            AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM);

                /*
                 * Determine where frame payload starts.
                 * Jump over vlan headers if already present,
                 * helpful for QinQ too.
                 */
                eh = mtod(m, struct ether_vlan_header *);
                if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                        etype = ntohs(eh->evl_proto);
                        ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
                } else {
                        etype = ntohs(eh->evl_encap_proto);
                        ehdrlen = ETHER_HDR_LEN;
                }

                switch (etype) {
#ifdef INET
                case ETHERTYPE_IP:
                        ip = (struct ip *)(m->m_data + ehdrlen);
                        ip_hlen = ip->ip_hl << 2;
                        ipproto = ip->ip_p;
                        hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4;
                        th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
                        if (mss != 0)
                                hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM;
                        if (ipproto == IPPROTO_TCP)
                                hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
                        else
                                hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
                        break;
#endif /* INET */
#ifdef INET6
                case ETHERTYPE_IPV6:
                        ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
                        hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6;
                        ip_hlen = sizeof(struct ip6_hdr);
                        th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
                        ipproto = ip6->ip6_nxt;
                        if (ipproto == IPPROTO_TCP)
                                hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP;
                        else
                                hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP;
                        break;
#endif /* INET6 */
                default:
                        break;
                }

                meta->words_valid = 4;
                meta->l3_header_len = ip_hlen;
                meta->l3_header_offset = ehdrlen;
                if (th != NULL)
                        meta->l4_header_len = th->th_off; /* this param needed only for TSO */
                meta->mss_idx_sel = 0;                  /* check how to select MSS */
                meta->mss_val = mss;
                hal_pkt->meta = meta;
        } else
                hal_pkt->meta = NULL;
}

#define XMIT_QUEUE_TIMEOUT      100

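/*
 * Enqueue one mbuf on a TX ring: wait out a stalled queue, DMA-map the
 * chain (retrying once through m_defrag() on EFBIG), fill the HAL packet
 * descriptor, and kick the DMA engine.  The ring is marked stalled when
 * fewer than AL_ETH_PKT_MAX_BUFS + 2 descriptors remain, leaving room for
 * a maximally fragmented packet plus its meta descriptor.
 */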
static void
al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m)
{
        struct al_eth_tx_buffer *tx_info;
        int error;
        int nsegs, a;
        uint16_t next_to_use;
        bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1];
        struct al_eth_pkt *hal_pkt;
        struct al_buf *al_buf;
        boolean_t remap;

        /* Check if queue is ready */
        if (unlikely(tx_ring->stall != 0)) {
                for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) {
                        if (al_udma_available_get(tx_ring->dma_q) >=
                            (AL_ETH_DEFAULT_TX_HW_DESCS -
                            AL_ETH_TX_WAKEUP_THRESH)) {
                                tx_ring->stall = 0;
                                break;
                        }
                        pause("stall", 1);
                }
                if (a == XMIT_QUEUE_TIMEOUT) {
                        device_printf(tx_ring->dev,
                            "timeout waiting for queue %d ready!\n",
                            tx_ring->ring_id);
                        return;
                } else {
                        device_printf_dbg(tx_ring->dev,
                            "queue %d is ready!\n", tx_ring->ring_id);
                }
        }

        if (m == NULL) {
                device_printf(tx_ring->dev, "mbuf is NULL\n");
                return;
        }

        next_to_use = tx_ring->next_to_use;
        tx_info = &tx_ring->tx_buffer_info[next_to_use];
        tx_info->m = m;
        hal_pkt = &tx_info->hal_pkt;

        remap = TRUE;
        /* Map packets for DMA */
retry:
        error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map,
            m, segs, &nsegs, BUS_DMA_NOWAIT);
        if (__predict_false(error)) {
                struct mbuf *m_new;

                if (error == EFBIG) {
                        /* Try it again? - one try */
                        if (remap == TRUE) {
                                remap = FALSE;
                                m_new = m_defrag(m, M_NOWAIT);
                                if (m_new == NULL) {
                                        device_printf(tx_ring->dev,
                                            "failed to defrag mbuf\n");
                                        goto exit;
                                }
                                m = m_new;
                                goto retry;
                        } else {
                                device_printf(tx_ring->dev,
                                    "failed to map mbuf, error %d\n", error);
                                goto exit;
                        }
                } else {
                        device_printf(tx_ring->dev,
                            "failed to map mbuf, error %d\n", error);
                        goto exit;
                }
        }

        /* set flags and meta data */
        hal_pkt->flags = AL_ETH_TX_FLAGS_INT;
        al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m);

        al_buf = hal_pkt->bufs;
        for (a = 0; a < nsegs; a++) {
                al_buf->addr = segs[a].ds_addr;
                al_buf->len = segs[a].ds_len;

                al_buf++;
        }

        hal_pkt->num_of_bufs = nsegs;

        /* prepare the packet's descriptors to dma engine */
        tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt);

        if (tx_info->tx_descs == 0)
                goto exit;

        /*
         * stop the queue when no more space available, the packet can have up
         * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor
         */
        if (unlikely(al_udma_available_get(tx_ring->dma_q) <
            (AL_ETH_PKT_MAX_BUFS + 2))) {
                tx_ring->stall = 1;
                device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n",
                    tx_ring->ring_id);
                al_data_memory_barrier();
        }

        tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use);

        /* trigger the dma engine */
        al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs);
        return;

exit:
        m_freem(m);
}

1413
1414 static void
1415 al_eth_tx_cmpl_work(void *arg, int pending)
1416 {
1417         struct al_eth_ring *tx_ring = arg;
1418
1419         if (napi != 0) {
1420                 tx_ring->cmpl_is_running = 1;
1421                 al_data_memory_barrier();
1422         }
1423
1424         al_eth_tx_do_cleanup(tx_ring);
1425
1426         if (napi != 0) {
1427                 tx_ring->cmpl_is_running = 0;
1428                 al_data_memory_barrier();
1429         }
1430         /* all work done, enable IRQs */
1431         al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
1432 }
1433
1434 static int
1435 al_eth_tx_cmlp_irq_filter(void *arg)
1436 {
1437         struct al_eth_ring *tx_ring = arg;
1438
1439         /* Interrupt should be auto-masked upon arrival */
1440
1441         device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
1442             tx_ring->ring_id);
1443
1444         /*
1445          * For napi, if work is not running, schedule it. Always schedule
1446          * for casual (non-napi) packet handling.
1447          */
1448         if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0))
1449                 taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
1450
1451         /* Do not run bottom half */
1452         return (FILTER_HANDLED);
1453 }
1454
1455 static int
1456 al_eth_rx_recv_irq_filter(void *arg)
1457 {
1458         struct al_eth_ring *rx_ring = arg;
1459
1460         /* Interrupt should be auto-masked upon arrival */
1461
1462         device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
1463             rx_ring->ring_id);
1464
        /*
         * In napi mode, schedule the task only when it is not already
         * running; in the normal (non-napi) mode, always schedule it.
         */
        if ((napi == 0) || (napi != 0 && rx_ring->enqueue_is_running == 0))
1470                 taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
1471
1472         /* Do not run bottom half */
1473         return (FILTER_HANDLED);
1474 }
1475
1476 /*
1477  * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
1478  * @adapter: structure containing adapter specific data
1479  * @hal_pkt: HAL structure for the packet
1480  * @mbuf: mbuf currently being received and modified
1481  */
1482 static inline void
1483 al_eth_rx_checksum(struct al_eth_adapter *adapter,
1484     struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
1485 {
1486
1487         /* if IPv4 and error */
1488         if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM) &&
1489             (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
1490             (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
                device_printf(adapter->dev, "rx ipv4 header checksum error\n");
1492                 return;
1493         }
1494
1495         /* if IPv6 and error */
1496         if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM_IPV6) &&
1497             (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
1498             (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
                device_printf(adapter->dev, "rx ipv6 header checksum error\n");
1500                 return;
1501         }
1502
1503         /* if TCP/UDP */
1504         if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
1505            (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
1506                 if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
1507                         device_printf_dbg(adapter->dev, "rx L4 checksum error\n");
1508
1509                         /* TCP/UDP checksum error */
1510                         mbuf->m_pkthdr.csum_flags = 0;
1511                 } else {
1512                         device_printf_dbg(adapter->dev, "rx checksum correct\n");
1513
1514                         /* IP Checksum Good */
1515                         mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
1516                         mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1517                 }
1518         }
1519 }
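
/*
 * Illustrative sketch (not part of this driver): a consumer in the IP
 * input path can honor the flags set above roughly as follows; when
 * CSUM_IP_CHECKED is set without CSUM_IP_VALID the header checksum was
 * bad, and when CSUM_IP_CHECKED is clear software must verify it:
 *
 *	if ((m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) != 0) {
 *		if ((m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)
 *			goto bad;
 *	} else if (in_cksum(m, hlen) != 0)
 *		goto bad;
 */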
1520
1521 static struct mbuf*
1522 al_eth_rx_mbuf(struct al_eth_adapter *adapter,
1523     struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
1524     unsigned int descs, uint16_t *next_to_clean)
1525 {
1526         struct mbuf *mbuf;
1527         struct al_eth_rx_buffer *rx_info =
1528             &rx_ring->rx_buffer_info[*next_to_clean];
1529         unsigned int len;
1530
1531         len = hal_pkt->bufs[0].len;
1532         device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
1533            rx_info->m);
1534
1535         if (rx_info->m == NULL) {
1536                 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1537                     *next_to_clean);
1538                 return (NULL);
1539         }
1540
1541         mbuf = rx_info->m;
1542         mbuf->m_pkthdr.len = len;
1543         mbuf->m_len = len;
1544         mbuf->m_pkthdr.rcvif = rx_ring->netdev;
1545         mbuf->m_flags |= M_PKTHDR;
1546
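        /*
         * Copy-break: short frames are copied into a fresh header mbuf so
         * that the original receive buffer and its DMA mapping stay in the
         * ring and can be reused without an unload/reload cycle.
         */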
1547         if (len <= adapter->small_copy_len) {
1548                 struct mbuf *smbuf;
1549                 device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len);
1550
1551                 AL_RX_LOCK(adapter);
1552                 smbuf = m_gethdr(M_NOWAIT, MT_DATA);
1553                 AL_RX_UNLOCK(adapter);
1554                 if (__predict_false(smbuf == NULL)) {
1555                         device_printf(adapter->dev, "smbuf is NULL\n");
1556                         return (NULL);
1557                 }
1558
1559                 smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1560                 memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len);
1561
1562                 smbuf->m_len = len;
1563                 smbuf->m_pkthdr.rcvif = rx_ring->netdev;
1564
1565                 /* first desc of a non-ps chain */
1566                 smbuf->m_flags |= M_PKTHDR;
1567                 smbuf->m_pkthdr.len = smbuf->m_len;
1568
1569                 *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring,
1570                     *next_to_clean);
1571
1572                 return (smbuf);
1573         }
1574         mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET;
1575
1576         /* Unmap the buffer */
1577         bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map);
1578
1579         rx_info->m = NULL;
1580         *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean);
1581
1582         return (mbuf);
1583 }
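
/*
 * AL_ETH_RX_RING_IDX_NEXT() is defined in al_eth.h; a minimal sketch of
 * the wrap-around it is assumed to perform for a power-of-two ring size:
 *
 *	next = (cur + 1) & (ring->sw_count - 1);
 */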
1584
1585 static void
1586 al_eth_rx_recv_work(void *arg, int pending)
1587 {
1588         struct al_eth_ring *rx_ring = arg;
1589         struct mbuf *mbuf;
1590         struct lro_entry *queued;
1591         unsigned int qid = rx_ring->ring_id;
1592         struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
1593         uint16_t next_to_clean = rx_ring->next_to_clean;
1594         uint32_t refill_required;
1595         uint32_t refill_actual;
1596         uint32_t do_if_input;
1597
1598         if (napi != 0) {
1599                 rx_ring->enqueue_is_running = 1;
1600                 al_data_memory_barrier();
1601         }
1602
1603         do {
1604                 unsigned int descs;
1605
1606                 descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt);
1607                 if (unlikely(descs == 0))
1608                         break;
1609
1610                 device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet "
1611                     "from hal. descs %d\n", qid, descs);
1612                 device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. "
1613                     "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags,
1614                     hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx);
1615
1616                 /* ignore if detected dma or eth controller errors */
1617                 if ((hal_pkt->flags & (AL_ETH_RX_ERROR |
1618                     AL_UDMA_CDESC_ERROR)) != 0) {
1619                         device_printf(rx_ring->dev, "receive packet with error. "
1620                             "flags = 0x%x\n", hal_pkt->flags);
1621                         next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1622                             next_to_clean, descs);
1623                         continue;
1624                 }
1625
1626                 /* allocate mbuf and fill it */
1627                 mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs,
1628                     &next_to_clean);
1629
1630                 /* exit if we failed to retrieve a buffer */
1631                 if (unlikely(mbuf == NULL)) {
1632                         next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring,
1633                             next_to_clean, descs);
1634                         break;
1635                 }
1636
1637                 if (__predict_true(rx_ring->netdev->if_capenable & IFCAP_RXCSUM ||
1638                     rx_ring->netdev->if_capenable & IFCAP_RXCSUM_IPV6)) {
1639                         al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf);
1640                 }
1641
1642 #if __FreeBSD_version >= 800000
1643                 mbuf->m_pkthdr.flowid = qid;
1644                 M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE);
1645 #endif
1646
1647                 /*
1648                  * LRO is only for IP/TCP packets and TCP checksum of the packet
1649                  * should be computed by hardware.
1650                  */
1651                 do_if_input = 1;
1652                 if ((rx_ring->lro_enabled != 0) &&
1653                     ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) &&
1654                     hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) {
                        /*
                         * Try LRO first; the packet is still sent to the
                         * stack if there are no LRO resources or the LRO
                         * enqueue is rejected.
                         */
1661                         if (rx_ring->lro.lro_cnt != 0) {
1662                                 if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0)
1663                                         do_if_input = 0;
1664                         }
1665                 }
1666
1667                 if (do_if_input)
1668                         (*rx_ring->netdev->if_input)(rx_ring->netdev, mbuf);
1669
1670         } while (1);
1671
1672         rx_ring->next_to_clean = next_to_clean;
1673
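        /*
         * Top the ring back up: every descriptor the UDMA reports as free
         * gets a fresh buffer. A partial refill is not fatal; the ring is
         * topped up again on the next pass.
         */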
1674         refill_required = al_udma_available_get(rx_ring->dma_q);
1675         refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid,
1676             refill_required);
1677
1678         if (unlikely(refill_actual < refill_required)) {
1679                 device_printf_dbg(rx_ring->dev,
1680                     "%s: not filling rx queue %d\n", __func__, qid);
1681         }
1682
1683         while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) {
1684                 LIST_REMOVE(queued, next);
1685                 tcp_lro_flush(&rx_ring->lro, queued);
1686         }
1687
1688         if (napi != 0) {
1689                 rx_ring->enqueue_is_running = 0;
1690                 al_data_memory_barrier();
1691         }
1692         /* unmask irq */
1693         al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val);
1694 }
1695
1696 static void
1697 al_eth_start_xmit(void *arg, int pending)
1698 {
1699         struct al_eth_ring *tx_ring = arg;
1700         struct mbuf *mbuf;
1701
1702         if (napi != 0) {
1703                 tx_ring->enqueue_is_running = 1;
1704                 al_data_memory_barrier();
1705         }
1706
1707         while (1) {
1708                 mtx_lock(&tx_ring->br_mtx);
1709                 mbuf = drbr_dequeue(NULL, tx_ring->br);
1710                 mtx_unlock(&tx_ring->br_mtx);
1711
1712                 if (mbuf == NULL)
1713                         break;
1714
1715                 al_eth_xmit_mbuf(tx_ring, mbuf);
1716         }
1717
1718         if (napi != 0) {
1719                 tx_ring->enqueue_is_running = 0;
1720                 al_data_memory_barrier();
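                /*
                 * Close the race with al_mq_start(): a producer that saw
                 * enqueue_is_running set skipped scheduling this task, so
                 * drain anything enqueued before the flag was cleared.
                 */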
1721                 while (1) {
1722                         mtx_lock(&tx_ring->br_mtx);
1723                         mbuf = drbr_dequeue(NULL, tx_ring->br);
1724                         mtx_unlock(&tx_ring->br_mtx);
1725                         if (mbuf == NULL)
1726                                 break;
1727                         al_eth_xmit_mbuf(tx_ring, mbuf);
1728                 }
1729         }
1730 }
1731
1732 static int
1733 al_mq_start(struct ifnet *ifp, struct mbuf *m)
1734 {
1735         struct al_eth_adapter *adapter = ifp->if_softc;
1736         struct al_eth_ring *tx_ring;
1737         int i;
1738         int ret;
1739
1740         /* Which queue to use */
1741         if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1742                 i = m->m_pkthdr.flowid % adapter->num_tx_queues;
1743         else
1744                 i = curcpu % adapter->num_tx_queues;
1745
1746         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
1747             IFF_DRV_RUNNING) {
1748                 return (EFAULT);
1749         }
1750
1751         tx_ring = &adapter->tx_ring[i];
1752
        device_printf_dbg(adapter->dev, "%s: assuming link is active, "
            "sending packet to queue %d\n", __func__, i);
1755
1756         ret = drbr_enqueue(ifp, tx_ring->br, m);
1757
        /*
         * In napi mode, schedule the task only when it is not already
         * running; in the normal (non-napi) mode, always schedule it.
         */
1762         if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0)))
1763                 taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
1764
1765         return (ret);
1766 }
1767
1768 static void
1769 al_qflush(struct ifnet * ifp)
1770 {
1771
1772         /* unused */
1773 }
1774
1775 static inline void
1776 al_eth_flow_ctrl_init(struct al_eth_adapter *adapter)
1777 {
1778         uint8_t default_flow_ctrl;
1779
1780         default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE;
1781         default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE;
1782
1783         adapter->link_config.flow_ctrl_supported = default_flow_ctrl;
1784 }
1785
1786 static int
1787 al_eth_flow_ctrl_config(struct al_eth_adapter *adapter)
1788 {
1789         struct al_eth_flow_control_params *flow_ctrl_params;
1790         uint8_t active = adapter->link_config.flow_ctrl_active;
1791         int i;
1792
1793         flow_ctrl_params = &adapter->flow_ctrl_params;
1794
1795         flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE;
1796         flow_ctrl_params->obay_enable =
1797             ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0);
1798         flow_ctrl_params->gen_enable =
1799             ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0);
1800
1801         flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH;
1802         flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW;
1803         flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA;
1804         flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH;
1805
        /* map priority i to queue i / 2, expressed as a per-queue bitmask */
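        /* e.g. priorities 0-1 -> queue 0 (0x1), 2-3 -> queue 1 (0x2),
         * 4-5 -> queue 2 (0x4), 6-7 -> queue 3 (0x8) */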
1807         for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
1808                 flow_ctrl_params->prio_q_map[0][i] =  1 << (i >> 1);
1809
1810         al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params);
1811
1812         return (0);
1813 }
1814
1815 static void
1816 al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter)
1817 {
1818
        /*
         * change the active configuration to the default / forced value
         * and reapply it
         */
1823         adapter->link_config.flow_ctrl_active =
1824             adapter->link_config.flow_ctrl_supported;
1825
1826         al_eth_flow_ctrl_config(adapter);
1827 }
1828
1829 static void
1830 al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter)
1831 {
1832
1833         adapter->link_config.flow_ctrl_active = 0;
1834         al_eth_flow_ctrl_config(adapter);
1835 }
1836
1837 static int
1838 al_eth_hw_init(struct al_eth_adapter *adapter)
1839 {
1840         int rc;
1841
1842         rc = al_eth_hw_init_adapter(adapter);
1843         if (rc != 0)
1844                 return (rc);
1845
1846         rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode);
1847         if (rc < 0) {
1848                 device_printf(adapter->dev, "%s failed to configure mac!\n",
1849                     __func__);
1850                 return (rc);
1851         }
1852
1853         if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) ||
1854             (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII &&
1855              adapter->phy_exist == FALSE)) {
1856                 rc = al_eth_mac_link_config(&adapter->hal_adapter,
1857                     adapter->link_config.force_1000_base_x,
1858                     adapter->link_config.autoneg,
1859                     adapter->link_config.active_speed,
1860                     adapter->link_config.active_duplex);
1861                 if (rc != 0) {
1862                         device_printf(adapter->dev,
1863                             "%s failed to configure link parameters!\n",
1864                             __func__);
1865                         return (rc);
1866                 }
1867         }
1868
1869         rc = al_eth_mdio_config(&adapter->hal_adapter,
1870             AL_ETH_MDIO_TYPE_CLAUSE_22, TRUE /* shared_mdio_if */,
1871             adapter->ref_clk_freq, adapter->mdio_freq);
1872         if (rc != 0) {
1873                 device_printf(adapter->dev, "%s failed at mdio config!\n",
1874                     __func__);
1875                 return (rc);
1876         }
1877
1878         al_eth_flow_ctrl_init(adapter);
1879
1880         return (rc);
1881 }
1882
1883 static int
1884 al_eth_hw_stop(struct al_eth_adapter *adapter)
1885 {
1886
1887         al_eth_mac_stop(&adapter->hal_adapter);
1888
        /*
         * wait until pending rx packets are written and the UDMA becomes
         * idle; the MAC has a ~10KB fifo, so 10us should be enough time for
         * the UDMA to write it to memory
         */
1894         DELAY(10);
1895
1896         al_eth_adapter_stop(&adapter->hal_adapter);
1897
1898         adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED;
1899
        /* disable flow ctrl to avoid pause packets */
1901         al_eth_flow_ctrl_disable(adapter);
1902
1903         return (0);
1904 }
1905
/*
 * al_eth_intr_intx_all - Legacy interrupt handler for all interrupt groups
 * @data: pointer to the adapter structure
 */
1911 static int
1912 al_eth_intr_intx_all(void *data)
1913 {
1914         struct al_eth_adapter *adapter = data;
1915
1916         struct unit_regs __iomem *regs_base =
1917             (struct unit_regs __iomem *)adapter->udma_base;
1918         uint32_t reg;
1919
1920         reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
1921             AL_INT_GROUP_A);
1922         if (likely(reg))
1923                 device_printf_dbg(adapter->dev, "%s group A cause %x\n",
1924                     __func__, reg);
1925
1926         if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) {
1927                 struct al_iofic_grp_ctrl __iomem *sec_ints_base;
1928                 uint32_t cause_d =  al_udma_iofic_read_cause(regs_base,
1929                     AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D);
1930
1931                 sec_ints_base =
1932                     &regs_base->gen.interrupt_regs.secondary_iofic_ctrl[0];
1933                 if (cause_d != 0) {
1934                         device_printf_dbg(adapter->dev,
1935                             "got interrupt from group D. cause %x\n", cause_d);
1936
1937                         cause_d = al_iofic_read_cause(sec_ints_base,
1938                             AL_INT_GROUP_A);
                        device_printf_dbg(adapter->dev,
                            "secondary A cause %x\n", cause_d);
1941
1942                         cause_d = al_iofic_read_cause(sec_ints_base,
1943                             AL_INT_GROUP_B);
1944
1945                         device_printf_dbg(adapter->dev,
1946                             "secondary B cause %x\n", cause_d);
1947                 }
1948         }
        if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0) {
                uint32_t cause_b = al_udma_iofic_read_cause(regs_base,
                    AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
                int qid;
                device_printf_dbg(adapter->dev, "group B cause %x\n",
                    cause_b);
1955                 for (qid = 0; qid < adapter->num_rx_queues; qid++) {
1956                         if (cause_b & (1 << qid)) {
1957                                 /* mask */
1958                                 al_udma_iofic_mask(
1959                                     (struct unit_regs __iomem *)adapter->udma_base,
1960                                     AL_UDMA_IOFIC_LEVEL_PRIMARY,
1961                                     AL_INT_GROUP_B, 1 << qid);
1962                         }
1963                 }
1964         }
1965         if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) {
1966                 uint32_t cause_c = al_udma_iofic_read_cause(regs_base,
1967                     AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
1968                 int qid;
                device_printf_dbg(adapter->dev, "group C cause %x\n", cause_c);
1970                 for (qid = 0; qid < adapter->num_tx_queues; qid++) {
1971                         if ((cause_c & (1 << qid)) != 0) {
1972                                 al_udma_iofic_mask(
1973                                     (struct unit_regs __iomem *)adapter->udma_base,
1974                                     AL_UDMA_IOFIC_LEVEL_PRIMARY,
1975                                     AL_INT_GROUP_C, 1 << qid);
1976                         }
1977                 }
1978         }
1979
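        /*
         * Note: only the first TX ring's completion task is kicked here;
         * in legacy INTx mode all queues share this single vector.
         */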
1980         al_eth_tx_cmlp_irq_filter(adapter->tx_ring);
1981
1982         return (0);
1983 }
1984
1985 static int
1986 al_eth_intr_msix_all(void *data)
1987 {
1988         struct al_eth_adapter *adapter = data;
1989
1990         device_printf_dbg(adapter->dev, "%s\n", __func__);
1991         return (0);
1992 }
1993
1994 static int
1995 al_eth_intr_msix_mgmt(void *data)
1996 {
1997         struct al_eth_adapter *adapter = data;
1998
1999         device_printf_dbg(adapter->dev, "%s\n", __func__);
2000         return (0);
2001 }
2002
2003 static int
2004 al_eth_enable_msix(struct al_eth_adapter *adapter)
2005 {
2006         int i, msix_vecs, rc, count;
2007
2008         device_printf_dbg(adapter->dev, "%s\n", __func__);
2009         msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;
2010
2011         device_printf_dbg(adapter->dev,
2012             "Try to enable MSIX, vector numbers = %d\n", msix_vecs);
2013
        adapter->msix_entries = malloc(msix_vecs * sizeof(*adapter->msix_entries),
            M_IFAL, M_ZERO | M_WAITOK);
2016
2017         if (adapter->msix_entries == NULL) {
2018                 device_printf_dbg(adapter->dev, "failed to allocate"
2019                     " msix_entries %d\n", msix_vecs);
2020                 rc = ENOMEM;
2021                 goto exit;
2022         }
2023
        /* management vector (GROUP_A) @2 */
2025         adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
2026         adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2027
2028         /* rx queues start @3 */
2029         for (i = 0; i < adapter->num_rx_queues; i++) {
2030                 int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2031
2032                 adapter->msix_entries[irq_idx].entry = 3 + i;
2033                 adapter->msix_entries[irq_idx].vector = 0;
2034         }
2035         /* tx queues start @7 */
2036         for (i = 0; i < adapter->num_tx_queues; i++) {
2037                 int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2038
2039                 adapter->msix_entries[irq_idx].entry = 3 +
2040                     AL_ETH_MAX_HW_QUEUES + i;
2041                 adapter->msix_entries[irq_idx].vector = 0;
2042         }
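
        /*
         * Resulting MSI-X table layout (entries 0-1 are left unused):
         *   entry 2                              management (group A)
         *   entries 3 .. 3 + num_rx_queues - 1   per-queue RX completion
         *   entries 3 + AL_ETH_MAX_HW_QUEUES ..  per-queue TX completion
         */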
2043
2044         count = msix_vecs + 2; /* entries start from 2 */
2045         rc = pci_alloc_msix(adapter->dev, &count);
2046
2047         if (rc != 0) {
2048                 device_printf_dbg(adapter->dev, "failed to allocate MSIX "
2049                     "vectors %d\n", msix_vecs+2);
2050                 device_printf_dbg(adapter->dev, "ret = %d\n", rc);
2051                 goto msix_entries_exit;
2052         }
2053
2054         if (count != msix_vecs + 2) {
2055                 device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
2056                     "vectors %d, allocated %d\n", msix_vecs+2, count);
2057                 rc = ENOSPC;
2058                 goto msix_entries_exit;
2059         }
2060
        for (i = 0; i < msix_vecs; i++)
                adapter->msix_entries[i].vector = 2 + 1 + i;
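
        /*
         * pci_alloc_msix() exposes the allocated messages as SYS_RES_IRQ
         * resources with rids starting at 1, so logical vector i lands on
         * rid 2 + 1 + i, lining up with MSI-X table entry 2 + i.
         */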
2063
2064         device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
2065             " vectors %d\n", msix_vecs);
2066
2067         adapter->msix_vecs = msix_vecs;
2068         adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED;
2069         goto exit;
2070
2071 msix_entries_exit:
2072         adapter->msix_vecs = 0;
2073         free(adapter->msix_entries, M_IFAL);
2074         adapter->msix_entries = NULL;
2075
2076 exit:
2077         return (rc);
2078 }
2079
2080 static int
2081 al_eth_setup_int_mode(struct al_eth_adapter *adapter)
2082 {
2083         int i, rc;
2084
2085         rc = al_eth_enable_msix(adapter);
2086         if (rc != 0) {
2087                 device_printf(adapter->dev, "Failed to enable MSIX mode.\n");
2088                 return (rc);
2089         }
2090
2091         adapter->irq_vecs = max(1, adapter->msix_vecs);
2092         /* single INTX mode */
2093         if (adapter->msix_vecs == 0) {
2094                 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2095                     AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s",
2096                     device_get_name(adapter->dev));
2097                 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2098                     al_eth_intr_intx_all;
2099                 /* IRQ vector will be resolved from device resources */
2100                 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0;
2101                 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2102
                device_printf(adapter->dev, "%s: using INTx, vector %d\n",
                    __func__, adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector);
2105
2106                 return (0);
2107         }
2108         /* single MSI-X mode */
2109         if (adapter->msix_vecs == 1) {
2110                 snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name,
2111                     AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s",
2112                     device_get_name(adapter->dev));
2113                 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler =
2114                     al_eth_intr_msix_all;
2115                 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2116                     adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2117                 adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2118
2119                 return (0);
2120         }
2121         /* MSI-X per queue */
2122         snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE,
2123             "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev));
2124         adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt;
2125
2126         adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter;
2127         adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector =
2128             adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector;
2129
2130         for (i = 0; i < adapter->num_rx_queues; i++) {
2131                 int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);
2132
2133                 snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE,
2134                     "al-eth-rx-comp-%d@pci:%s", i,
2135                     device_get_name(adapter->dev));
2136                 adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter;
2137                 adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i];
2138                 adapter->irq_tbl[irq_idx].vector =
2139                     adapter->msix_entries[irq_idx].vector;
2140         }
2141
2142         for (i = 0; i < adapter->num_tx_queues; i++) {
2143                 int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);
2144
2145                 snprintf(adapter->irq_tbl[irq_idx].name,
2146                     AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i,
2147                     device_get_name(adapter->dev));
2148                 adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter;
2149                 adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i];
2150                 adapter->irq_tbl[irq_idx].vector =
2151                     adapter->msix_entries[irq_idx].vector;
2152         }
2153
2154         return (0);
2155 }
2156
2157 static void
2158 __al_eth_free_irq(struct al_eth_adapter *adapter)
2159 {
2160         struct al_eth_irq *irq;
2161         int i, rc;
2162
2163         for (i = 0; i < adapter->irq_vecs; i++) {
2164                 irq = &adapter->irq_tbl[i];
2165                 if (irq->requested != 0) {
2166                         device_printf_dbg(adapter->dev, "tear down irq: %d\n",
2167                             irq->vector);
2168                         rc = bus_teardown_intr(adapter->dev, irq->res,
2169                             irq->cookie);
2170                         if (rc != 0)
2171                                 device_printf(adapter->dev, "failed to tear "
2172                                     "down irq: %d\n", irq->vector);
2173
2174                 }
2175                 irq->requested = 0;
2176         }
2177 }
2178
2179 static void
2180 al_eth_free_irq(struct al_eth_adapter *adapter)
2181 {
2182         struct al_eth_irq *irq;
2183         int i, rc;
2184 #ifdef CONFIG_RFS_ACCEL
2185         if (adapter->msix_vecs >= 1) {
2186                 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2187                 adapter->netdev->rx_cpu_rmap = NULL;
2188         }
2189 #endif
2190
2191         __al_eth_free_irq(adapter);
2192
2193         for (i = 0; i < adapter->irq_vecs; i++) {
2194                 irq = &adapter->irq_tbl[i];
2195                 if (irq->res == NULL)
2196                         continue;
2197                 device_printf_dbg(adapter->dev, "release resource irq: %d\n",
2198                     irq->vector);
2199                 rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector,
2200                     irq->res);
2201                 irq->res = NULL;
2202                 if (rc != 0)
2203                         device_printf(adapter->dev, "dev has no parent while "
2204                             "releasing res for irq: %d\n", irq->vector);
2205         }
2206
2207         pci_release_msi(adapter->dev);
2208
2209         adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED;
2210
2211         adapter->msix_vecs = 0;
2212         free(adapter->msix_entries, M_IFAL);
2213         adapter->msix_entries = NULL;
2214 }
2215
2216 static int
2217 al_eth_request_irq(struct al_eth_adapter *adapter)
2218 {
2219         unsigned long flags;
2220         struct al_eth_irq *irq;
2221         int rc = 0, i, v;
2222
2223         if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0)
2224                 flags = RF_ACTIVE;
2225         else
2226                 flags = RF_ACTIVE | RF_SHAREABLE;
2227
2228         for (i = 0; i < adapter->irq_vecs; i++) {
2229                 irq = &adapter->irq_tbl[i];
2230
2231                 if (irq->requested != 0)
2232                         continue;
2233
2234                 irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ,
2235                     &irq->vector, flags);
2236                 if (irq->res == NULL) {
2237                         device_printf(adapter->dev, "could not allocate "
2238                             "irq vector=%d\n", irq->vector);
2239                         rc = ENXIO;
2240                         goto exit_res;
2241                 }
2242
2243                 if ((rc = bus_setup_intr(adapter->dev, irq->res,
2244                     INTR_TYPE_NET | INTR_MPSAFE, irq->handler,
2245                     NULL, irq->data, &irq->cookie)) != 0) {
2246                         device_printf(adapter->dev, "failed to register "
2247                             "interrupt handler for irq %ju: %d\n",
2248                             (uintmax_t)rman_get_start(irq->res), rc);
2249                         goto exit_intr;
2250                 }
2251                 irq->requested = 1;
2252         }
2253         goto exit;
2254
exit_intr:
        /* tear down the handlers set up so far; entry i itself was not set up */
        for (v = i - 1; v >= 0; v--) {
                int bti;

                irq = &adapter->irq_tbl[v];
                bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie);
                if (bti != 0) {
                        device_printf(adapter->dev, "failed to tear "
                            "down irq: %d\n", irq->vector);
                }

                irq->requested = 0;
                device_printf_dbg(adapter->dev, "exit_intr: tore down irq %d\n",
                    irq->vector);
        }

exit_res:
        /*
         * Release every resource allocated so far. When coming from
         * exit_intr, entry i still holds a resource; when the allocation
         * itself failed, its res pointer is NULL and it is skipped.
         */
        for (v = i; v >= 0; v--) {
                irq = &adapter->irq_tbl[v];
                if (irq->res == NULL)
                        continue;
                device_printf_dbg(adapter->dev, "exit_res: releasing resource"
                    " for irq %d\n", irq->vector);
                if (bus_release_resource(adapter->dev, SYS_RES_IRQ,
                    irq->vector, irq->res) != 0)
                        device_printf(adapter->dev, "dev has no parent while "
                            "releasing res for irq: %d\n", irq->vector);
                irq->res = NULL;
        }
2285
2286 exit:
2287         return (rc);
2288 }
2289
2290 /**
2291  * al_eth_setup_tx_resources - allocate Tx resources (Descriptors)
2292  * @adapter: network interface device structure
2293  * @qid: queue index
2294  *
 * Return 0 on success, errno value on failure
2296  **/
2297 static int
2298 al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
2299 {
2300         struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2301         struct device *dev = tx_ring->dev;
2302         struct al_udma_q_params *q_params = &tx_ring->q_params;
2303         int size;
2304         int ret;
2305
2306         if (adapter->up)
2307                 return (0);
2308
2309         size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
2310
2311         tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2312         if (tx_ring->tx_buffer_info == NULL)
2313                 return (ENOMEM);
2314
2315         tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
2316         q_params->size = tx_ring->hw_count;
2317
2318         ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2319             (bus_dmamap_t *)&q_params->desc_phy_base_map,
2320             (bus_addr_t *)&q_params->desc_phy_base,
2321             (void**)&q_params->desc_base, tx_ring->descs_size);
2322         if (ret != 0) {
2323                 device_printf(dev, "failed to al_dma_alloc_coherent,"
2324                     " ret = %d\n", ret);
2325                 return (ENOMEM);
2326         }
2327
2328         if (q_params->desc_base == NULL)
2329                 return (ENOMEM);
2330
2331         device_printf_dbg(dev, "Initializing ring queues %d\n", qid);
2332
2333         /* Allocate Ring Queue */
2334         mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
2335         tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
2336             &tx_ring->br_mtx);
2337         if (tx_ring->br == NULL) {
2338                 device_printf(dev, "Critical Failure setting up buf ring\n");
2339                 return (ENOMEM);
2340         }
2341
2342         /* Allocate taskqueues */
2343         TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
2344         tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT,
2345             taskqueue_thread_enqueue, &tx_ring->enqueue_tq);
2346         taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq",
2347             device_get_nameunit(adapter->dev));
2348         TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring);
2349         tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT,
2350             taskqueue_thread_enqueue, &tx_ring->cmpl_tq);
2351         taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq",
2352             device_get_nameunit(adapter->dev));
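
        /*
         * The enqueue task runs at PI_NET while the completion task runs
         * at PI_REALTIME, so reclaiming TX descriptors takes precedence
         * over queueing new work when the CPU is contended.
         */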
2353
2354         /* Setup DMA descriptor areas. */
2355         ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2356             1, 0,                       /* alignment, bounds */
2357             BUS_SPACE_MAXADDR,          /* lowaddr */
2358             BUS_SPACE_MAXADDR,          /* highaddr */
2359             NULL, NULL,                 /* filter, filterarg */
2360             AL_TSO_SIZE,                /* maxsize */
2361             AL_ETH_PKT_MAX_BUFS,        /* nsegments */
2362             PAGE_SIZE,                  /* maxsegsize */
2363             0,                          /* flags */
2364             NULL,                       /* lockfunc */
2365             NULL,                       /* lockfuncarg */
2366             &tx_ring->dma_buf_tag);
2367
2368         if (ret != 0) {
                device_printf(dev, "Unable to allocate dma_buf_tag, ret = %d\n",
2370                     ret);
2371                 return (ret);
2372         }
2373
2374         for (size = 0; size < tx_ring->sw_count; size++) {
2375                 ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0,
2376                     &tx_ring->tx_buffer_info[size].dma_map);
2377                 if (ret != 0) {
2378                         device_printf(dev, "Unable to map DMA TX "
2379                             "buffer memory [iter=%d]\n", size);
2380                         return (ret);
2381                 }
2382         }
2383
2384         /* completion queue not used for tx */
2385         q_params->cdesc_base = NULL;
2386         /* size in bytes of the udma completion ring descriptor */
2387         q_params->cdesc_size = 8;
2388         tx_ring->next_to_use = 0;
2389         tx_ring->next_to_clean = 0;
2390
2391         return (0);
2392 }
2393
2394 /*
2395  * al_eth_free_tx_resources - Free Tx Resources per Queue
2396  * @adapter: network interface device structure
2397  * @qid: queue index
2398  *
2399  * Free all transmit software resources
2400  */
2401 static void
2402 al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid)
2403 {
2404         struct al_eth_ring *tx_ring = &adapter->tx_ring[qid];
2405         struct al_udma_q_params *q_params = &tx_ring->q_params;
2406         int size;
2407
2408         /* At this point interrupts' handlers must be deactivated */
2409         while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL))
2410                 taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task);
2411
2412         taskqueue_free(tx_ring->cmpl_tq);
2413         while (taskqueue_cancel(tx_ring->enqueue_tq,
2414             &tx_ring->enqueue_task, NULL)) {
2415                 taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task);
2416         }
2417
2418         taskqueue_free(tx_ring->enqueue_tq);
2419
2420         if (tx_ring->br != NULL) {
2421                 drbr_flush(adapter->netdev, tx_ring->br);
2422                 buf_ring_free(tx_ring->br, M_DEVBUF);
2423         }
2424
2425         for (size = 0; size < tx_ring->sw_count; size++) {
2426                 m_freem(tx_ring->tx_buffer_info[size].m);
2427                 tx_ring->tx_buffer_info[size].m = NULL;
2428
2429                 bus_dmamap_unload(tx_ring->dma_buf_tag,
2430                     tx_ring->tx_buffer_info[size].dma_map);
2431                 bus_dmamap_destroy(tx_ring->dma_buf_tag,
2432                     tx_ring->tx_buffer_info[size].dma_map);
2433         }
2434         bus_dma_tag_destroy(tx_ring->dma_buf_tag);
2435
2436         free(tx_ring->tx_buffer_info, M_IFAL);
2437         tx_ring->tx_buffer_info = NULL;
2438
2439         mtx_destroy(&tx_ring->br_mtx);
2440
2441         /* if not set, then don't free */
2442         if (q_params->desc_base == NULL)
2443                 return;
2444
2445         al_dma_free_coherent(q_params->desc_phy_base_tag,
2446             q_params->desc_phy_base_map, q_params->desc_base);
2447
2448         q_params->desc_base = NULL;
2449 }
2450
2451 /*
2452  * al_eth_free_all_tx_resources - Free Tx Resources for All Queues
2453  * @adapter: board private structure
2454  *
2455  * Free all transmit software resources
2456  */
2457 static void
2458 al_eth_free_all_tx_resources(struct al_eth_adapter *adapter)
2459 {
2460         int i;
2461
2462         for (i = 0; i < adapter->num_tx_queues; i++)
2463                 if (adapter->tx_ring[i].q_params.desc_base)
2464                         al_eth_free_tx_resources(adapter, i);
2465 }
2466
2467 /*
2468  * al_eth_setup_rx_resources - allocate Rx resources (Descriptors)
2469  * @adapter: network interface device structure
2470  * @qid: queue index
2471  *
 * Returns 0 on success, errno value on failure
2473  */
2474 static int
2475 al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2476 {
2477         struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2478         struct device *dev = rx_ring->dev;
2479         struct al_udma_q_params *q_params = &rx_ring->q_params;
2480         int size;
2481         int ret;
2482
2483         size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count;
2484
        /* alloc an extra element so the rx path can always prefetch rx_info + 1 */
        size += sizeof(struct al_eth_rx_buffer);
2487
2488         rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
2489         if (rx_ring->rx_buffer_info == NULL)
2490                 return (ENOMEM);
2491
2492         rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
2493         q_params->size = rx_ring->hw_count;
2494
2495         ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag,
2496             &q_params->desc_phy_base_map,
2497             (bus_addr_t *)&q_params->desc_phy_base,
2498             (void**)&q_params->desc_base, rx_ring->descs_size);
2499
2500         if ((q_params->desc_base == NULL) || (ret != 0))
2501                 return (ENOMEM);
2502
2503         /* size in bytes of the udma completion ring descriptor */
2504         q_params->cdesc_size = 16;
2505         rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size;
2506         ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag,
2507             &q_params->cdesc_phy_base_map,
2508             (bus_addr_t *)&q_params->cdesc_phy_base,
2509             (void**)&q_params->cdesc_base, rx_ring->cdescs_size);
2510
2511         if ((q_params->cdesc_base == NULL) || (ret != 0))
2512                 return (ENOMEM);
2513
2514         /* Allocate taskqueues */
2515         TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring);
2516         rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT,
2517             taskqueue_thread_enqueue, &rx_ring->enqueue_tq);
2518         taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq",
2519             device_get_nameunit(adapter->dev));
2520
2521         /* Setup DMA descriptor areas. */
2522         ret = bus_dma_tag_create(bus_get_dma_tag(dev),
2523             1, 0,                       /* alignment, bounds */
2524             BUS_SPACE_MAXADDR,          /* lowaddr */
2525             BUS_SPACE_MAXADDR,          /* highaddr */
2526             NULL, NULL,                 /* filter, filterarg */
2527             AL_TSO_SIZE,                /* maxsize */
2528             1,                          /* nsegments */
2529             AL_TSO_SIZE,                /* maxsegsize */
2530             0,                          /* flags */
2531             NULL,                       /* lockfunc */
2532             NULL,                       /* lockfuncarg */
2533             &rx_ring->dma_buf_tag);
2534
2535         if (ret != 0) {
                device_printf(dev, "Unable to allocate RX dma_buf_tag\n");
2537                 return (ret);
2538         }
2539
2540         for (size = 0; size < rx_ring->sw_count; size++) {
2541                 ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0,
2542                     &rx_ring->rx_buffer_info[size].dma_map);
2543                 if (ret != 0) {
                        device_printf(dev, "Unable to map DMA RX buffer memory\n");
2545                         return (ret);
2546                 }
2547         }
2548
        /* Zero out the completion descriptor ring */
2550         memset(q_params->cdesc_base, 0, rx_ring->cdescs_size);
2551
2552         /* Create LRO for the ring */
2553         if ((adapter->netdev->if_capenable & IFCAP_LRO) != 0) {
2554                 int err = tcp_lro_init(&rx_ring->lro);
2555                 if (err != 0) {
2556                         device_printf(adapter->dev,
2557                             "LRO[%d] Initialization failed!\n", qid);
2558                 } else {
2559                         device_printf_dbg(adapter->dev,
2560                             "RX Soft LRO[%d] Initialized\n", qid);
2561                         rx_ring->lro_enabled = TRUE;
2562                         rx_ring->lro.ifp = adapter->netdev;
2563                 }
2564         }
2565
2566         rx_ring->next_to_clean = 0;
2567         rx_ring->next_to_use = 0;
2568
2569         return (0);
2570 }
2571
2572 /*
2573  * al_eth_free_rx_resources - Free Rx Resources
2574  * @adapter: network interface device structure
2575  * @qid: queue index
2576  *
2577  * Free all receive software resources
2578  */
2579 static void
2580 al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
2581 {
2582         struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
2583         struct al_udma_q_params *q_params = &rx_ring->q_params;
2584         int size;
2585
2586         /* At this point interrupts' handlers must be deactivated */
2587         while (taskqueue_cancel(rx_ring->enqueue_tq,
2588             &rx_ring->enqueue_task, NULL)) {
2589                 taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task);
2590         }
2591
2592         taskqueue_free(rx_ring->enqueue_tq);
2593
2594         for (size = 0; size < rx_ring->sw_count; size++) {
2595                 m_freem(rx_ring->rx_buffer_info[size].m);
2596                 rx_ring->rx_buffer_info[size].m = NULL;
2597                 bus_dmamap_unload(rx_ring->dma_buf_tag,
2598                     rx_ring->rx_buffer_info[size].dma_map);
2599                 bus_dmamap_destroy(rx_ring->dma_buf_tag,
2600                     rx_ring->rx_buffer_info[size].dma_map);
2601         }
2602         bus_dma_tag_destroy(rx_ring->dma_buf_tag);
2603
2604         free(rx_ring->rx_buffer_info, M_IFAL);
2605         rx_ring->rx_buffer_info = NULL;
2606
2607         /* if not set, then don't free */
2608         if (q_params->desc_base == NULL)
2609                 return;
2610
2611         al_dma_free_coherent(q_params->desc_phy_base_tag,
2612             q_params->desc_phy_base_map, q_params->desc_base);
2613
2614         q_params->desc_base = NULL;
2615
2616         /* if not set, then don't free */
2617         if (q_params->cdesc_base == NULL)
2618                 return;
2619
2620         al_dma_free_coherent(q_params->cdesc_phy_base_tag,
2621             q_params->cdesc_phy_base_map, q_params->cdesc_base);
2622
2623         q_params->cdesc_phy_base = 0;
2624
2625         /* Free LRO resources */
2626         tcp_lro_free(&rx_ring->lro);
2627 }
2628
2629 /*
2630  * al_eth_free_all_rx_resources - Free Rx Resources for All Queues
2631  * @adapter: board private structure
2632  *
2633  * Free all receive software resources
2634  */
2635 static void
2636 al_eth_free_all_rx_resources(struct al_eth_adapter *adapter)
2637 {
2638         int i;
2639
2640         for (i = 0; i < adapter->num_rx_queues; i++)
2641                 if (adapter->rx_ring[i].q_params.desc_base != 0)
2642                         al_eth_free_rx_resources(adapter, i);
2643 }
2644
2645 /*
2646  * al_eth_setup_all_rx_resources - allocate all queues Rx resources
2647  * @adapter: board private structure
2648  *
 * Return 0 on success, errno value on failure
2650  */
2651 static int
2652 al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter)
2653 {
2654         int i, rc = 0;
2655
2656         for (i = 0; i < adapter->num_rx_queues; i++) {
2657                 rc = al_eth_setup_rx_resources(adapter, i);
2658                 if (rc == 0)
2659                         continue;
2660
2661                 device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i);
2662                 goto err_setup_rx;
2663         }
2664         return (0);
2665
2666 err_setup_rx:
2667         /* rewind the index freeing the rings as we go */
2668         while (i--)
2669                 al_eth_free_rx_resources(adapter, i);
2670         return (rc);
2671 }
2672
2673 /*
2674  * al_eth_setup_all_tx_resources - allocate all queues Tx resources
2675  * @adapter: private structure
2676  *
 * Return 0 on success, errno value on failure
2678  */
2679 static int
2680 al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter)
2681 {
2682         int i, rc = 0;
2683
2684         for (i = 0; i < adapter->num_tx_queues; i++) {
2685                 rc = al_eth_setup_tx_resources(adapter, i);
2686                 if (rc == 0)
2687                         continue;
2688
2689                 device_printf(adapter->dev,
2690                     "Allocation for Tx Queue %u failed\n", i);
2691                 goto err_setup_tx;
2692         }
2693
2694         return (0);
2695
2696 err_setup_tx:
2697         /* rewind the index freeing the rings as we go */
2698         while (i--)
2699                 al_eth_free_tx_resources(adapter, i);
2700
2701         return (rc);
2702 }
2703
2704 static void
2705 al_eth_disable_int_sync(struct al_eth_adapter *adapter)
2706 {
2707
2708         /* disable forwarding interrupts from eth through pci end point */
2709         if ((adapter->board_type == ALPINE_FPGA_NIC) ||
2710             (adapter->board_type == ALPINE_NIC)) {
2711                 al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
2712                     AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR);
2713         }
2714
2715         /* mask hw interrupts */
2716         al_eth_interrupts_mask(adapter);
2717 }
2718
2719 static void
2720 al_eth_interrupts_unmask(struct al_eth_adapter *adapter)
2721 {
        uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */
        uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1; /* bit per Rx q */
        uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1; /* bit per Tx q */
2725         uint32_t group_d_mask = 3 << 8;
2726         struct unit_regs __iomem *regs_base =
2727             (struct unit_regs __iomem *)adapter->udma_base;
2728
2729         if (adapter->int_mode == AL_IOFIC_MODE_LEGACY)
2730                 group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM |
2731                     AL_INT_GROUP_A_GROUP_C_SUM |
2732                     AL_INT_GROUP_A_GROUP_D_SUM;
2733
2734         al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2735             AL_INT_GROUP_A, group_a_mask);
2736         al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2737             AL_INT_GROUP_B, group_b_mask);
2738         al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2739             AL_INT_GROUP_C, group_c_mask);
2740         al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2741             AL_INT_GROUP_D, group_d_mask);
2742 }
2743
2744 static void
2745 al_eth_interrupts_mask(struct al_eth_adapter *adapter)
2746 {
2747         struct unit_regs __iomem *regs_base =
2748             (struct unit_regs __iomem *)adapter->udma_base;
2749
2750         /* mask all interrupts */
2751         al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2752             AL_INT_GROUP_A, AL_MASK_GROUP_A_INT);
2753         al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2754             AL_INT_GROUP_B, AL_MASK_GROUP_B_INT);
2755         al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2756             AL_INT_GROUP_C, AL_MASK_GROUP_C_INT);
2757         al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY,
2758             AL_INT_GROUP_D, AL_MASK_GROUP_D_INT);
2759 }
2760
2761 static int
2762 al_eth_configure_int_mode(struct al_eth_adapter *adapter)
2763 {
2764         enum al_iofic_mode int_mode;
2765         uint32_t m2s_errors_disable = AL_M2S_MASK_INIT;
2766         uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT;
2767         uint32_t s2m_errors_disable = AL_S2M_MASK_INIT;
2768         uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT;
2769
2770         /* single INTX mode */
2771         if (adapter->msix_vecs == 0)
2772                 int_mode = AL_IOFIC_MODE_LEGACY;
2773         else if (adapter->msix_vecs > 1)
2774                 int_mode = AL_IOFIC_MODE_MSIX_PER_Q;
2775         else {
2776                 device_printf(adapter->dev,
2777                     "udma doesn't support single MSI-X mode yet.\n");
2778                 return (EIO);
2779         }
2780
        if (adapter->board_type != ALPINE_INTEGRATED) {
                m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
                m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
                s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT;
                s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT;
        }
2787
2788         if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base,
2789             int_mode, m2s_errors_disable, m2s_aborts_disable,
2790             s2m_errors_disable, s2m_aborts_disable)) {
2791                 device_printf(adapter->dev,
                    "al_udma_iofic_config failed!\n");
2793                 return (EIO);
2794         }
2795         adapter->int_mode = int_mode;
2796         device_printf_dbg(adapter->dev, "using %s interrupt mode\n",
2797             int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" :
2798             int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown");
2799         /* set interrupt moderation resolution to 15us */
        al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->
            gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15);
        al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->
            gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15);
2802         /* by default interrupt coalescing is disabled */
2803         adapter->tx_usecs = 0;
2804         adapter->rx_usecs = 0;
2805
2806         return (0);
2807 }
2808
2809 /*
2810  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
2811  * @index: Index in RX flow hash indirection table
2812  * @n_rx_rings: Number of RX rings to use
2813  *
2814  * This function provides the default policy for RX flow hash indirection.
2815  */
2816 static inline uint32_t
2817 ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
2818 {
2819
2820         return (index % n_rx_rings);
2821 }
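
/*
 * Example: with n_rx_rings == 4 the indirection table contents become the
 * repeating pattern 0, 1, 2, 3, 0, 1, ... across all table indices.
 */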
2822
2823 static void*
2824 al_eth_update_stats(struct al_eth_adapter *adapter)
2825 {
2826         struct al_eth_mac_stats *mac_stats = &adapter->mac_stats;
2827
2828         if (adapter->up == 0)
2829                 return (NULL);
2830
2831         al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats);
2832
2833         return (NULL);
2834 }
2835
2836 static uint64_t
2837 al_get_counter(struct ifnet *ifp, ift_counter cnt)
2838 {
2839         struct al_eth_adapter *adapter;
2840         struct al_eth_mac_stats *mac_stats;
2841         uint64_t rv;
2842
2843         adapter = if_getsoftc(ifp);
2844         mac_stats = &adapter->mac_stats;
2845
2846         switch (cnt) {
2847         case IFCOUNTER_IPACKETS:
2848                 return (mac_stats->aFramesReceivedOK); /* including pause frames */
2849         case IFCOUNTER_OPACKETS:
2850                 return (mac_stats->aFramesTransmittedOK);
2851         case IFCOUNTER_IBYTES:
2852                 return (mac_stats->aOctetsReceivedOK);
2853         case IFCOUNTER_OBYTES:
2854                 return (mac_stats->aOctetsTransmittedOK);
2855         case IFCOUNTER_IMCASTS:
2856                 return (mac_stats->ifInMulticastPkts);
2857         case IFCOUNTER_OMCASTS:
2858                 return (mac_stats->ifOutMulticastPkts);
2859         case IFCOUNTER_COLLISIONS:
2860                 return (0);
2861         case IFCOUNTER_IQDROPS:
2862                 return (mac_stats->etherStatsDropEvents);
2863         case IFCOUNTER_IERRORS:
2864                 rv = mac_stats->ifInErrors +
2865                     mac_stats->etherStatsUndersizePkts + /* good but short */
2866                     mac_stats->etherStatsFragments + /* short and bad*/
2867                     mac_stats->etherStatsJabbers + /* with crc errors */
2868                     mac_stats->etherStatsOversizePkts +
2869                     mac_stats->aFrameCheckSequenceErrors +
2870                     mac_stats->aAlignmentErrors;
2871                 return (rv);
2872         case IFCOUNTER_OERRORS:
2873                 return (mac_stats->ifOutErrors);
2874         default:
2875                 return (if_get_counter_default(ifp, cnt));
2876         }
2877 }
2878
2879 /*
2880  *  Unicast, Multicast and Promiscuous mode set
2881  *
2882  *  The set_rx_mode entry point is called whenever the unicast or multicast
2883  *  address lists or the network interface flags are updated.  This routine is
2884  *  responsible for configuring the hardware for proper unicast, multicast,
2885  *  promiscuous mode, and all-multi behavior.
2886  */
2887 #define MAX_NUM_MULTICAST_ADDRESSES 32
2888 #define MAX_NUM_ADDRESSES           32
2889
2890 static void
2891 al_eth_set_rx_mode(struct al_eth_adapter *adapter)
2892 {
2893         struct ifnet *ifp = adapter->netdev;
2894         struct ifmultiaddr *ifma; /* multicast addresses configured */
2895         struct ifaddr *ifua; /* unicast address */
2896         int mc = 0;
2897         int uc = 0;
2898         uint8_t i;
2899         unsigned char *mac;
2900
2901         if_maddr_rlock(ifp);
2902         CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2903                 if (ifma->ifma_addr->sa_family != AF_LINK)
2904                         continue;
2905                 if (mc == MAX_NUM_MULTICAST_ADDRESSES)
2906                         break;
2907
2908                 mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2909                 /* skip the default mc address embedded in the mac address */
2910                 if (mac[3] != 0 || mac[4] != 0 || mac[5] != 1)
2911                         mc++;
2912         }
2913         if_maddr_runlock(ifp);
2914
2915         if_addr_rlock(ifp);
2916         CK_STAILQ_FOREACH(ifua, &ifp->if_addrhead, ifa_link) {
2917                 if (ifua->ifa_addr->sa_family != AF_LINK)
2918                         continue;
2919                 if (uc == MAX_NUM_ADDRESSES)
2920                         break;
2921                 uc++;
2922         }
2923         if_addr_runlock(ifp);
2924
2925         if ((ifp->if_flags & IFF_PROMISC) != 0) {
2926                 al_eth_mac_table_promiscuous_set(adapter, true);
2927         } else {
2928                 if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
2929                         /* This interface is in all-multicast mode (used by multicast routers). */
2930                         al_eth_mac_table_all_multicast_add(adapter,
2931                             AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2932                 } else {
2933                         if (mc == 0) {
2934                                 al_eth_mac_table_entry_clear(adapter,
2935                                     AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX);
2936                         } else {
2937                                 al_eth_mac_table_all_multicast_add(adapter,
2938                                     AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1);
2939                         }
2940                 }
2941                 if (uc != 0) {
2942                         i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2943                         if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) {
2944                                 /*
2945                                  * In this case there are more addresses than
2946                                  * entries in the mac table - set promiscuous
2947                                  */
2948                                 al_eth_mac_table_promiscuous_set(adapter, true);
2949                                 return;
2950                         }
2951
2952                         /* clear the last configuration */
2953                         while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE +
2954                                     AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) {
2955                                 al_eth_mac_table_entry_clear(adapter, i);
2956                                 i++;
2957                         }
2958
2959                         /* set new addresses */
2960                         i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1;
2961                         if_addr_rlock(ifp);
2962                         CK_STAILQ_FOREACH(ifua, &ifp->if_addrhead, ifa_link) {
2963                                 if (ifua->ifa_addr->sa_family != AF_LINK) {
2964                                         continue;
2965                                 }
2966                                 al_eth_mac_table_unicast_add(adapter, i,
2967                                     (unsigned char *)LLADDR((struct sockaddr_dl *)ifua->ifa_addr), 1);
2968                                 i++;
2969                         }
2970                         if_addr_runlock(ifp);
2971
2972                 }
2973                 al_eth_mac_table_promiscuous_set(adapter, false);
2974         }
2975 }
2976
2977 static void
2978 al_eth_config_rx_fwd(struct al_eth_adapter *adapter)
2979 {
2980         struct al_eth_fwd_ctrl_table_entry entry;
2981         int i;
2982
2983         /* let priority be equal to pbits */
2984         for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++)
2985                 al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i);
2986
2987         /* map priority to queue index, queue id = priority/2 */
2988         for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++)
2989                 al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1);
2990
2991         entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0;
2992         entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE;
2993         entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO;
2994         entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE;
2995         entry.filter = FALSE;
2996
2997         al_eth_ctrl_table_def_set(&adapter->hal_adapter, FALSE, &entry);
2998
2999         /*
3000          * By default set the mac table to forward all unicast packets to our
3001          * MAC address and all broadcast frames. All the rest will be dropped.
3002          */
3003         al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE,
3004             adapter->mac_addr, 1);
3005         al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1);
3006         al_eth_mac_table_promiscuous_set(adapter, false);
3007
3008         /* set toeplitz hash keys */
3009         for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++)
3010                 *((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random();
3011
3012         for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++)
3013                 al_eth_hash_key_set(&adapter->hal_adapter, i,
3014                     htonl(adapter->toeplitz_hash_key[i]));
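             /*
              * Note: the key is randomized at init time, so RSS flow
              * placement is not repeatable across reboots; htonl() hands
              * the key words to the hardware in network byte order.
              */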
3015
3016         for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) {
3017                 adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i,
3018                     AL_ETH_NUM_QUEUES);
3019                 al_eth_set_thash_table_entry(adapter, i, 0,
3020                     adapter->rss_ind_tbl[i]);
3021         }
3022
3023         al_eth_fsm_table_init(adapter);
3024 }
3025
3026 static void
3027 al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size)
3028 {
3029
3030         /*
3031          * Determine the correct mbuf pool
3032          * for doing jumbo frames.
3033          * Try from the smallest size up to the maximum supported.
3034          */
3035         adapter->rx_mbuf_sz = MCLBYTES;
3036         if (size > 2048) {
3037                 if (adapter->max_rx_buff_alloc_size > 2048)
3038                         adapter->rx_mbuf_sz = MJUMPAGESIZE;
3039                 else
3040                         return;
3041         }
3042         if (size > 4096) {
3043                 if (adapter->max_rx_buff_alloc_size > 4096)
3044                         adapter->rx_mbuf_sz = MJUM9BYTES;
3045                 else
3046                         return;
3047         }
3048         if (size > 9216) {
3049                 if (adapter->max_rx_buff_alloc_size > 9216)
3050                         adapter->rx_mbuf_sz = MJUM16BYTES;
3051                 else
3052                         return;
3053         }
3054 }
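     /*
      * For example (hypothetical values): a request for 9000-byte buffers
      * with max_rx_buff_alloc_size of 16384 walks the ladder to MJUM9BYTES
      * (9000 > 4096 but not > 9216), while a 2048-byte cap leaves the
      * default MCLBYTES in place.
      */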
3055
3056 static int
3057 al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu)
3058 {
3059         int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
3060             ETHER_VLAN_ENCAP_LEN;
3061
3062         al_eth_req_rx_buff_size(adapter, new_mtu);
3063
3064         device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu);
3065         al_eth_rx_pkt_limit_config(&adapter->hal_adapter,
3066             AL_ETH_MIN_FRAME_LEN, max_frame);
3067
3068         al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100);
3069
3070         return (0);
3071 }
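     /*
      * The TSO MSS above is capped at new_mtu - 100, presumably to leave
      * headroom for L3/L4 headers and options so that each segment plus
      * its headers still fits within the MTU.
      */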
3072
3073 static int
3074 al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu)
3075 {
3076         int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
3077
3078         if ((new_mtu < AL_ETH_MIN_FRAME_LEN) ||
3079             (max_frame > AL_ETH_MAX_FRAME_LEN)) {
3080                 return (EINVAL);
3081         }
3082
3083         return (0);
3084 }
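     /*
      * For a standard 1500-byte MTU the resulting frame size is
      * 1500 + 14 (header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes; the MTU
      * itself must be at least AL_ETH_MIN_FRAME_LEN and the frame no
      * larger than AL_ETH_MAX_FRAME_LEN.
      */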
3085
3086 static int
3087 al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type,
3088     int qid)
3089 {
3090         int rc = 0;
3091         const char *name = (type == UDMA_TX) ? "Tx" : "Rx";
3092         struct al_udma_q_params *q_params;
3093
3094         if (type == UDMA_TX)
3095                 q_params = &adapter->tx_ring[qid].q_params;
3096         else
3097                 q_params = &adapter->rx_ring[qid].q_params;
3098
3099         rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params);
3100         if (rc < 0) {
3101                 device_printf(adapter->dev, "config %s queue %d failed\n", name,
3102                     qid);
3103                 return (rc);
3104         }
3105         return (rc);
3106 }
3107
3108 static int
3109 al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter)
3110 {
3111         int i;
3112
3113         for (i = 0; i < adapter->num_tx_queues; i++)
3114                 al_eth_udma_queue_enable(adapter, UDMA_TX, i);
3115
3116         for (i = 0; i < adapter->num_rx_queues; i++)
3117                 al_eth_udma_queue_enable(adapter, UDMA_RX, i);
3118
3119         return (0);
3120 }
3121
3122 static void
3123 al_eth_up_complete(struct al_eth_adapter *adapter)
3124 {
3125
3126         al_eth_configure_int_mode(adapter);
3127         al_eth_config_rx_fwd(adapter);
3128         al_eth_change_mtu(adapter, adapter->netdev->if_mtu);
3129         al_eth_udma_queues_enable_all(adapter);
3130         al_eth_refill_all_rx_bufs(adapter);
3131         al_eth_interrupts_unmask(adapter);
3132
3133         /* enable forwarding interrupts from eth through pci end point */
3134         if ((adapter->board_type == ALPINE_FPGA_NIC) ||
3135             (adapter->board_type == ALPINE_NIC)) {
3136                 al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base +
3137                     AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR);
3138         }
3139
3140         al_eth_flow_ctrl_enable(adapter);
3141
3142         mtx_lock(&adapter->stats_mtx);
3143         callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter);
3144         mtx_unlock(&adapter->stats_mtx);
3145
3146         al_eth_mac_start(&adapter->hal_adapter);
3147 }
3148
3149 static int
3150 al_media_update(struct ifnet *ifp)
3151 {
3152         struct al_eth_adapter *adapter = ifp->if_softc;
3153
3154         if ((ifp->if_flags & IFF_UP) != 0)
3155                 mii_mediachg(adapter->mii);
3156
3157         return (0);
3158 }
3159
3160 static void
3161 al_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
3162 {
3163         struct al_eth_adapter *sc = ifp->if_softc;
3164         struct mii_data *mii;
3165
3166         if (sc->mii == NULL) {
3167                 ifmr->ifm_active = IFM_ETHER | IFM_NONE;
3168                 ifmr->ifm_status = 0;
3169
3170                 return;
3171         }
3172
3173         mii = sc->mii;
3174         mii_pollstat(mii);
3175
3176         ifmr->ifm_active = mii->mii_media_active;
3177         ifmr->ifm_status = mii->mii_media_status;
3178 }
3179
3180 static void
3181 al_tick(void *arg)
3182 {
3183         struct al_eth_adapter *adapter = arg;
3184
3185         mii_tick(adapter->mii);
3186
3187         /* Schedule another timeout one second from now */
3188         callout_schedule(&adapter->wd_callout, hz);
3189 }
3190
3191 static void
3192 al_tick_stats(void *arg)
3193 {
3194         struct al_eth_adapter *adapter = arg;
3195
3196         al_eth_update_stats(adapter);
3197
3198         callout_schedule(&adapter->stats_callout, hz);
3199 }
3200
3201 static int
3202 al_eth_up(struct al_eth_adapter *adapter)
3203 {
3204         struct ifnet *ifp = adapter->netdev;
3205         int rc;
3206
3207         if (adapter->up)
3208                 return (0);
3209
3210         if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) {
3211                 al_eth_function_reset(adapter);
3212                 adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED;
3213         }
3214
3215         ifp->if_hwassist = 0;
3216         if ((ifp->if_capenable & IFCAP_TSO) != 0)
3217                 ifp->if_hwassist |= CSUM_TSO;
3218         if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
3219                 ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
3220         if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) != 0)
3221                 ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
3222
3223         al_eth_serdes_init(adapter);
3224
3225         rc = al_eth_hw_init(adapter);
3226         if (rc != 0)
3227                 goto err_hw_init_open;
3228
3229         rc = al_eth_setup_int_mode(adapter);
3230         if (rc != 0) {
3231                 device_printf(adapter->dev,
3232                     "%s: failed to setup interrupt mode\n", __func__);
3233                 goto err_setup_int;
3234         }
3235
3236         /* allocate transmit descriptors */
3237         rc = al_eth_setup_all_tx_resources(adapter);
3238         if (rc != 0)
3239                 goto err_setup_tx;
3240
3241         /* allocate receive descriptors */
3242         rc = al_eth_setup_all_rx_resources(adapter);
3243         if (rc != 0)
3244                 goto err_setup_rx;
3245
3246         rc = al_eth_request_irq(adapter);
3247         if (rc != 0)
3248                 goto err_req_irq;
3249
3250         al_eth_up_complete(adapter);
3251
3252         adapter->up = true;
3253
3254         if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial)
3255                 adapter->netdev->if_link_state = LINK_STATE_UP;
3256
3257         if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
3258                 mii_mediachg(adapter->mii);
3259
3260                 /* Schedule watchdog timeout */
3261                 mtx_lock(&adapter->wd_mtx);
3262                 callout_reset(&adapter->wd_callout, hz, al_tick, adapter);
3263                 mtx_unlock(&adapter->wd_mtx);
3264
3265                 mii_pollstat(adapter->mii);
3266         }
3267
3268         return (rc);
3269
3270 err_req_irq:
3271         al_eth_free_all_rx_resources(adapter);
3272 err_setup_rx:
3273         al_eth_free_all_tx_resources(adapter);
3274 err_setup_tx:
3275         al_eth_free_irq(adapter);
3276 err_setup_int:
3277         al_eth_hw_stop(adapter);
3278 err_hw_init_open:
3279         al_eth_function_reset(adapter);
3280
3281         return (rc);
3282 }
3283
3284 static int
3285 al_shutdown(device_t dev)
3286 {
3287         struct al_eth_adapter *adapter = device_get_softc(dev);
3288
3289         al_eth_down(adapter);
3290
3291         return (0);
3292 }
3293
3294 static void
3295 al_eth_down(struct al_eth_adapter *adapter)
3296 {
3297
3298         device_printf_dbg(adapter->dev, "al_eth_down: begin\n");
3299
3300         adapter->up = false;
3301
3302         mtx_lock(&adapter->wd_mtx);
3303         callout_stop(&adapter->wd_callout);
3304         mtx_unlock(&adapter->wd_mtx);
3305
3306         al_eth_disable_int_sync(adapter);
3307
3308         mtx_lock(&adapter->stats_mtx);
3309         callout_stop(&adapter->stats_callout);
3310         mtx_unlock(&adapter->stats_mtx);
3311
3312         al_eth_free_irq(adapter);
3313         al_eth_hw_stop(adapter);
3314
3315         al_eth_free_all_tx_resources(adapter);
3316         al_eth_free_all_rx_resources(adapter);
3317 }
3318
3319 static int
3320 al_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
3321 {
3322         struct al_eth_adapter   *adapter = ifp->if_softc;
3323         struct ifreq            *ifr = (struct ifreq *)data;
3324         int                     error = 0;
3325
3326         switch (command) {
3327         case SIOCSIFMTU:
3328         {
3329                 error = al_eth_check_mtu(adapter, ifr->ifr_mtu);
3330                 if (error != 0) {
3331                         device_printf(adapter->dev, "ioctl: invalid MTU %d\n",
3332                             ifr->ifr_mtu);
3333                         break;
3334                 }
3335
3336                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3337                 adapter->netdev->if_mtu = ifr->ifr_mtu;
3338                 al_init(adapter);
3339                 break;
3340         }
3341         case SIOCSIFFLAGS:
3342                 if ((ifp->if_flags & IFF_UP) != 0) {
3343                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3344                                 if (((ifp->if_flags ^ adapter->if_flags) &
3345                                     (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
3346                                         device_printf_dbg(adapter->dev,
3347                                             "ioctl promisc/allmulti\n");
3348                                         al_eth_set_rx_mode(adapter);
3349                                 }
3350                         } else {
3351                                 error = al_eth_up(adapter);
3352                                 if (error == 0)
3353                                         ifp->if_drv_flags |= IFF_DRV_RUNNING;
3354                         }
3355                 } else {
3356                         if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3357                                 al_eth_down(adapter);
3358                                 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
3359                         }
3360                 }
3361
3362                 adapter->if_flags = ifp->if_flags;
3363                 break;
3364
3365         case SIOCADDMULTI:
3366         case SIOCDELMULTI:
3367                 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
3368                         device_printf_dbg(adapter->dev,
3369                     "ioctl add/del multi: updating rx mode\n");
3370                         al_eth_set_rx_mode(adapter);
3374                 }
3375                 break;
3376         case SIOCSIFMEDIA:
3377         case SIOCGIFMEDIA:
3378                 if (adapter->mii != NULL)
3379                         error = ifmedia_ioctl(ifp, ifr,
3380                             &adapter->mii->mii_media, command);
3381                 else
3382                         error = ifmedia_ioctl(ifp, ifr,
3383                             &adapter->media, command);
3384                 break;
3385         case SIOCSIFCAP:
3386             {
3387                 int mask, reinit;
3388
3389                 reinit = 0;
3390                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
3391 #ifdef DEVICE_POLLING
3392                 if ((mask & IFCAP_POLLING) != 0) {
3393                         if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
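                                     /*
                                      * XXX: nothing sets error before this
                                      * check; a poll handler would normally
                                      * be registered with
                                      * ether_poll_register() at this point.
                                      */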
3394                                 if (error != 0)
3395                                         return (error);
3396                                 ifp->if_capenable |= IFCAP_POLLING;
3397                         } else {
3398                                 error = ether_poll_deregister(ifp);
3399                                 /* Enable interrupt even in error case */
3400                                 ifp->if_capenable &= ~IFCAP_POLLING;
3401                         }
3402                 }
3403 #endif
3404                 if ((mask & IFCAP_HWCSUM) != 0) {
3405                         /* apply to both rx and tx */
3406                         ifp->if_capenable ^= IFCAP_HWCSUM;
3407                         reinit = 1;
3408                 }
3409                 if ((mask & IFCAP_HWCSUM_IPV6) != 0) {
3410                         ifp->if_capenable ^= IFCAP_HWCSUM_IPV6;
3411                         reinit = 1;
3412                 }
3413                 if ((mask & IFCAP_TSO) != 0) {
3414                         ifp->if_capenable ^= IFCAP_TSO;
3415                         reinit = 1;
3416                 }
3417                 if ((mask & IFCAP_LRO) != 0) {
3418                         ifp->if_capenable ^= IFCAP_LRO;
3419                 }
3420                 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
3421                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
3422                         reinit = 1;
3423                 }
3424                 if ((mask & IFCAP_VLAN_HWFILTER) != 0) {
3425                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
3426                         reinit = 1;
3427                 }
3428                 if ((mask & IFCAP_VLAN_HWTSO) != 0) {
3429                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
3430                         reinit = 1;
3431                 }
3432                 if ((reinit != 0) &&
3433                     ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0))
3434                         al_init(adapter);
3437                 break;
3438             }
3439
3440         default:
3441                 error = ether_ioctl(ifp, command, data);
3442                 break;
3443         }
3444
3445         return (error);
3446 }
3447
3448 static int
3449 al_is_device_supported(device_t dev)
3450 {
3451         uint16_t pci_vendor_id = pci_get_vendor(dev);
3452         uint16_t pci_device_id = pci_get_device(dev);
3453
3454         return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS &&
3455             (pci_device_id == PCI_DEVICE_ID_AL_ETH ||
3456             pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED ||
3457             pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC ||
3458             pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC));
3459 }
3460
3461 /* Time in msec to keep retrying MDIO reads/writes in case of error */
3462 #define MDIO_TIMEOUT_MSEC       100
3463 #define MDIO_PAUSE_MSEC         10
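     /*
      * With these values the retry loops below make at most
      * MDIO_TIMEOUT_MSEC / MDIO_PAUSE_MSEC == 10 attempts before
      * giving up.
      */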
3464
3465 static int
3466 al_miibus_readreg(device_t dev, int phy, int reg)
3467 {
3468         struct al_eth_adapter *adapter = device_get_softc(dev);
3469         uint16_t value = 0;
3470         int rc;
3471         int timeout = MDIO_TIMEOUT_MSEC;
3472
3473         while (timeout > 0) {
3474                 rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr,
3475                     -1, reg, &value);
3476
3477                 if (rc == 0)
3478                         return (value);
3479
3480                 device_printf_dbg(adapter->dev,
3481                     "MDIO read failed, retrying in 10 msec\n");
3482
3483                 timeout -= MDIO_PAUSE_MSEC;
3484                 pause("mdiord", MDIO_PAUSE_MSEC * hz / 1000); /* msec to ticks */
3485         }
3486
3487         if (rc != 0)
3488                 device_printf(adapter->dev, "MDIO read failed on timeout\n");
3489
3490         return (value);
3491 }
3492
3493 static int
3494 al_miibus_writereg(device_t dev, int phy, int reg, int value)
3495 {
3496         struct al_eth_adapter *adapter = device_get_softc(dev);
3497         int rc;
3498         int timeout = MDIO_TIMEOUT_MSEC;
3499
3500         while (timeout > 0) {
3501                 rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr,
3502                     -1, reg, value);
3503
3504                 if (rc == 0)
3505                         return (0);
3506
3507                 device_printf(adapter->dev,
3508                     "MDIO write failed, retrying in 10 msec\n");
3509
3510                 timeout -= MDIO_PAUSE_MSEC;
3511                 pause("mdiowr", MDIO_PAUSE_MSEC * hz / 1000); /* msec to ticks */
3512         }
3513
3514         if (rc != 0)
3515                 device_printf(adapter->dev, "MDIO write failed on timeout\n");
3516
3517         return (rc);
3518 }
3519
3520 static void
3521 al_miibus_statchg(device_t dev)
3522 {
3523         struct al_eth_adapter *adapter = device_get_softc(dev);
3524
3525         device_printf_dbg(adapter->dev,
3526             "al_miibus_statchg: state has changed!\n");
3527         device_printf_dbg(adapter->dev,
3528             "al_miibus_statchg: active = 0x%x status = 0x%x\n",
3529             adapter->mii->mii_media_active, adapter->mii->mii_media_status);
3530
3531         if (adapter->up == 0)
3532                 return;
3533
3534         if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) {
3535                 if (adapter->mii->mii_media_status & IFM_ACTIVE) {
3536                         device_printf(adapter->dev, "link is UP\n");
3537                         adapter->netdev->if_link_state = LINK_STATE_UP;
3538                 } else {
3539                         device_printf(adapter->dev, "link is DOWN\n");
3540                         adapter->netdev->if_link_state = LINK_STATE_DOWN;
3541                 }
3542         }
3543 }
3544
3545 static void
3546 al_miibus_linkchg(device_t dev)
3547 {
3548         struct al_eth_adapter *adapter = device_get_softc(dev);
3549         uint8_t duplex = 0;
3550         uint8_t speed = 0;
3551
3552         if (adapter->mii == NULL)
3553                 return;
3554
3555         if ((adapter->netdev->if_flags & IFF_UP) == 0)
3556                 return;
3557
3558         /* Ignore link changes when link is not ready */
3559         if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) !=
3560             (IFM_AVALID | IFM_ACTIVE)) {
3561                 return;
3562         }
3563
3564         if ((adapter->mii->mii_media_active & IFM_FDX) != 0)
3565                 duplex = 1;
3566
3567         speed = IFM_SUBTYPE(adapter->mii->mii_media_active);
3568
3569         if (speed == IFM_10_T) {
3570                 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3571                     AL_10BASE_T_SPEED, duplex);
3572                 return;
3573         }
3574
3575         if (speed == IFM_100_TX) {
3576                 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3577                     AL_100BASE_TX_SPEED, duplex);
3578                 return;
3579         }
3580
3581         if (speed == IFM_1000_T) {
3582                 al_eth_mac_link_config(&adapter->hal_adapter, 0, 1,
3583                     AL_1000BASE_T_SPEED, duplex);
3584                 return;
3585         }
3586
3587         device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n",
3588             adapter->mii->mii_media_active);
3589 }