]> CyberLeo.Net >> Repos - FreeBSD/releng/9.3.git/blob - sys/dev/oce/oce_if.c
Copy stable/9 to releng/9.3 as part of the 9.3-RELEASE cycle.
[FreeBSD/releng/9.3.git] / sys / dev / oce / oce_if.c
1 /*-
2  * Copyright (C) 2013 Emulex
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the Emulex Corporation nor the names of its
16  *    contributors may be used to endorse or promote products derived from
17  *    this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Contact Information:
32  * freebsd-drivers@emulex.com
33  *
34  * Emulex
35  * 3333 Susan Street
36  * Costa Mesa, CA 92626
37  */
38
39 /* $FreeBSD$ */
40
41 #include "opt_inet6.h"
42 #include "opt_inet.h"
43
44 #include "oce_if.h"
45
/* UE Status Low CSR */
/*
 * Human-readable names for the bits of the Unrecoverable Error (UE)
 * status low register: entry i describes bit i.  Some entries carry a
 * trailing space that is part of the reported string — do not trim.
 */
static char *ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
81
/* UE Status High CSR */
/*
 * Human-readable names for the bits of the Unrecoverable Error (UE)
 * status high register: entry i describes bit i.  Bits 24-31 are
 * reserved/"Unknown".
 */
static char *ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};
117
118
/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
                          void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
                                        uint32_t status);
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
                                 struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
                                                struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
                 struct oce_async_cqe_link_state *acqe);
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);

/* IP specific (LRO/TSO helpers only built when INET and/or INET6 is on) */
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif
179
/* Newbus device method table: probe/attach/detach/shutdown entry points. */
static device_method_t oce_dispatch[] = {
        DEVMETHOD(device_probe, oce_probe),
        DEVMETHOD(device_attach, oce_attach),
        DEVMETHOD(device_detach, oce_detach),
        DEVMETHOD(device_shutdown, oce_shutdown),
        {0, 0}
};
187
/* Newbus glue: register the "oce" driver on the pci bus. */
static driver_t oce_driver = {
        "oce",
        oce_dispatch,
        sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);
200
201
/* global vars */
/* Component revision string, also embedded in the probe description. */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;


/* Loader tunables that override the two defaults above at boot. */
TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table */
/* Each entry packs (PCI vendor << 16) | PCI device for IDs we attach to. */
static uint32_t supportedDevices[] =  {
        (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
        (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};
223
224
225
226
227 /*****************************************************************************
228  *                      Driver entry points functions                        *
229  *****************************************************************************/
230
231 static int
232 oce_probe(device_t dev)
233 {
234         uint16_t vendor = 0;
235         uint16_t device = 0;
236         int i = 0;
237         char str[256] = {0};
238         POCE_SOFTC sc;
239
240         sc = device_get_softc(dev);
241         bzero(sc, sizeof(OCE_SOFTC));
242         sc->dev = dev;
243
244         vendor = pci_get_vendor(dev);
245         device = pci_get_device(dev);
246
247         for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
248                 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
249                         if (device == (supportedDevices[i] & 0xffff)) {
250                                 sprintf(str, "%s:%s", "Emulex CNA NIC function",
251                                         component_revision);
252                                 device_set_desc_copy(dev, str);
253
254                                 switch (device) {
255                                 case PCI_PRODUCT_BE2:
256                                         sc->flags |= OCE_FLAGS_BE2;
257                                         break;
258                                 case PCI_PRODUCT_BE3:
259                                         sc->flags |= OCE_FLAGS_BE3;
260                                         break;
261                                 case PCI_PRODUCT_XE201:
262                                 case PCI_PRODUCT_XE201_VF:
263                                         sc->flags |= OCE_FLAGS_XE201;
264                                         break;
265                                 case PCI_PRODUCT_SH:
266                                         sc->flags |= OCE_FLAGS_SH;
267                                         break;
268                                 default:
269                                         return ENXIO;
270                                 }
271                                 return BUS_PROBE_DEFAULT;
272                         }
273                 }
274         }
275
276         return ENXIO;
277 }
278
279
/*
 * Device attach: allocate PCI resources, initialize the hardware, set up
 * interrupts, queues, the ifnet, LRO, VLAN event hooks, statistics and
 * the periodic timer.  On failure the goto ladder below unwinds exactly
 * the resources acquired so far, in reverse order of acquisition.
 */
static int
oce_attach(device_t dev)
{
        POCE_SOFTC sc;
        int rc = 0;

        sc = device_get_softc(dev);

        rc = oce_hw_pci_alloc(sc);
        if (rc)
                return rc;

        /* Ring geometry and default policies, set before hardware init. */
        sc->tx_ring_size = OCE_TX_RING_SIZE;
        sc->rx_ring_size = OCE_RX_RING_SIZE;
        sc->rq_frag_size = OCE_RQ_BUF_SIZE;
        sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
        sc->promisc      = OCE_DEFAULT_PROMISCUOUS;

        LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
        LOCK_CREATE(&sc->dev_lock,  "Device_lock");

        /* initialise the hardware */
        rc = oce_hw_init(sc);
        if (rc)
                goto pci_res_free;

        oce_get_config(sc);

        setup_max_queues_want(sc);

        rc = oce_setup_intr(sc);
        if (rc)
                goto mbox_free;

        rc = oce_queue_init_all(sc);
        if (rc)
                goto intr_free;

        rc = oce_attach_ifp(sc);
        if (rc)
                goto queues_free;

#if defined(INET6) || defined(INET)
        rc = oce_init_lro(sc);
        if (rc)
                goto ifp_free;
#endif

        rc = oce_hw_start(sc);
        if (rc)
                goto lro_free;

        /* Track VLAN add/remove so the hardware filter can follow. */
        sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
                                oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
        sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
                                oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

        rc = oce_stats_init(sc);
        if (rc)
                goto vlan_free;

        oce_add_sysctls(sc);

        /* Start the 2-second housekeeping timer.
         * NOTE(review): callout_reset() on this branch reports whether a
         * pending callout was rescheduled, not an error; treating a
         * non-zero return as failure looks suspicious — confirm intent. */
        callout_init(&sc->timer, CALLOUT_MPSAFE);
        rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
        if (rc)
                goto stats_free;

        return 0;

        /* Error unwind: each label frees what was acquired above it. */
stats_free:
        callout_drain(&sc->timer);
        oce_stats_free(sc);
vlan_free:
        if (sc->vlan_attach)
                EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
        if (sc->vlan_detach)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
        oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
        oce_free_lro(sc);
ifp_free:
#endif
        ether_ifdetach(sc->ifp);
        if_free(sc->ifp);
queues_free:
        oce_queue_release_all(sc);
intr_free:
        oce_intr_free(sc);
mbox_free:
        oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
        oce_hw_pci_free(sc);
        LOCK_DESTROY(&sc->dev_lock);
        LOCK_DESTROY(&sc->bmbx_lock);
        return rc;

}
379
380
/*
 * Device detach: quiesce the interface, remove VLAN event hooks and the
 * ifnet, then shut the hardware down.  Always returns 0.
 */
static int
oce_detach(device_t dev)
{
        POCE_SOFTC sc = device_get_softc(dev);

        /* Stop traffic under the device lock first... */
        LOCK(&sc->dev_lock);
        oce_if_deactivate(sc);
        UNLOCK(&sc->dev_lock);

        /* ...then wait for any in-flight local timer to finish. */
        callout_drain(&sc->timer);

        if (sc->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
        if (sc->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

        /* Unhook from the network stack before freeing the ifnet. */
        ether_ifdetach(sc->ifp);

        if_free(sc->ifp);

        oce_hw_shutdown(sc);

        bus_generic_detach(dev);

        return 0;
}
407
408
409 static int
410 oce_shutdown(device_t dev)
411 {
412         int rc;
413         
414         rc = oce_detach(dev);
415
416         return rc;      
417 }
418
419
/*
 * ifnet ioctl handler.  Handles media queries, MTU changes, up/down and
 * promiscuous transitions, multicast list updates, capability toggles
 * (checksum offload, TSO, VLAN, LRO) and a private passthrough ioctl;
 * everything else goes to ether_ioctl().  Returns 0 or an errno.
 */
static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ifreq *ifr = (struct ifreq *)data;
        POCE_SOFTC sc = ifp->if_softc;
        int rc = 0;
        uint32_t u;

        switch (command) {

        case SIOCGIFMEDIA:
                rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;

        case SIOCSIFMTU:
                /* Accept any MTU up to the hardware maximum. */
                if (ifr->ifr_mtu > OCE_MAX_MTU)
                        rc = EINVAL;
                else
                        ifp->if_mtu = ifr->ifr_mtu;
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        /* Bring the interface up if not already running. */
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
                                oce_init(sc);
                        }
                        device_printf(sc->dev, "Interface Up\n");
                } else {
                        LOCK(&sc->dev_lock);

                        sc->ifp->if_drv_flags &=
                            ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
                        oce_if_deactivate(sc);

                        UNLOCK(&sc->dev_lock);

                        device_printf(sc->dev, "Interface Down\n");
                }

                /* Sync hardware promiscuous state with IFF_PROMISC;
                 * sc->promisc is only updated when the firmware call
                 * succeeds. */
                if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
                        if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1))))
                                sc->promisc = TRUE;
                } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
                        if (!oce_rxf_set_promiscuous(sc, 0))
                                sc->promisc = FALSE;
                }

                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                rc = oce_hw_update_multicast(sc);
                if (rc)
                        device_printf(sc->dev,
                                "Update multicast address failed\n");
                break;

        case SIOCSIFCAP:
                /* u holds the set of capability bits being toggled. */
                u = ifr->ifr_reqcap ^ ifp->if_capenable;

                if (u & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        /* TSO requires tx checksum offload. */
                        if (IFCAP_TSO & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO;
                                ifp->if_hwassist &= ~CSUM_TSO;
                                if_printf(ifp,
                                         "TSO disabled due to -txcsum.\n");
                        }
                }

                if (u & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;

                if (u & IFCAP_TSO4) {
                        ifp->if_capenable ^= IFCAP_TSO4;

                        if (IFCAP_TSO & ifp->if_capenable) {
                                /* Enabling TSO: only honor it when tx
                                 * checksumming is also enabled. */
                                if (IFCAP_TXCSUM & ifp->if_capenable)
                                        ifp->if_hwassist |= CSUM_TSO;
                                else {
                                        ifp->if_capenable &= ~IFCAP_TSO;
                                        ifp->if_hwassist &= ~CSUM_TSO;
                                        if_printf(ifp,
                                            "Enable txcsum first.\n");
                                        rc = EAGAIN;
                                }
                        } else
                                ifp->if_hwassist &= ~CSUM_TSO;
                }

                if (u & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

                if (u & IFCAP_VLAN_HWFILTER) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                        /* Reprogram the hardware VLAN filter. */
                        oce_vid_config(sc);
                }
#if defined(INET6) || defined(INET)
                if (u & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
#endif

                break;

        case SIOCGPRIVATE_0:
                rc = oce_handle_passthrough(ifp, data);
                break;
        default:
                rc = ether_ioctl(ifp, command, data);
                break;
        }

        return rc;
}
538
539
540 static void
541 oce_init(void *arg)
542 {
543         POCE_SOFTC sc = arg;
544         
545         LOCK(&sc->dev_lock);
546
547         if (sc->ifp->if_flags & IFF_UP) {
548                 oce_if_deactivate(sc);
549                 oce_if_activate(sc);
550         }
551         
552         UNLOCK(&sc->dev_lock);
553
554 }
555
556
557 static int
558 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
559 {
560         POCE_SOFTC sc = ifp->if_softc;
561         struct oce_wq *wq = NULL;
562         int queue_index = 0;
563         int status = 0;
564
565         if (!sc->link_status)
566                 return ENXIO;
567
568         if ((m->m_flags & M_FLOWID) != 0)
569                 queue_index = m->m_pkthdr.flowid % sc->nwqs;
570
571         wq = sc->wq[queue_index];
572
573         LOCK(&wq->tx_lock);
574         status = oce_multiq_transmit(ifp, m, wq);
575         UNLOCK(&wq->tx_lock);
576
577         return status;
578
579 }
580
581
582 static void
583 oce_multiq_flush(struct ifnet *ifp)
584 {
585         POCE_SOFTC sc = ifp->if_softc;
586         struct mbuf     *m;
587         int i = 0;
588
589         for (i = 0; i < sc->nwqs; i++) {
590                 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
591                         m_freem(m);
592         }
593         if_qflush(ifp);
594 }
595
596
597
598 /*****************************************************************************
599  *                   Driver interrupt routines functions                     *
600  *****************************************************************************/
601
/*
 * Deferred interrupt handler, run from the per-vector taskqueue that
 * oce_fast_isr() enqueues into.  Consumes and clears all pending EQ
 * entries, runs every CQ handler attached to this EQ, then re-arms the
 * CQs and finally the EQ.
 */
static void
oce_intr(void *arg, int pending)
{

        POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
        POCE_SOFTC sc = ii->sc;
        struct oce_eq *eq = ii->eq;
        struct oce_eqe *eqe;
        struct oce_cq *cq = NULL;
        int i, num_eqes = 0;


        bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
                                 BUS_DMASYNC_POSTWRITE);
        /* Walk the EQ ring until the first unconsumed (evnt == 0) slot,
         * zeroing each valid entry as we go. */
        do {
                eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
                if (eqe->evnt == 0)
                        break;
                eqe->evnt = 0;
                /* NOTE(review): POSTWRITE after clearing eqe->evnt looks
                 * odd — a PREWRITE would normally push the cleared entry
                 * back to the device; confirm against bus_dma(9). */
                bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
                                        BUS_DMASYNC_POSTWRITE);
                RING_GET(eq->ring, 1);
                num_eqes++;

        } while (TRUE);

        if (!num_eqes)
                goto eq_arm; /* Spurious */

        /* Clear EQ entries, but dont arm */
        oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

        /* Process TX, RX and MCC. But dont arm CQ*/
        for (i = 0; i < eq->cq_valid; i++) {
                cq = eq->cq[i];
                (*cq->cq_handler)(cq->cb_arg);
        }

        /* Arm all cqs connected to this EQ */
        for (i = 0; i < eq->cq_valid; i++) {
                cq = eq->cq[i];
                oce_arm_cq(sc, cq->cq_id, 0, TRUE);
        }

eq_arm:
        /* Re-arm the EQ so further events raise interrupts again. */
        oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

        return;
}
651
652
/*
 * Interrupt setup: try MSI-X with one vector per queue when RSS is on
 * (falling back to legacy INTx if MSI-X allocation fails), scale the
 * queue counts to the vectors actually granted, and allocate/wire each
 * vector.  Returns 0 or an errno; on failure all vectors are freed.
 */
static int
oce_setup_intr(POCE_SOFTC sc)
{
        int rc = 0, use_intx = 0;
        int vector = 0, req_vectors = 0;

        /* With RSS, ask for enough vectors to cover both RX and TX queues. */
        if (is_rss_enabled(sc))
                req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
        else
                req_vectors = 1;

        if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
                sc->intr_count = req_vectors;
                rc = pci_alloc_msix(sc->dev, &sc->intr_count);
                if (rc != 0) {
                        /* MSI-X unavailable: fall back to legacy INTx. */
                        use_intx = 1;
                        pci_release_msi(sc->dev);
                } else
                        sc->flags |= OCE_FLAGS_USING_MSIX;
        } else
                use_intx = 1;

        if (use_intx)
                sc->intr_count = 1;

        /* Scale number of queues based on intr we got */
        update_queues_got(sc);

        if (use_intx) {
                device_printf(sc->dev, "Using legacy interrupt\n");
                rc = oce_alloc_intr(sc, vector, oce_intr);
                if (rc)
                        goto error;
        } else {
                for (; vector < sc->intr_count; vector++) {
                        rc = oce_alloc_intr(sc, vector, oce_intr);
                        if (rc)
                                goto error;
                }
        }

        return 0;
error:
        oce_intr_free(sc);
        return rc;
}
699
700
/*
 * Fast interrupt filter (runs in interrupt context).  Masks the EQ's
 * interrupt, defers the real work to the per-vector taskqueue, and
 * counts the interrupt.  Returns FILTER_HANDLED, or FILTER_STRAY when
 * no EQ is attached to this vector.
 */
static int
oce_fast_isr(void *arg)
{
        POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
        POCE_SOFTC sc = ii->sc;

        if (ii->eq == NULL)
                return FILTER_STRAY;

        /* Mask (clear-arm) this EQ before handing off to the taskqueue. */
        oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

        taskqueue_enqueue_fast(ii->tq, &ii->task);

        ii->eq->intr++;

        return FILTER_HANDLED;
}
718
719
720 static int
721 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
722 {
723         POCE_INTR_INFO ii = &sc->intrs[vector];
724         int rc = 0, rr;
725
726         if (vector >= OCE_MAX_EQ)
727                 return (EINVAL);
728
729         /* Set the resource id for the interrupt.
730          * MSIx is vector + 1 for the resource id,
731          * INTx is 0 for the resource id.
732          */
733         if (sc->flags & OCE_FLAGS_USING_MSIX)
734                 rr = vector + 1;
735         else
736                 rr = 0;
737         ii->intr_res = bus_alloc_resource_any(sc->dev,
738                                               SYS_RES_IRQ,
739                                               &rr, RF_ACTIVE|RF_SHAREABLE);
740         ii->irq_rr = rr;
741         if (ii->intr_res == NULL) {
742                 device_printf(sc->dev,
743                           "Could not allocate interrupt\n");
744                 rc = ENXIO;
745                 return rc;
746         }
747
748         TASK_INIT(&ii->task, 0, isr, ii);
749         ii->vector = vector;
750         sprintf(ii->task_name, "oce_task[%d]", ii->vector);
751         ii->tq = taskqueue_create_fast(ii->task_name,
752                         M_NOWAIT,
753                         taskqueue_thread_enqueue,
754                         &ii->tq);
755         taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
756                         device_get_nameunit(sc->dev));
757
758         ii->sc = sc;
759         rc = bus_setup_intr(sc->dev,
760                         ii->intr_res,
761                         INTR_TYPE_NET,
762                         oce_fast_isr, NULL, ii, &ii->tag);
763         return rc;
764
765 }
766
767
768 void
769 oce_intr_free(POCE_SOFTC sc)
770 {
771         int i = 0;
772         
773         for (i = 0; i < sc->intr_count; i++) {
774                 
775                 if (sc->intrs[i].tag != NULL)
776                         bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
777                                                 sc->intrs[i].tag);
778                 if (sc->intrs[i].tq != NULL)
779                         taskqueue_free(sc->intrs[i].tq);
780                 
781                 if (sc->intrs[i].intr_res != NULL)
782                         bus_release_resource(sc->dev, SYS_RES_IRQ,
783                                                 sc->intrs[i].irq_rr,
784                                                 sc->intrs[i].intr_res);
785                 sc->intrs[i].tag = NULL;
786                 sc->intrs[i].intr_res = NULL;
787         }
788
789         if (sc->flags & OCE_FLAGS_USING_MSIX)
790                 pci_release_msi(sc->dev);
791
792 }
793
794
795
796 /******************************************************************************
797 *                         Media callbacks functions                           *
798 ******************************************************************************/
799
800 static void
801 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
802 {
803         POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
804
805
806         req->ifm_status = IFM_AVALID;
807         req->ifm_active = IFM_ETHER;
808         
809         if (sc->link_status == 1)
810                 req->ifm_status |= IFM_ACTIVE;
811         else 
812                 return;
813         
814         switch (sc->link_speed) {
815         case 1: /* 10 Mbps */
816                 req->ifm_active |= IFM_10_T | IFM_FDX;
817                 sc->speed = 10;
818                 break;
819         case 2: /* 100 Mbps */
820                 req->ifm_active |= IFM_100_TX | IFM_FDX;
821                 sc->speed = 100;
822                 break;
823         case 3: /* 1 Gbps */
824                 req->ifm_active |= IFM_1000_T | IFM_FDX;
825                 sc->speed = 1000;
826                 break;
827         case 4: /* 10 Gbps */
828                 req->ifm_active |= IFM_10G_SR | IFM_FDX;
829                 sc->speed = 10000;
830                 break;
831         }
832         
833         return;
834 }
835
836
/*
 * ifmedia change callback.  The hardware autonegotiates; there is
 * nothing to set, so always succeed.  Declared static to match the
 * forward prototype (the definition previously omitted the keyword).
 */
static int
oce_media_change(struct ifnet *ifp)
{
        return 0;
}
842
843
844
845
846 /*****************************************************************************
847  *                        Transmit routines functions                        *
848  *****************************************************************************/
849
850 static int
851 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
852 {
853         int rc = 0, i, retry_cnt = 0;
854         bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
855         struct mbuf *m, *m_temp;
856         struct oce_wq *wq = sc->wq[wq_index];
857         struct oce_packet_desc *pd;
858         struct oce_nic_hdr_wqe *nichdr;
859         struct oce_nic_frag_wqe *nicfrag;
860         int num_wqes;
861         uint32_t reg_value;
862         boolean_t complete = TRUE;
863
864         m = *mpp;
865         if (!m)
866                 return EINVAL;
867
868         if (!(m->m_flags & M_PKTHDR)) {
869                 rc = ENXIO;
870                 goto free_ret;
871         }
872
873         if(oce_tx_asic_stall_verify(sc, m)) {
874                 m = oce_insert_vlan_tag(sc, m, &complete);
875                 if(!m) {
876                         device_printf(sc->dev, "Insertion unsuccessful\n");
877                         return 0;
878                 }
879
880         }
881
882         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
883                 /* consolidate packet buffers for TSO/LSO segment offload */
884 #if defined(INET6) || defined(INET)
885                 m = oce_tso_setup(sc, mpp);
886 #else
887                 m = NULL;
888 #endif
889                 if (m == NULL) {
890                         rc = ENXIO;
891                         goto free_ret;
892                 }
893         }
894
895         pd = &wq->pckts[wq->pkt_desc_head];
896 retry:
897         rc = bus_dmamap_load_mbuf_sg(wq->tag,
898                                      pd->map,
899                                      m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
900         if (rc == 0) {
901                 num_wqes = pd->nsegs + 1;
902                 if (IS_BE(sc) || IS_SH(sc)) {
903                         /*Dummy required only for BE3.*/
904                         if (num_wqes & 1)
905                                 num_wqes++;
906                 }
907                 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
908                         bus_dmamap_unload(wq->tag, pd->map);
909                         return EBUSY;
910                 }
911                 atomic_store_rel_int(&wq->pkt_desc_head,
912                                      (wq->pkt_desc_head + 1) % \
913                                       OCE_WQ_PACKET_ARRAY_SIZE);
914                 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
915                 pd->mbuf = m;
916
917                 nichdr =
918                     RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
919                 nichdr->u0.dw[0] = 0;
920                 nichdr->u0.dw[1] = 0;
921                 nichdr->u0.dw[2] = 0;
922                 nichdr->u0.dw[3] = 0;
923
924                 nichdr->u0.s.complete = complete;
925                 nichdr->u0.s.event = 1;
926                 nichdr->u0.s.crc = 1;
927                 nichdr->u0.s.forward = 0;
928                 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
929                 nichdr->u0.s.udpcs =
930                         (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
931                 nichdr->u0.s.tcpcs =
932                         (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
933                 nichdr->u0.s.num_wqe = num_wqes;
934                 nichdr->u0.s.total_length = m->m_pkthdr.len;
935
936                 if (m->m_flags & M_VLANTAG) {
937                         nichdr->u0.s.vlan = 1; /*Vlan present*/
938                         nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
939                 }
940
941                 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
942                         if (m->m_pkthdr.tso_segsz) {
943                                 nichdr->u0.s.lso = 1;
944                                 nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
945                         }
946                         if (!IS_BE(sc) || !IS_SH(sc))
947                                 nichdr->u0.s.ipcs = 1;
948                 }
949
950                 RING_PUT(wq->ring, 1);
951                 atomic_add_int(&wq->ring->num_used, 1);
952
953                 for (i = 0; i < pd->nsegs; i++) {
954                         nicfrag =
955                             RING_GET_PRODUCER_ITEM_VA(wq->ring,
956                                                       struct oce_nic_frag_wqe);
957                         nicfrag->u0.s.rsvd0 = 0;
958                         nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
959                         nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
960                         nicfrag->u0.s.frag_len = segs[i].ds_len;
961                         pd->wqe_idx = wq->ring->pidx;
962                         RING_PUT(wq->ring, 1);
963                         atomic_add_int(&wq->ring->num_used, 1);
964                 }
965                 if (num_wqes > (pd->nsegs + 1)) {
966                         nicfrag =
967                             RING_GET_PRODUCER_ITEM_VA(wq->ring,
968                                                       struct oce_nic_frag_wqe);
969                         nicfrag->u0.dw[0] = 0;
970                         nicfrag->u0.dw[1] = 0;
971                         nicfrag->u0.dw[2] = 0;
972                         nicfrag->u0.dw[3] = 0;
973                         pd->wqe_idx = wq->ring->pidx;
974                         RING_PUT(wq->ring, 1);
975                         atomic_add_int(&wq->ring->num_used, 1);
976                         pd->nsegs++;
977                 }
978
979                 sc->ifp->if_opackets++;
980                 wq->tx_stats.tx_reqs++;
981                 wq->tx_stats.tx_wrbs += num_wqes;
982                 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
983                 wq->tx_stats.tx_pkts++;
984
985                 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
986                                 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
987                 reg_value = (num_wqes << 16) | wq->wq_id;
988                 OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
989
990         } else if (rc == EFBIG) {
991                 if (retry_cnt == 0) {
992                         m_temp = m_defrag(m, M_NOWAIT);
993                         if (m_temp == NULL)
994                                 goto free_ret;
995                         m = m_temp;
996                         *mpp = m_temp;
997                         retry_cnt = retry_cnt + 1;
998                         goto retry;
999                 } else
1000                         goto free_ret;
1001         } else if (rc == ENOMEM)
1002                 return rc;
1003         else
1004                 goto free_ret;
1005         
1006         return 0;
1007
1008 free_ret:
1009         m_freem(*mpp);
1010         *mpp = NULL;
1011         return rc;
1012 }
1013
1014
static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
	/*
	 * Reclaim the packet descriptor at the tail of the WQ after the
	 * hardware signalled completion: retire the descriptor, unload
	 * its DMA map and free the transmitted mbuf.
	 * NOTE(review): wqe_idx and status are currently unused here.
	 */
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
	struct mbuf *m;

	pd = &wq->pckts[wq->pkt_desc_tail];
	/* Advance the tail with release semantics so the producer side
	 * (oce_tx) observes a fully-retired descriptor slot. */
	atomic_store_rel_int(&wq->pkt_desc_tail,
			     (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE); 
	/* Each packet consumed nsegs fragment WQEs plus one header WQE. */
	atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1);
	bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(wq->tag, pd->map);

	m = pd->mbuf;
	m_freem(m);
	pd->mbuf = NULL;


	/* If transmit was stalled and the ring has drained below half
	 * occupancy, clear OACTIVE and kick the transmit task. */
	if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		if (wq->ring->num_used < (wq->ring->num_items / 2)) {
			sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
			oce_tx_restart(sc, wq); 
		}
	}
}
1041
1042
static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{
	/* Re-schedule the deferred transmit task for this WQ when the
	 * interface is running and packets are waiting to be sent. */

	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
		return;

	/* Multiqueue kernels drain the per-WQ buf_ring; older ones the
	 * legacy if_snd queue. */
#if __FreeBSD_version >= 800000
	if (!drbr_empty(sc->ifp, wq->br))
#else
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
		taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);

}
1058
1059
#if defined(INET6) || defined(INET)
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
	/*
	 * Prepare an mbuf chain for TSO: make it writable, locate the TCP
	 * header behind the Ethernet/VLAN and IP/IPv6 headers, and pull
	 * the whole header region into the first mbuf so the transmit
	 * path can parse it contiguously.  Returns the (possibly
	 * replaced) mbuf on success, or NULL if the packet is not TCP or
	 * an allocation fails; *mpp is updated whenever the head mbuf is
	 * replaced.
	 */
	struct mbuf *m;
#ifdef INET
	struct ip *ip;
#endif
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct ether_vlan_header *eh;
	struct tcphdr *th;
	uint16_t etype;
	int total_len = 0, ehdrlen = 0;
	
	m = *mpp;

	/* Duplicate a read-only chain so the headers can be modified. */
	if (M_WRITABLE(m) == 0) {
		m = m_dup(*mpp, M_NOWAIT);
		if (!m)
			return NULL;
		m_freem(*mpp);
		*mpp = m;
	}

	/* Account for an 802.1Q tag when sizing the L2 header. */
	eh = mtod(m, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* NOTE(review): the IP/TCP headers are dereferenced before the
	   m_pullup() below; this assumes they already reside in the first
	   mbuf of the chain — TODO confirm against callers. */
	switch (etype) {
#ifdef INET
	case ETHERTYPE_IP:
		ip = (struct ip *)(m->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

		total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return NULL;
		th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

		total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
		break;
#endif
	default:
		return NULL;
	}
	
	/* Make the full L2+L3+L4 header contiguous in the first mbuf. */
	m = m_pullup(m, total_len);
	if (!m)
		return NULL;
	*mpp = m;
	return m;
	
}
#endif /* INET6 || INET */
1128
void
oce_tx_task(void *arg, int npending)
{
	/* Deferred transmit task (scheduled by oce_tx_restart): drain the
	 * WQ's buf_ring on multiqueue kernels, or the legacy if_snd queue
	 * via oce_start() on older ones. */
	struct oce_wq *wq = arg;
	POCE_SOFTC sc = wq->parent;
	struct ifnet *ifp = sc->ifp;
	int rc = 0;

#if __FreeBSD_version >= 800000
	LOCK(&wq->tx_lock);
	rc = oce_multiq_transmit(ifp, NULL, wq);	/* NULL: drain only */
	if (rc) {
		device_printf(sc->dev,
				"TX[%d] restart failed\n", wq->queue_index);
	}
	UNLOCK(&wq->tx_lock);
#else
	oce_start(ifp);
#endif

}
1150
1151
1152 void
1153 oce_start(struct ifnet *ifp)
1154 {
1155         POCE_SOFTC sc = ifp->if_softc;
1156         struct mbuf *m;
1157         int rc = 0;
1158         int def_q = 0; /* Defualt tx queue is 0*/
1159
1160         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1161                         IFF_DRV_RUNNING)
1162                 return;
1163
1164         if (!sc->link_status)
1165                 return;
1166         
1167         do {
1168                 IF_DEQUEUE(&sc->ifp->if_snd, m);
1169                 if (m == NULL)
1170                         break;
1171
1172                 LOCK(&sc->wq[def_q]->tx_lock);
1173                 rc = oce_tx(sc, &m, def_q);
1174                 UNLOCK(&sc->wq[def_q]->tx_lock);
1175                 if (rc) {
1176                         if (m != NULL) {
1177                                 sc->wq[def_q]->tx_stats.tx_stops ++;
1178                                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1179                                 IFQ_DRV_PREPEND(&ifp->if_snd, m);
1180                                 m = NULL;
1181                         }
1182                         break;
1183                 }
1184                 if (m != NULL)
1185                         ETHER_BPF_MTAP(ifp, m);
1186
1187         } while (TRUE);
1188
1189         return;
1190 }
1191
1192
/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
	/* Drain the TX completion queue: for each valid CQE retire the
	 * corresponding WQ entry, then re-arm the CQ.  Always returns 0. */
	struct oce_wq *wq = (struct oce_wq *)arg;
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;
	int num_cqes = 0;

	/* The hardware DMAs completion entries into this ring; sync
	 * before every read. */
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	/* dw[3] is non-zero only for a valid, unprocessed CQE. */
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

		/* Move the WQ consumer index past the completed WQE. */
		wq->ring->cidx = cqe->u0.s.wqe_index + 1;
		if (wq->ring->cidx >= wq->ring->num_items)
			wq->ring->cidx -= wq->ring->num_items;

		oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
		wq->tx_stats.tx_compl++;
		cqe->u0.dw[3] = 0;	/* mark the CQE consumed */
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
		num_cqes++;
	}

	/* Re-arm the CQ with the number of entries consumed. */
	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}
1229
1230
1231 static int 
1232 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1233 {
1234         POCE_SOFTC sc = ifp->if_softc;
1235         int status = 0, queue_index = 0;
1236         struct mbuf *next = NULL;
1237         struct buf_ring *br = NULL;
1238
1239         br  = wq->br;
1240         queue_index = wq->queue_index;
1241
1242         if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1243                 IFF_DRV_RUNNING) {
1244                 if (m != NULL)
1245                         status = drbr_enqueue(ifp, br, m);
1246                 return status;
1247         }
1248
1249         if (m != NULL) {
1250                 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1251                         return status;
1252         } 
1253         while ((next = drbr_peek(ifp, br)) != NULL) {
1254                 if (oce_tx(sc, &next, queue_index)) {
1255                         if (next == NULL) {
1256                                 drbr_advance(ifp, br);
1257                         } else {
1258                                 drbr_putback(ifp, br, next);
1259                                 wq->tx_stats.tx_stops ++;
1260                                 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1261                                 status = drbr_enqueue(ifp, br, next);
1262                         }  
1263                         break;
1264                 }
1265                 drbr_advance(ifp, br);
1266                 ifp->if_obytes += next->m_pkthdr.len;
1267                 if (next->m_flags & M_MCAST)
1268                         ifp->if_omcasts++;
1269                 ETHER_BPF_MTAP(ifp, next);
1270         }
1271
1272         return status;
1273 }
1274
1275
1276
1277
1278 /*****************************************************************************
1279  *                          Receive  routines functions                      *
1280  *****************************************************************************/
1281
static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
	/*
	 * Reassemble one received frame from its posted RQ fragments into
	 * an mbuf chain, fill in checksum/VLAN/flowid metadata from the
	 * completion entry, and hand the packet to LRO or the stack.
	 */
	uint32_t out;
	struct oce_packet_desc *pd;
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	int i, len, frag_len;
	struct mbuf *m = NULL, *tail = NULL;
	uint16_t vtag;

	len = cqe->u0.s.pkt_size;
	if (!len) {
		/*partial DMA workaround for Lancer*/
		oce_discard_rx_comp(rq, cqe);
		goto exit;
	}

	 /* Get vlan_tag value; BE/Skyhawk report it byte-swapped. */
	if(IS_BE(sc) || IS_SH(sc))
		vtag = BSWAP_16(cqe->u0.s.vlan_tag);
	else
		vtag = cqe->u0.s.vlan_tag;


	/* Walk the posted buffers that make up this frame. */
	for (i = 0; i < cqe->u0.s.num_fragments; i++) {

		if (rq->packets_out == rq->packets_in) {
			device_printf(sc->dev,
				  "RQ transmit descriptor missing\n");
		}
		out = rq->packets_out + 1;
		if (out == OCE_RQ_PACKET_ARRAY_SIZE)
			out = 0;
		pd = &rq->pckts[rq->packets_out];
		rq->packets_out = out;

		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		rq->pending--;

		/* The final fragment may be shorter than frag_size. */
		frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
		pd->mbuf->m_len = frag_len;

		if (tail != NULL) {
			/* additional fragments */
			pd->mbuf->m_flags &= ~M_PKTHDR;
			tail->m_next = pd->mbuf;
			tail = pd->mbuf;
		} else {
			/* first fragment, fill out much of the packet header */
			pd->mbuf->m_pkthdr.len = len;
			pd->mbuf->m_pkthdr.csum_flags = 0;
			if (IF_CSUM_ENABLED(sc)) {
				if (cqe->u0.s.l4_cksum_pass) {
					pd->mbuf->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					pd->mbuf->m_pkthdr.csum_data = 0xffff;
				}
				if (cqe->u0.s.ip_cksum_pass) {
					if (!cqe->u0.s.ip_ver) { /* IPV4 */
						pd->mbuf->m_pkthdr.csum_flags |=
						(CSUM_IP_CHECKED|CSUM_IP_VALID);
					}
				}
			}
			m = tail = pd->mbuf;
		}
		pd->mbuf = NULL;
		len -= frag_len;
	}

	if (m) {
		/* Drop frames that arrived on the wrong physical port. */
		if (!oce_cqe_portid_valid(sc, cqe)) {
			 m_freem(m);
			 goto exit;
		} 

		m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
		if (rq->queue_index)
			m->m_pkthdr.flowid = (rq->queue_index - 1);
		else
			m->m_pkthdr.flowid = rq->queue_index;
		m->m_flags |= M_FLOWID;
#endif
		/* This determines if the vlan tag is valid. */
		if (oce_cqe_vtp_valid(sc, cqe)) { 
			if (sc->function_mode & FNM_FLEX10_MODE) {
				/* FLEX10. If QnQ is not set, neglect VLAN */
				if (cqe->u0.s.qnq) {
					m->m_pkthdr.ether_vtag = vtag;
					m->m_flags |= M_VLANTAG;
				}
			} else if (sc->pvid != (vtag & VLAN_VID_MASK))  {
				/* In UMC mode generally pvid will be striped by
				   hw. But in some cases we have seen it comes
				   with pvid. So if pvid == vlan, neglect vlan.
				*/
				m->m_pkthdr.ether_vtag = vtag;
				m->m_flags |= M_VLANTAG;
			}
		}

		sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
		/* Try to queue to LRO */
		if (IF_LRO_ENABLED(sc) &&
		    (cqe->u0.s.ip_cksum_pass) &&
		    (cqe->u0.s.l4_cksum_pass) &&
		    (!cqe->u0.s.ip_ver)       &&
		    (rq->lro.lro_cnt != 0)) {

			if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
				rq->lro_pkts_queued ++;		
				goto post_done;
			}
			/* If LRO posting fails then try to post to STACK */
		}
#endif
	
		(*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
		/* Update rx stats per queue */
		rq->rx_stats.rx_pkts++;
		rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
		rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
		if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
			rq->rx_stats.rx_mcast_pkts++;
		if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
			rq->rx_stats.rx_ucast_pkts++;
	}
exit:
	return;
}
1418
1419
1420 static void
1421 oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1422 {
1423         uint32_t out, i = 0;
1424         struct oce_packet_desc *pd;
1425         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1426         int num_frags = cqe->u0.s.num_fragments;
1427
1428         for (i = 0; i < num_frags; i++) {
1429                 if (rq->packets_out == rq->packets_in) {
1430                         device_printf(sc->dev,
1431                                 "RQ transmit descriptor missing\n");
1432                 }
1433                 out = rq->packets_out + 1;
1434                 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1435                         out = 0;
1436                 pd = &rq->pckts[rq->packets_out];
1437                 rq->packets_out = out;
1438
1439                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1440                 bus_dmamap_unload(rq->tag, pd->map);
1441                 rq->pending--;
1442                 m_freem(pd->mbuf);
1443         }
1444
1445 }
1446
1447
1448 static int
1449 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1450 {
1451         struct oce_nic_rx_cqe_v1 *cqe_v1;
1452         int vtp = 0;
1453
1454         if (sc->be3_native) {
1455                 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1456                 vtp =  cqe_v1->u0.s.vlan_tag_present; 
1457         } else
1458                 vtp = cqe->u0.s.vlan_tag_present;
1459         
1460         return vtp;
1461
1462 }
1463
1464
1465 static int
1466 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1467 {
1468         struct oce_nic_rx_cqe_v1 *cqe_v1;
1469         int port_id = 0;
1470
1471         if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) {
1472                 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1473                 port_id =  cqe_v1->u0.s.port;
1474                 if (sc->port_id != port_id)
1475                         return 0;
1476         } else
1477                 ;/* For BE3 legacy and Lancer this is dummy */
1478         
1479         return 1;
1480
1481 }
1482
1483 #if defined(INET6) || defined(INET)
1484 static void
1485 oce_rx_flush_lro(struct oce_rq *rq)
1486 {
1487         struct lro_ctrl *lro = &rq->lro;
1488         struct lro_entry *queued;
1489         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1490
1491         if (!IF_LRO_ENABLED(sc))
1492                 return;
1493
1494         while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1495                 SLIST_REMOVE_HEAD(&lro->lro_active, next);
1496                 tcp_lro_flush(lro, queued);
1497         }
1498         rq->lro_pkts_queued = 0;
1499         
1500         return;
1501 }
1502
1503
1504 static int
1505 oce_init_lro(POCE_SOFTC sc)
1506 {
1507         struct lro_ctrl *lro = NULL;
1508         int i = 0, rc = 0;
1509
1510         for (i = 0; i < sc->nrqs; i++) { 
1511                 lro = &sc->rq[i]->lro;
1512                 rc = tcp_lro_init(lro);
1513                 if (rc != 0) {
1514                         device_printf(sc->dev, "LRO init failed\n");
1515                         return rc;              
1516                 }
1517                 lro->ifp = sc->ifp;
1518         }
1519
1520         return rc;              
1521 }
1522
1523
1524 void
1525 oce_free_lro(POCE_SOFTC sc)
1526 {
1527         struct lro_ctrl *lro = NULL;
1528         int i = 0;
1529
1530         for (i = 0; i < sc->nrqs; i++) {
1531                 lro = &sc->rq[i]->lro;
1532                 if (lro)
1533                         tcp_lro_free(lro);
1534         }
1535 }
1536 #endif
1537
1538 int
1539 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1540 {
1541         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1542         int i, in, rc;
1543         struct oce_packet_desc *pd;
1544         bus_dma_segment_t segs[6];
1545         int nsegs, added = 0;
1546         struct oce_nic_rqe *rqe;
1547         pd_rxulp_db_t rxdb_reg;
1548
1549         bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
1550         for (i = 0; i < count; i++) {
1551                 in = rq->packets_in + 1;
1552                 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1553                         in = 0;
1554                 if (in == rq->packets_out)
1555                         break;  /* no more room */
1556
1557                 pd = &rq->pckts[rq->packets_in];
1558                 pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1559                 if (pd->mbuf == NULL)
1560                         break;
1561
1562                 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1563                 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1564                                              pd->map,
1565                                              pd->mbuf,
1566                                              segs, &nsegs, BUS_DMA_NOWAIT);
1567                 if (rc) {
1568                         m_free(pd->mbuf);
1569                         break;
1570                 }
1571
1572                 if (nsegs != 1) {
1573                         i--;
1574                         continue;
1575                 }
1576
1577                 rq->packets_in = in;
1578                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1579
1580                 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1581                 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1582                 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1583                 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1584                 RING_PUT(rq->ring, 1);
1585                 added++;
1586                 rq->pending++;
1587         }
1588         if (added != 0) {
1589                 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1590                         rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1591                         rxdb_reg.bits.qid = rq->rq_id;
1592                         OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1593                         added -= OCE_MAX_RQ_POSTS;
1594                 }
1595                 if (added > 0) {
1596                         rxdb_reg.bits.qid = rq->rq_id;
1597                         rxdb_reg.bits.num_posted = added;
1598                         OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1599                 }
1600         }
1601         
1602         return 0;       
1603 }
1604
1605
/* Handle the Completion Queue for receive */
uint16_t
oce_rq_handler(void *arg)
{
	/* Drain up to a bounded number of RX completions, flush LRO, then
	 * re-arm the CQ and replenish the RQ.  Always returns 0. */
	struct oce_rq *rq = (struct oce_rq *)arg;
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct oce_nic_rx_cqe *cqe;
	int num_cqes = 0, rq_buffers_used = 0;


	/* The hardware DMAs completion entries into this ring; sync
	 * before every read. */
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dw[2] is non-zero only for a valid, unprocessed CQE. */
	while (cqe->u0.dw[2]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

		RING_GET(rq->ring, 1);
		if (cqe->u0.s.error == 0) {
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		} else {
			rq->rx_stats.rxcp_err++;
			sc->ifp->if_ierrors++;
			/* Post L3/L4 errors to stack.*/
			oce_rx(rq, cqe->u0.s.frag_index, cqe);
		}
		rq->rx_stats.rx_compl++;
		cqe->u0.dw[2] = 0;	/* mark the CQE consumed */

#if defined(INET6) || defined(INET)
		/* Cap the LRO backlog to bound per-flow latency. */
		if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
			oce_rx_flush_lro(rq);
		}
#endif

		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe =
		    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
		num_cqes++;
		/* Bound the work done per invocation of the handler. */
		if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
			break;
	}

#if defined(INET6) || defined(INET)
	if (IF_LRO_ENABLED(sc))
		oce_rx_flush_lro(rq);
#endif
	
	if (num_cqes) {
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
		/* Refill the RQ with fresh buffers, keeping one slot free. */
		rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
		if (rq_buffers_used > 1)
			oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
	}

	return 0;

}
1666
1667
1668
1669
1670 /*****************************************************************************
1671  *                 Helper function prototypes in this file                   *
1672  *****************************************************************************/
1673
static int 
oce_attach_ifp(POCE_SOFTC sc)
{
	/*
	 * Allocate and configure the ifnet for this adapter: media,
	 * driver entry points, send-queue limits and offload
	 * capabilities, then attach it to the network stack.  Returns 0
	 * on success or ENOMEM if if_alloc() fails.
	 */

	sc->ifp = if_alloc(IFT_ETHER);
	if (!sc->ifp)
		return ENOMEM;

	/* Only autoselect media is advertised. */
	ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

	sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
	sc->ifp->if_ioctl = oce_ioctl;
	sc->ifp->if_start = oce_start;
	sc->ifp->if_init = oce_init;
	sc->ifp->if_mtu = ETHERMTU;
	sc->ifp->if_softc = sc;
#if __FreeBSD_version >= 800000
	/* Multiqueue transmit entry points on newer kernels. */
	sc->ifp->if_transmit = oce_multiq_start;
	sc->ifp->if_qflush = oce_multiq_flush;
#endif

	if_initname(sc->ifp,
		    device_get_name(sc->dev), device_get_unit(sc->dev));

	sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&sc->ifp->if_snd);

	/* Hardware assist: IP/TCP/UDP checksum and TSO. */
	sc->ifp->if_hwassist = OCE_IF_HWASSIST;
	sc->ifp->if_hwassist |= CSUM_TSO;
	sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

	sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
	sc->ifp->if_capabilities |= IFCAP_HWCSUM;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

#if defined(INET6) || defined(INET)
	sc->ifp->if_capabilities |= IFCAP_TSO;
	sc->ifp->if_capabilities |= IFCAP_LRO;
	sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
#endif
	
	/* Enable everything the hardware supports by default. */
	sc->ifp->if_capenable = sc->ifp->if_capabilities;
	sc->ifp->if_baudrate = IF_Gbps(10UL);

#if __FreeBSD_version >= 1000000
	sc->ifp->if_hw_tsomax = OCE_MAX_TSO_SIZE;
#endif

	ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
	
	return 0;
}
1729
1730
1731 static void
1732 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1733 {
1734         POCE_SOFTC sc = ifp->if_softc;
1735
1736         if (ifp->if_softc !=  arg)
1737                 return;
1738         if ((vtag == 0) || (vtag > 4095))
1739                 return;
1740
1741         sc->vlan_tag[vtag] = 1;
1742         sc->vlans_added++;
1743         if (sc->vlans_added <= (sc->max_vlans + 1))
1744                 oce_vid_config(sc);
1745 }
1746
1747
1748 static void
1749 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1750 {
1751         POCE_SOFTC sc = ifp->if_softc;
1752
1753         if (ifp->if_softc !=  arg)
1754                 return;
1755         if ((vtag == 0) || (vtag > 4095))
1756                 return;
1757
1758         sc->vlan_tag[vtag] = 0;
1759         sc->vlans_added--;
1760         oce_vid_config(sc);
1761 }
1762
1763
1764 /*
1765  * A max of 64 vlans can be configured in BE. If the user configures
1766  * more, place the card in vlan promiscuous mode.
1767  */
1768 static int
1769 oce_vid_config(POCE_SOFTC sc)
1770 {
1771         struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1772         uint16_t ntags = 0, i;
1773         int status = 0;
1774
1775         if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) && 
1776                         (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1777                 for (i = 0; i < MAX_VLANS; i++) {
1778                         if (sc->vlan_tag[i]) {
1779                                 vtags[ntags].vtag = i;
1780                                 ntags++;
1781                         }
1782                 }
1783                 if (ntags)
1784                         status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1785                                                 vtags, ntags, 1, 0); 
1786         } else 
1787                 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1788                                                 NULL, 0, 1, 1);
1789         return status;
1790 }
1791
1792
/*
 * Sync the hardware MAC filter with the interface's link-level address.
 * No-op when the cached copy already matches; otherwise add the new
 * address first and then delete the old one so the port is never
 * without a programmed MAC.
 */
static void
oce_mac_addr_set(POCE_SOFTC sc)
{
	uint32_t old_pmac_id = sc->pmac_id;
	int status = 0;

	
	/* bcmp() == 0 means the address is unchanged — nothing to do */
	status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
			 sc->macaddr.size_of_struct);
	if (!status)
		return;

	/* Program the new address; on success sc->pmac_id is updated */
	status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
					sc->if_id, &sc->pmac_id);
	if (!status) {
		/* Remove the stale entry and refresh the cached copy */
		status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
		bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
				 sc->macaddr.size_of_struct); 
	}
	/* NOTE(review): a failing macaddr_del also lands here even though
	 * the new address WAS programmed and cached — the message may
	 * overstate the failure; confirm intended semantics. */
	if (status)
		device_printf(sc->dev, "Failed update macaddress\n");

}
1816
1817
1818 static int
1819 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1820 {
1821         POCE_SOFTC sc = ifp->if_softc;
1822         struct ifreq *ifr = (struct ifreq *)data;
1823         int rc = ENXIO;
1824         char cookie[32] = {0};
1825         void *priv_data = (void *)ifr->ifr_data;
1826         void *ioctl_ptr;
1827         uint32_t req_size;
1828         struct mbx_hdr req;
1829         OCE_DMA_MEM dma_mem;
1830         struct mbx_common_get_cntl_attr *fw_cmd;
1831
1832         if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1833                 return EFAULT;
1834
1835         if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1836                 return EINVAL;
1837
1838         ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1839         if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1840                 return EFAULT;
1841
1842         req_size = le32toh(req.u0.req.request_length);
1843         if (req_size > 65536)
1844                 return EINVAL;
1845
1846         req_size += sizeof(struct mbx_hdr);
1847         rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1848         if (rc)
1849                 return ENOMEM;
1850
1851         if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1852                 rc = EFAULT;
1853                 goto dma_free;
1854         }
1855
1856         rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1857         if (rc) {
1858                 rc = EIO;
1859                 goto dma_free;
1860         }
1861
1862         if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1863                 rc =  EFAULT;
1864
1865         /* 
1866            firmware is filling all the attributes for this ioctl except
1867            the driver version..so fill it 
1868          */
1869         if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) {
1870                 fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr;
1871                 strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str,
1872                         COMPONENT_REVISION, strlen(COMPONENT_REVISION));        
1873         }
1874
1875 dma_free:
1876         oce_dma_free(sc, &dma_mem);
1877         return rc;
1878
1879 }
1880
/*
 * Adaptive interrupt coalescing: called from the 1 Hz timer, this
 * resizes each EQ's delay multiplier based on the interrupt rate
 * observed since the previous invocation, then pushes any changed
 * values to firmware in a single mailbox command.
 */
static void
oce_eqd_set_periodic(POCE_SOFTC sc)
{
	struct oce_set_eqd set_eqd[OCE_MAX_EQ];
	struct oce_aic_obj *aic;	/* per-EQ adaptive-coalescing state */
	struct oce_eq *eqo;
	uint64_t now = 0, delta;
	int eqd, i, num = 0;
	uint32_t ips = 0;		/* interrupts per second */
	int tps;			/* whole seconds elapsed */

	for (i = 0 ; i < sc->neqs; i++) {
		eqo = sc->eq[i];
		aic = &sc->aic_obj[i];
		/* When setting the static eq delay from the user space */
		if (!aic->enable) {
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		now = ticks;

		/* Over flow check */
		if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
			goto done;

		delta = now - aic->ticks;
		tps = delta/hz;

		/* Interrupt rate based on elapsed ticks */
		if(tps)
			ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;

		/* Rate above high-water mark: increase the delay;
		 * below low-water mark: halve it; in between: no change */
		if (ips > INTR_RATE_HWM)
			eqd = aic->cur_eqd + 20;
		else if (ips < INTR_RATE_LWM)
			eqd = aic->cur_eqd / 2;
		else
			goto done;

		/* Delays below 10 are treated as "no delay" */
		if (eqd < 10)
			eqd = 0;

		/* Make sure that the eq delay is in the known range */
		eqd = min(eqd, aic->max_eqd);
		eqd = max(eqd, aic->min_eqd);

modify_eqd:
		if (eqd != aic->cur_eqd) {
			/* Convert to the hardware's multiplier units */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->eq_id;
			aic->cur_eqd = eqd;
			num++;
		}
done:
		/* Snapshot counters for the next sampling interval.
		 * NOTE(review): the static-delay path jumps here with
		 * now == 0, resetting aic->ticks to 0 — confirm this is
		 * intentional. */
		aic->intr_prev = eqo->intr;
		aic->ticks = now;
	}

	/* Is there atleast one eq that needs to be modified? */
	if(num)
		oce_mbox_eqd_modify_periodic(sc, set_eqd, num);

}
1945
/*
 * Poll the adapter's error registers (called from the 1 Hz timer) and
 * latch sc->hw_error when the SLI port reports a fatal condition.
 * Unrecoverable-error (UE) bits are logged but deliberately do NOT set
 * hw_error (see comment below).
 */
static void oce_detect_hw_error(POCE_SOFTC sc)
{

	uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0;
	uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	uint32_t i;

	/* Error already latched; report only once */
	if (sc->hw_error)
		return;

	if (IS_XE201(sc)) {
		/* Lancer (XE201): error state lives in the SLIPORT regs */
		sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET);
			sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET);
		}
	} else {
		/* BE family: read UE status and mask off bits the
		 * platform says to ignore */
		ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW);
		ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH);
		ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK);
		ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK);

		ue_low = (ue_low & ~ue_low_mask);
		ue_high = (ue_high & ~ue_high_mask);
	}

	/* On certain platforms BE hardware can indicate spurious UEs.
	 * Allow the h/w to stop working completely in case of a real UE.
	 * Hence not setting the hw_error for UE detection.
	 */
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sc->hw_error = TRUE;
		device_printf(sc->dev, "Error detected in the card\n");
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		device_printf(sc->dev,
				"ERR: sliport status 0x%x\n", sliport_status);
		device_printf(sc->dev,
				"ERR: sliport error1 0x%x\n", sliport_err1);
		device_printf(sc->dev,
				"ERR: sliport error2 0x%x\n", sliport_err2);
	}

	/* Decode each set UE bit into its descriptive name */
	if (ue_low) {
		for (i = 0; ue_low; ue_low >>= 1, i++) {
			if (ue_low & 1)
				device_printf(sc->dev, "UE: %s bit set\n",
							ue_status_low_desc[i]);
		}
	}

	if (ue_high) {
		for (i = 0; ue_high; ue_high >>= 1, i++) {
			if (ue_high & 1)
				device_printf(sc->dev, "UE: %s bit set\n",
							ue_status_hi_desc[i]);
		}
	}

}
2007
2008
2009 static void
2010 oce_local_timer(void *arg)
2011 {
2012         POCE_SOFTC sc = arg;
2013         int i = 0;
2014         
2015         oce_detect_hw_error(sc);
2016         oce_refresh_nic_stats(sc);
2017         oce_refresh_queue_stats(sc);
2018         oce_mac_addr_set(sc);
2019         
2020         /* TX Watch Dog*/
2021         for (i = 0; i < sc->nwqs; i++)
2022                 oce_tx_restart(sc, sc->wq[i]);
2023         
2024         /* calculate and set the eq delay for optimal interrupt rate */
2025         if (IS_BE(sc) || IS_SH(sc))
2026                 oce_eqd_set_periodic(sc);
2027
2028         callout_reset(&sc->timer, hz, oce_local_timer, sc);
2029 }
2030
2031
/* NOTE : This should only be called holding
 *        DEVICE_LOCK.
 *
 * Quiesce the interface: stop TX, drain interrupts/taskqueues, tear
 * down RX and flush all pending completion/event queue entries, then
 * re-enable interrupts so MCC async events still arrive.
 */
static void
oce_if_deactivate(POCE_SOFTC sc)
{
	int i, mtime = 0;
	int wait_req = 0;
	struct oce_rq *rq;
	struct oce_wq *wq;
	struct oce_eq *eq;

	/* Mark the interface down so no new transmits are queued */
	sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	/*Wait for max of 400ms for TX completions to be done */
	/* NOTE(review): with DELAY(1) (1 us) per retry and a bound of
	 * 400 iterations this waits ~400 us, not 400 ms as the comment
	 * above claims — confirm intended duration. */
	while (mtime < 400) {
		wait_req = 0;
		for_all_wq_queues(sc, wq, i) {
			if (wq->ring->num_used) {
				wait_req = 1;
				DELAY(1);
				break;
			}
		}
		mtime += 1;
		if (!wait_req)
			break;
	}

	/* Stop intrs and finish any bottom halves pending */
	oce_hw_intr_disable(sc);

	/* Since taskqueue_drain takes a Giant Lock, we should not acquire
	   any other lock. So unlock device lock and reacquire after
	   completing taskqueue_drain.
	*/
	UNLOCK(&sc->dev_lock);
	for (i = 0; i < sc->intr_count; i++) {
		if (sc->intrs[i].tq != NULL) {
			taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
		}
	}
	LOCK(&sc->dev_lock);

	/* Delete RX queue in card with flush param */
	oce_stop_rx(sc);

	/* Invalidate any pending cq and eq entries*/	
	for_all_evnt_queues(sc, eq, i)	
		oce_drain_eq(eq);
	for_all_rq_queues(sc, rq, i)
		oce_drain_rq_cq(rq);
	for_all_wq_queues(sc, wq, i)
		oce_drain_wq_cq(wq);

	/* But still we need to get MCC async events.
	   So enable intrs and also arm first EQ
	*/
	oce_hw_intr_enable(sc);
	oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

	DELAY(10);
}
2095
2096
2097 static void
2098 oce_if_activate(POCE_SOFTC sc)
2099 {
2100         struct oce_eq *eq;
2101         struct oce_rq *rq;
2102         struct oce_wq *wq;
2103         int i, rc = 0;
2104
2105         sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; 
2106         
2107         oce_hw_intr_disable(sc);
2108         
2109         oce_start_rx(sc);
2110
2111         for_all_rq_queues(sc, rq, i) {
2112                 rc = oce_start_rq(rq);
2113                 if (rc)
2114                         device_printf(sc->dev, "Unable to start RX\n");
2115         }
2116
2117         for_all_wq_queues(sc, wq, i) {
2118                 rc = oce_start_wq(wq);
2119                 if (rc)
2120                         device_printf(sc->dev, "Unable to start TX\n");
2121         }
2122
2123         
2124         for_all_evnt_queues(sc, eq, i)
2125                 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
2126
2127         oce_hw_intr_enable(sc);
2128
2129 }
2130
2131 static void
2132 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
2133 {
2134         /* Update Link status */
2135         if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
2136              ASYNC_EVENT_LINK_UP) {
2137                 sc->link_status = ASYNC_EVENT_LINK_UP;
2138                 if_link_state_change(sc->ifp, LINK_STATE_UP);
2139         } else {
2140                 sc->link_status = ASYNC_EVENT_LINK_DOWN;
2141                 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2142         }
2143
2144         /* Update speed */
2145         sc->link_speed = acqe->u0.s.speed;
2146         sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
2147
2148 }
2149
2150
/* Handle the Completion Queue for the Mailbox/Async notifications.
 *
 * Consumes all valid CQEs on the MQ's completion ring, dispatching the
 * async events the driver cares about (link state, GRP5 PVID, QnQ
 * debug), and credits the consumed entries back to the CQ without
 * re-arming it.  Always returns 0.
 */
uint16_t
oce_mq_handler(void *arg)
{
	struct oce_mq *mq = (struct oce_mq *)arg;
	POCE_SOFTC sc = mq->parent;
	struct oce_cq *cq = mq->cq;
	int num_cqes = 0, evt_type = 0, optype = 0;
	struct oce_mq_cqe *cqe;
	struct oce_async_cqe_link_state *acqe;
	struct oce_async_event_grp5_pvid_state *gcqe;
	struct oce_async_event_qnq *dbgcqe;


	/* NOTE(review): POSTWRITE matches the original source; POSTREAD
	 * would be the usual flag when reading device-written CQEs —
	 * confirm against bus_dma(9) usage elsewhere in the driver. */
	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

	/* dw[3] carries the valid bit: non-zero means the entry is live */
	while (cqe->u0.dw[3]) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
		if (cqe->u0.s.async_event) {
			evt_type = cqe->u0.s.event_type;
			optype = cqe->u0.s.async_type;
			if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
				/* Link status evt */
				acqe = (struct oce_async_cqe_link_state *)cqe;
				process_link_state(sc, acqe);
			} else if ((evt_type == ASYNC_EVENT_GRP5) &&
				   (optype == ASYNC_EVENT_PVID_STATE)) {
				/* GRP5 PVID: latch (or clear) the port VLAN id */
				gcqe = 
				(struct oce_async_event_grp5_pvid_state *)cqe;
				if (gcqe->enabled)
					sc->pvid = gcqe->tag & VLAN_VID_MASK;
				else
					sc->pvid = 0;
				
			}
			else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
				optype == ASYNC_EVENT_DEBUG_QNQ) {
				/* QnQ debug event: record the outer VLAN id */
				dbgcqe = 
				(struct oce_async_event_qnq *)cqe;
				if(dbgcqe->valid)
					sc->qnqid = dbgcqe->vlan_tag;
				sc->qnq_debug_event = TRUE;
			}
		}
		/* Consume the entry: clear the valid word and advance */
		cqe->u0.dw[3] = 0;
		RING_GET(cq->ring, 1);
		bus_dmamap_sync(cq->ring->dma.tag,
				cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
		num_cqes++;
	}

	/* Return the consumed credits to the CQ (FALSE = do not re-arm) */
	if (num_cqes)
		oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

	return 0;
}
2211
2212
2213 static void
2214 setup_max_queues_want(POCE_SOFTC sc)
2215 {
2216         /* Check if it is FLEX machine. Is so dont use RSS */   
2217         if ((sc->function_mode & FNM_FLEX10_MODE) ||
2218             (sc->function_mode & FNM_UMC_MODE)    ||
2219             (sc->function_mode & FNM_VNIC_MODE)   ||
2220             (!is_rss_enabled(sc))                 ||
2221             (sc->flags & OCE_FLAGS_BE2)) {
2222                 sc->nrqs = 1;
2223                 sc->nwqs = 1;
2224         } else {
2225                 sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1;
2226                 sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs);
2227         }
2228 }
2229
2230
2231 static void
2232 update_queues_got(POCE_SOFTC sc)
2233 {
2234         if (is_rss_enabled(sc)) {
2235                 sc->nrqs = sc->intr_count + 1;
2236                 sc->nwqs = sc->intr_count;
2237         } else {
2238                 sc->nrqs = 1;
2239                 sc->nwqs = 1;
2240         }
2241 }
2242
2243 static int 
2244 oce_check_ipv6_ext_hdr(struct mbuf *m)
2245 {
2246         struct ether_header *eh = mtod(m, struct ether_header *);
2247         caddr_t m_datatemp = m->m_data;
2248
2249         if (eh->ether_type == htons(ETHERTYPE_IPV6)) {
2250                 m->m_data += sizeof(struct ether_header);
2251                 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
2252
2253                 if((ip6->ip6_nxt != IPPROTO_TCP) && \
2254                                 (ip6->ip6_nxt != IPPROTO_UDP)){
2255                         struct ip6_ext *ip6e = NULL;
2256                         m->m_data += sizeof(struct ip6_hdr);
2257
2258                         ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *);
2259                         if(ip6e->ip6e_len == 0xff) {
2260                                 m->m_data = m_datatemp;
2261                                 return TRUE;
2262                         }
2263                 } 
2264                 m->m_data = m_datatemp;
2265         }
2266         return FALSE;
2267 }
2268
2269 static int 
2270 is_be3_a1(POCE_SOFTC sc)
2271 {
2272         if((sc->flags & OCE_FLAGS_BE3)  && ((sc->asic_revision & 0xFF) < 2)) {
2273                 return TRUE;
2274         }
2275         return FALSE;
2276 }
2277
2278 static struct mbuf *
2279 oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
2280 {
2281         uint16_t vlan_tag = 0;
2282
2283         if(!M_WRITABLE(m))
2284                 return NULL;
2285
2286         /* Embed vlan tag in the packet if it is not part of it */
2287         if(m->m_flags & M_VLANTAG) {
2288                 vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag);
2289                 m->m_flags &= ~M_VLANTAG;
2290         }
2291
2292         /* if UMC, ignore vlan tag insertion and instead insert pvid */
2293         if(sc->pvid) {
2294                 if(!vlan_tag)
2295                         vlan_tag = sc->pvid;
2296                 *complete = FALSE;
2297         }
2298
2299         if(vlan_tag) {
2300                 m = ether_vlanencap(m, vlan_tag);
2301         }
2302
2303         if(sc->qnqid) {
2304                 m = ether_vlanencap(m, sc->qnqid);
2305                 *complete = FALSE;
2306         }
2307         return m;
2308 }
2309
2310 static int 
2311 oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m)
2312 {
2313         if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \
2314                         oce_check_ipv6_ext_hdr(m)) {
2315                 return TRUE;
2316         }
2317         return FALSE;
2318 }
2319
2320 static void
2321 oce_get_config(POCE_SOFTC sc)
2322 {
2323         int rc = 0;
2324         uint32_t max_rss = 0;
2325
2326         if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native))
2327                 max_rss = OCE_LEGACY_MODE_RSS;
2328         else
2329                 max_rss = OCE_MAX_RSS;
2330
2331         if (!IS_BE(sc)) {
2332                 rc = oce_get_func_config(sc);
2333                 if (rc) {
2334                         sc->nwqs = OCE_MAX_WQ;
2335                         sc->nrssqs = max_rss;
2336                         sc->nrqs = sc->nrssqs + 1;
2337                 }
2338         }
2339         else {
2340                 rc = oce_get_profile_config(sc);
2341                 sc->nrssqs = max_rss;
2342                 sc->nrqs = sc->nrssqs + 1;
2343                 if (rc)
2344                         sc->nwqs = OCE_MAX_WQ;
2345         }
2346 }