/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD$ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"


/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routines prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
                          void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
                                        uint32_t status);
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
                                 struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
                                                struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);
static void process_link_state(POCE_SOFTC sc,
                 struct oce_async_cqe_link_state *acqe);


/* IP specific */
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif

static device_method_t oce_dispatch[] = {
        DEVMETHOD(device_probe, oce_probe),
        DEVMETHOD(device_attach, oce_attach),
        DEVMETHOD(device_detach, oce_detach),
        DEVMETHOD(device_shutdown, oce_shutdown),
        {0, 0}
};

static driver_t oce_driver = {
        "oce",
        oce_dispatch,
        sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;


DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);


/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;


TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);


/* Supported devices table */
static uint32_t supportedDevices[] =  {
        (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
        (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
};




/*****************************************************************************
 *                      Driver entry points functions                        *
 *****************************************************************************/

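/*
 * Probe handler: match the PCI vendor/device pair against the supported
 * devices table and record the ASIC family (BE2, BE3 or Lancer/XE201)
 * in the softc flags.
 */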
static int
oce_probe(device_t dev)
{
        uint16_t vendor = 0;
        uint16_t device = 0;
        int i = 0;
        char str[256] = {0};
        POCE_SOFTC sc;

        sc = device_get_softc(dev);
        bzero(sc, sizeof(OCE_SOFTC));
        sc->dev = dev;

        vendor = pci_get_vendor(dev);
        device = pci_get_device(dev);

        for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
                if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
                        if (device == (supportedDevices[i] & 0xffff)) {
                                sprintf(str, "%s:%s", "Emulex CNA NIC function",
                                        component_revision);
                                device_set_desc_copy(dev, str);

                                switch (device) {
                                case PCI_PRODUCT_BE2:
                                        sc->flags |= OCE_FLAGS_BE2;
                                        break;
                                case PCI_PRODUCT_BE3:
                                        sc->flags |= OCE_FLAGS_BE3;
                                        break;
                                case PCI_PRODUCT_XE201:
                                case PCI_PRODUCT_XE201_VF:
                                        sc->flags |= OCE_FLAGS_XE201;
                                        break;
                                default:
                                        return ENXIO;
                                }
                                return BUS_PROBE_DEFAULT;
                        }
                }
        }

        return ENXIO;
}


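/*
 * Attach handler: bring the adapter up in stages (PCI resources, hardware
 * init, interrupts, queues, ifnet, LRO, stats, timer), unwinding each
 * stage on failure through the labels at the bottom.
 */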
static int
oce_attach(device_t dev)
{
        POCE_SOFTC sc;
        int rc = 0;

        sc = device_get_softc(dev);

        rc = oce_hw_pci_alloc(sc);
        if (rc)
                return rc;

        sc->rss_enable   = oce_enable_rss;
        sc->tx_ring_size = OCE_TX_RING_SIZE;
        sc->rx_ring_size = OCE_RX_RING_SIZE;
        sc->rq_frag_size = OCE_RQ_BUF_SIZE;
        sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
        sc->promisc      = OCE_DEFAULT_PROMISCUOUS;

        LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
        LOCK_CREATE(&sc->dev_lock,  "Device_lock");

        /* initialise the hardware */
        rc = oce_hw_init(sc);
        if (rc)
                goto pci_res_free;

        setup_max_queues_want(sc);

        rc = oce_setup_intr(sc);
        if (rc)
                goto mbox_free;

        rc = oce_queue_init_all(sc);
        if (rc)
                goto intr_free;

        rc = oce_attach_ifp(sc);
        if (rc)
                goto queues_free;

#if defined(INET6) || defined(INET)
        rc = oce_init_lro(sc);
        if (rc)
                goto ifp_free;
#endif

        rc = oce_hw_start(sc);
        if (rc)
                goto lro_free;

        sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
                                oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
        sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
                                oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

        rc = oce_stats_init(sc);
        if (rc)
                goto vlan_free;

        oce_add_sysctls(sc);

        callout_init(&sc->timer, CALLOUT_MPSAFE);
        rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
        if (rc)
                goto stats_free;
#ifdef DEV_NETMAP
#endif /* DEV_NETMAP */

        return 0;

stats_free:
        callout_drain(&sc->timer);
        oce_stats_free(sc);
vlan_free:
        if (sc->vlan_attach)
                EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
        if (sc->vlan_detach)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
        oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
        oce_free_lro(sc);
ifp_free:
#endif
        ether_ifdetach(sc->ifp);
        if_free(sc->ifp);
queues_free:
        oce_queue_release_all(sc);
intr_free:
        oce_intr_free(sc);
mbox_free:
        oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
        oce_hw_pci_free(sc);
        LOCK_DESTROY(&sc->dev_lock);
        LOCK_DESTROY(&sc->bmbx_lock);
        return rc;

}


static int
oce_detach(device_t dev)
{
        POCE_SOFTC sc = device_get_softc(dev);

        LOCK(&sc->dev_lock);
        oce_if_deactivate(sc);
        UNLOCK(&sc->dev_lock);

        callout_drain(&sc->timer);

        if (sc->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
        if (sc->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

        ether_ifdetach(sc->ifp);

        if_free(sc->ifp);

        oce_hw_shutdown(sc);

        bus_generic_detach(dev);

        return 0;
}


static int
oce_shutdown(device_t dev)
{
        int rc;

        rc = oce_detach(dev);

        return rc;
}


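/*
 * Ioctl handler: services media, MTU, flags (up/down and promiscuous),
 * multicast and capability requests; everything else is passed on to
 * ether_ioctl().
 */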
static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ifreq *ifr = (struct ifreq *)data;
        POCE_SOFTC sc = ifp->if_softc;
        int rc = 0;
        uint32_t u;

        switch (command) {

        case SIOCGIFMEDIA:
                rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;

        case SIOCSIFMTU:
                if (ifr->ifr_mtu > OCE_MAX_MTU)
                        rc = EINVAL;
                else
                        ifp->if_mtu = ifr->ifr_mtu;
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
                                oce_init(sc);
                        }
                        device_printf(sc->dev, "Interface Up\n");
                } else {
                        LOCK(&sc->dev_lock);

                        sc->ifp->if_drv_flags &=
                            ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
                        oce_if_deactivate(sc);

                        UNLOCK(&sc->dev_lock);

                        device_printf(sc->dev, "Interface Down\n");
                }

                if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
                        sc->promisc = TRUE;
                        oce_rxf_set_promiscuous(sc, sc->promisc);
                } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
                        sc->promisc = FALSE;
                        oce_rxf_set_promiscuous(sc, sc->promisc);
                }

                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                rc = oce_hw_update_multicast(sc);
                if (rc)
                        device_printf(sc->dev,
                                "Update multicast address failed\n");
                break;

        case SIOCSIFCAP:
                u = ifr->ifr_reqcap ^ ifp->if_capenable;

                if (u & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        if (IFCAP_TSO & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO;
                                ifp->if_hwassist &= ~CSUM_TSO;
                                if_printf(ifp,
                                         "TSO disabled due to -txcsum.\n");
                        }
                }

                if (u & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;

                if (u & IFCAP_TSO4) {
                        ifp->if_capenable ^= IFCAP_TSO4;

                        if (IFCAP_TSO & ifp->if_capenable) {
                                if (IFCAP_TXCSUM & ifp->if_capenable)
                                        ifp->if_hwassist |= CSUM_TSO;
                                else {
                                        ifp->if_capenable &= ~IFCAP_TSO;
                                        ifp->if_hwassist &= ~CSUM_TSO;
                                        if_printf(ifp,
                                            "Enable txcsum first.\n");
                                        rc = EAGAIN;
                                }
                        } else
                                ifp->if_hwassist &= ~CSUM_TSO;
                }

                if (u & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

                if (u & IFCAP_VLAN_HWFILTER) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                        oce_vid_config(sc);
                }
#if defined(INET6) || defined(INET)
                if (u & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
#endif

                break;

        case SIOCGPRIVATE_0:
                rc = oce_handle_passthrough(ifp, data);
                break;
        default:
                rc = ether_ioctl(ifp, command, data);
                break;
        }

        return rc;
}


static void
oce_init(void *arg)
{
        POCE_SOFTC sc = arg;

        LOCK(&sc->dev_lock);

        if (sc->ifp->if_flags & IFF_UP) {
                oce_if_deactivate(sc);
                oce_if_activate(sc);
        }

        UNLOCK(&sc->dev_lock);

}


static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
        POCE_SOFTC sc = ifp->if_softc;
        struct oce_wq *wq = NULL;
        int queue_index = 0;
        int status = 0;

        if ((m->m_flags & M_FLOWID) != 0)
                queue_index = m->m_pkthdr.flowid % sc->nwqs;

        wq = sc->wq[queue_index];

        if (TRY_LOCK(&wq->tx_lock)) {
                status = oce_multiq_transmit(ifp, m, wq);
                UNLOCK(&wq->tx_lock);
        } else {
                status = drbr_enqueue(ifp, wq->br, m);
        }
        return status;

}


static void
oce_multiq_flush(struct ifnet *ifp)
{
        POCE_SOFTC sc = ifp->if_softc;
        struct mbuf     *m;
        int i = 0;

        for (i = 0; i < sc->nwqs; i++) {
                while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
                        m_freem(m);
        }
        if_qflush(ifp);
}



/*****************************************************************************
 *                   Driver interrupt routines functions                     *
 *****************************************************************************/

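/*
 * Deferred interrupt handler (taskqueue context): drain the event queue,
 * run the completion handler for every CQ attached to this EQ, then
 * re-arm the CQs and finally the EQ itself.
 */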
static void
oce_intr(void *arg, int pending)
{

        POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
        POCE_SOFTC sc = ii->sc;
        struct oce_eq *eq = ii->eq;
        struct oce_eqe *eqe;
        struct oce_cq *cq = NULL;
        int i, num_eqes = 0;


        bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
                                 BUS_DMASYNC_POSTWRITE);
        do {
                eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
                if (eqe->evnt == 0)
                        break;
                eqe->evnt = 0;
                bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
                                        BUS_DMASYNC_POSTWRITE);
                RING_GET(eq->ring, 1);
                num_eqes++;

        } while (TRUE);

        if (!num_eqes)
                goto eq_arm; /* Spurious */

        /* Clear EQ entries, but don't arm */
        oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

        /* Process TX, RX and MCC completions, but don't arm the CQs yet */
        for (i = 0; i < eq->cq_valid; i++) {
                cq = eq->cq[i];
                (*cq->cq_handler)(cq->cb_arg);
        }

        /* Arm all CQs connected to this EQ */
        for (i = 0; i < eq->cq_valid; i++) {
                cq = eq->cq[i];
                oce_arm_cq(sc, cq->cq_id, 0, TRUE);
        }

eq_arm:
        oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
        return;
}


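/*
 * Interrupt setup: request MAX(nrqs - 1, nwqs) MSI-X vectors when the
 * adapter supports MSI-X and RSS is enabled, falling back to a single
 * legacy INTx vector, then scale the queue counts to the vectors
 * actually granted.
 */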
static int
oce_setup_intr(POCE_SOFTC sc)
{
        int rc = 0, use_intx = 0;
        int vector = 0, req_vectors = 0;

        if (sc->rss_enable)
                req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
        else
                req_vectors = 1;

        if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
                sc->intr_count = req_vectors;
                rc = pci_alloc_msix(sc->dev, &sc->intr_count);
                if (rc != 0) {
                        use_intx = 1;
                        pci_release_msi(sc->dev);
                } else
                        sc->flags |= OCE_FLAGS_USING_MSIX;
        } else
                use_intx = 1;

        if (use_intx)
                sc->intr_count = 1;

        /* Scale number of queues based on intr we got */
        update_queues_got(sc);

        if (use_intx) {
                device_printf(sc->dev, "Using legacy interrupt\n");
                rc = oce_alloc_intr(sc, vector, oce_intr);
                if (rc)
                        goto error;
        } else {
                for (; vector < sc->intr_count; vector++) {
                        rc = oce_alloc_intr(sc, vector, oce_intr);
                        if (rc)
                                goto error;
                }
        }

        return 0;
error:
        oce_intr_free(sc);
        return rc;
}


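/*
 * Fast interrupt filter: runs in interrupt context, so it only quiesces
 * the EQ and hands the real work to the per-vector taskqueue.
 */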
static int
oce_fast_isr(void *arg)
{
        POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
        POCE_SOFTC sc = ii->sc;

        if (ii->eq == NULL)
                return FILTER_STRAY;

        oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

        taskqueue_enqueue_fast(ii->tq, &ii->task);

        return FILTER_HANDLED;
}


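/*
 * Allocate one interrupt vector: reserve the IRQ resource, create a
 * fast taskqueue for the deferred handler and hook up oce_fast_isr as
 * the filter routine.
 */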
static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
        POCE_INTR_INFO ii = &sc->intrs[vector];
        int rc = 0, rr;

        if (vector >= OCE_MAX_EQ)
                return (EINVAL);

        /* Set the resource id for the interrupt.
         * MSIx is vector + 1 for the resource id,
         * INTx is 0 for the resource id.
         */
        if (sc->flags & OCE_FLAGS_USING_MSIX)
                rr = vector + 1;
        else
                rr = 0;
        ii->intr_res = bus_alloc_resource_any(sc->dev,
                                              SYS_RES_IRQ,
                                              &rr, RF_ACTIVE|RF_SHAREABLE);
        ii->irq_rr = rr;
        if (ii->intr_res == NULL) {
                device_printf(sc->dev,
                          "Could not allocate interrupt\n");
                rc = ENXIO;
                return rc;
        }

        TASK_INIT(&ii->task, 0, isr, ii);
        ii->vector = vector;
        sprintf(ii->task_name, "oce_task[%d]", ii->vector);
        ii->tq = taskqueue_create_fast(ii->task_name,
                        M_NOWAIT,
                        taskqueue_thread_enqueue,
                        &ii->tq);
        taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
                        device_get_nameunit(sc->dev));

        ii->sc = sc;
        rc = bus_setup_intr(sc->dev,
                        ii->intr_res,
                        INTR_TYPE_NET,
                        oce_fast_isr, NULL, ii, &ii->tag);
        return rc;

}


void
oce_intr_free(POCE_SOFTC sc)
{
        int i = 0;

        for (i = 0; i < sc->intr_count; i++) {

                if (sc->intrs[i].tag != NULL)
                        bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
                                                sc->intrs[i].tag);
                if (sc->intrs[i].tq != NULL)
                        taskqueue_free(sc->intrs[i].tq);

                if (sc->intrs[i].intr_res != NULL)
                        bus_release_resource(sc->dev, SYS_RES_IRQ,
                                                sc->intrs[i].irq_rr,
                                                sc->intrs[i].intr_res);
                sc->intrs[i].tag = NULL;
                sc->intrs[i].intr_res = NULL;
        }

        if (sc->flags & OCE_FLAGS_USING_MSIX)
                pci_release_msi(sc->dev);

}



/******************************************************************************
*                         Media callbacks functions                           *
******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
        POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;


        req->ifm_status = IFM_AVALID;
        req->ifm_active = IFM_ETHER;

        if (sc->link_status == 1)
                req->ifm_status |= IFM_ACTIVE;
        else
                return;

        switch (sc->link_speed) {
        case 1: /* 10 Mbps */
                req->ifm_active |= IFM_10_T | IFM_FDX;
                sc->speed = 10;
                break;
        case 2: /* 100 Mbps */
                req->ifm_active |= IFM_100_TX | IFM_FDX;
                sc->speed = 100;
                break;
        case 3: /* 1 Gbps */
                req->ifm_active |= IFM_1000_T | IFM_FDX;
                sc->speed = 1000;
                break;
        case 4: /* 10 Gbps */
                req->ifm_active |= IFM_10G_SR | IFM_FDX;
                sc->speed = 10000;
                break;
        }

        return;
}


int
oce_media_change(struct ifnet *ifp)
{
        return 0;
}




/*****************************************************************************
 *                        Transmit routines functions                        *
 *****************************************************************************/

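/*
 * Enqueue one mbuf chain on a work queue: DMA-map the chain (defragging
 * once on EFBIG), build the NIC header WQE plus one fragment WQE per
 * segment (padding to an even WQE count on BE), and ring the TX doorbell.
 */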
static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
        int rc = 0, i, retry_cnt = 0;
        bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
        struct mbuf *m, *m_temp;
        struct oce_wq *wq = sc->wq[wq_index];
        struct oce_packet_desc *pd;
        uint32_t out;
        struct oce_nic_hdr_wqe *nichdr;
        struct oce_nic_frag_wqe *nicfrag;
        int num_wqes;
        uint32_t reg_value;

        m = *mpp;
        if (!m)
                return EINVAL;

        if (!(m->m_flags & M_PKTHDR)) {
                rc = ENXIO;
                goto free_ret;
        }

        if (m->m_pkthdr.csum_flags & CSUM_TSO) {
                /* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
                m = oce_tso_setup(sc, mpp);
#else
                m = NULL;
#endif
                if (m == NULL) {
                        rc = ENXIO;
                        goto free_ret;
                }
        }

        out = wq->packets_out + 1;
        if (out == OCE_WQ_PACKET_ARRAY_SIZE)
                out = 0;
        if (out == wq->packets_in)
                return EBUSY;

        pd = &wq->pckts[wq->packets_out];
retry:
        rc = bus_dmamap_load_mbuf_sg(wq->tag,
                                     pd->map,
                                     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
        if (rc == 0) {
                num_wqes = pd->nsegs + 1;
                if (IS_BE(sc)) {
                        /* Dummy required only for BE3. */
                        if (num_wqes & 1)
                                num_wqes++;
                }
                if (num_wqes >= RING_NUM_FREE(wq->ring)) {
                        bus_dmamap_unload(wq->tag, pd->map);
                        return EBUSY;
                }

                bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
                pd->mbuf = m;
                wq->packets_out = out;

                nichdr =
                    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
                nichdr->u0.dw[0] = 0;
                nichdr->u0.dw[1] = 0;
                nichdr->u0.dw[2] = 0;
                nichdr->u0.dw[3] = 0;

                nichdr->u0.s.complete = 1;
                nichdr->u0.s.event = 1;
                nichdr->u0.s.crc = 1;
                nichdr->u0.s.forward = 0;
                nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
                nichdr->u0.s.udpcs =
                    (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
                nichdr->u0.s.tcpcs =
                    (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
                nichdr->u0.s.num_wqe = num_wqes;
                nichdr->u0.s.total_length = m->m_pkthdr.len;
                if (m->m_flags & M_VLANTAG) {
                        nichdr->u0.s.vlan = 1; /* VLAN present */
                        nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
                }
                if (m->m_pkthdr.csum_flags & CSUM_TSO) {
                        if (m->m_pkthdr.tso_segsz) {
                                nichdr->u0.s.lso = 1;
                                nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
                        }
                        if (!IS_BE(sc))
                                nichdr->u0.s.ipcs = 1;
                }

                RING_PUT(wq->ring, 1);
                wq->ring->num_used++;

                for (i = 0; i < pd->nsegs; i++) {
                        nicfrag =
                            RING_GET_PRODUCER_ITEM_VA(wq->ring,
                                                      struct oce_nic_frag_wqe);
                        nicfrag->u0.s.rsvd0 = 0;
                        nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
                        nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
                        nicfrag->u0.s.frag_len = segs[i].ds_len;
                        pd->wqe_idx = wq->ring->pidx;
                        RING_PUT(wq->ring, 1);
                        wq->ring->num_used++;
                }
                if (num_wqes > (pd->nsegs + 1)) {
                        nicfrag =
                            RING_GET_PRODUCER_ITEM_VA(wq->ring,
                                                      struct oce_nic_frag_wqe);
                        nicfrag->u0.dw[0] = 0;
                        nicfrag->u0.dw[1] = 0;
                        nicfrag->u0.dw[2] = 0;
                        nicfrag->u0.dw[3] = 0;
                        pd->wqe_idx = wq->ring->pidx;
                        RING_PUT(wq->ring, 1);
                        wq->ring->num_used++;
                        pd->nsegs++;
                }

                sc->ifp->if_opackets++;
                wq->tx_stats.tx_reqs++;
                wq->tx_stats.tx_wrbs += num_wqes;
                wq->tx_stats.tx_bytes += m->m_pkthdr.len;
                wq->tx_stats.tx_pkts++;

                bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
                                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                reg_value = (num_wqes << 16) | wq->wq_id;
                OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);

        } else if (rc == EFBIG) {
                if (retry_cnt == 0) {
                        m_temp = m_defrag(m, M_DONTWAIT);
                        if (m_temp == NULL)
                                goto free_ret;
                        m = m_temp;
                        *mpp = m_temp;
                        retry_cnt = retry_cnt + 1;
                        goto retry;
                } else
                        goto free_ret;
        } else if (rc == ENOMEM)
                return rc;
        else
                goto free_ret;

        return 0;

free_ret:
        m_freem(*mpp);
        *mpp = NULL;
        return rc;
}


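/*
 * Reclaim one completed transmit: unmap and free the mbuf, return its
 * WQEs to the ring and restart the queue if it had been marked OACTIVE.
 */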
static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
        uint32_t in;
        struct oce_packet_desc *pd;
        POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
        struct mbuf *m;

        if (wq->packets_out == wq->packets_in)
                device_printf(sc->dev, "WQ transmit descriptor missing\n");

        in = wq->packets_in + 1;
        if (in == OCE_WQ_PACKET_ARRAY_SIZE)
                in = 0;

        pd = &wq->pckts[wq->packets_in];
        wq->packets_in = in;
        wq->ring->num_used -= (pd->nsegs + 1);
        bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(wq->tag, pd->map);

        m = pd->mbuf;
        m_freem(m);
        pd->mbuf = NULL;

        if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
                if (wq->ring->num_used < (wq->ring->num_items / 2)) {
                        sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
                        oce_tx_restart(sc, wq);
                }
        }
}


static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

        if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
                return;

#if __FreeBSD_version >= 800000
        if (!drbr_empty(sc->ifp, wq->br))
#else
        if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
                taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);

}


#if defined(INET6) || defined(INET)
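/*
 * TSO setup: make the mbuf chain writable if needed and pull the
 * Ethernet, IP/IPv6 and TCP headers into the first mbuf so the WQE
 * builder can rely on contiguous headers. Returns NULL for non-TCP
 * payloads.
 */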
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
{
        struct mbuf *m;
#ifdef INET
        struct ip *ip;
#endif
#ifdef INET6
        struct ip6_hdr *ip6;
#endif
        struct ether_vlan_header *eh;
        struct tcphdr *th;
        uint16_t etype;
        int total_len = 0, ehdrlen = 0;

        m = *mpp;

        if (M_WRITABLE(m) == 0) {
                m = m_dup(*mpp, M_DONTWAIT);
                if (!m)
                        return NULL;
                m_freem(*mpp);
                *mpp = m;
        }

        eh = mtod(m, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                etype = ntohs(eh->evl_proto);
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                etype = ntohs(eh->evl_encap_proto);
                ehdrlen = ETHER_HDR_LEN;
        }

        switch (etype) {
#ifdef INET
        case ETHERTYPE_IP:
                ip = (struct ip *)(m->m_data + ehdrlen);
                if (ip->ip_p != IPPROTO_TCP)
                        return NULL;
                th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

                total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
                break;
#endif
#ifdef INET6
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
                if (ip6->ip6_nxt != IPPROTO_TCP)
                        return NULL;
                th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

                total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
                break;
#endif
        default:
                return NULL;
        }

        m = m_pullup(m, total_len);
        if (!m)
                return NULL;
        *mpp = m;
        return m;

}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
        struct oce_wq *wq = arg;
        POCE_SOFTC sc = wq->parent;
        struct ifnet *ifp = sc->ifp;
        int rc = 0;

#if __FreeBSD_version >= 800000
        if (TRY_LOCK(&wq->tx_lock)) {
                rc = oce_multiq_transmit(ifp, NULL, wq);
                if (rc) {
                        device_printf(sc->dev,
                         "TX[%d] restart failed\n", wq->queue_index);
                }
                UNLOCK(&wq->tx_lock);
        }
#else
        oce_start(ifp);
#endif

}


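/*
 * Legacy if_start handler: drain if_snd through the default work queue,
 * prepending the mbuf and marking the interface OACTIVE when the ring
 * fills up.
 */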
void
oce_start(struct ifnet *ifp)
{
        POCE_SOFTC sc = ifp->if_softc;
        struct mbuf *m;
        int rc = 0;
        int def_q = 0; /* Default TX queue is 0 */

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                        IFF_DRV_RUNNING)
                return;

        do {
                IF_DEQUEUE(&sc->ifp->if_snd, m);
                if (m == NULL)
                        break;

                LOCK(&sc->wq[def_q]->tx_lock);
                rc = oce_tx(sc, &m, def_q);
                UNLOCK(&sc->wq[def_q]->tx_lock);
                if (rc) {
                        if (m != NULL) {
                                sc->wq[def_q]->tx_stats.tx_stops++;
                                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                                IFQ_DRV_PREPEND(&ifp->if_snd, m);
                                m = NULL;
                        }
                        break;
                }
                if (m != NULL)
                        ETHER_BPF_MTAP(ifp, m);

        } while (TRUE);

        return;
}


/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
        struct oce_wq *wq = (struct oce_wq *)arg;
        POCE_SOFTC sc = wq->parent;
        struct oce_cq *cq = wq->cq;
        struct oce_nic_tx_cqe *cqe;
        int num_cqes = 0;

        LOCK(&wq->tx_lock);
        bus_dmamap_sync(cq->ring->dma.tag,
                        cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
        while (cqe->u0.dw[3]) {
                DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

                wq->ring->cidx = cqe->u0.s.wqe_index + 1;
                if (wq->ring->cidx >= wq->ring->num_items)
                        wq->ring->cidx -= wq->ring->num_items;

                oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
                wq->tx_stats.tx_compl++;
                cqe->u0.dw[3] = 0;
                RING_GET(cq->ring, 1);
                bus_dmamap_sync(cq->ring->dma.tag,
                                cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
                cqe =
                    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
                num_cqes++;
        }

        if (num_cqes)
                oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
        UNLOCK(&wq->tx_lock);

        return 0;
}


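/*
 * Multiqueue transmit (if_transmit path): funnel mbufs through the
 * per-queue buf_ring, then push as many as the hardware ring accepts.
 */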
static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
        POCE_SOFTC sc = ifp->if_softc;
        int status = 0, queue_index = 0;
        struct mbuf *next = NULL;
        struct buf_ring *br = NULL;

        br  = wq->br;
        queue_index = wq->queue_index;

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                IFF_DRV_RUNNING) {
                if (m != NULL)
                        status = drbr_enqueue(ifp, br, m);
                return status;
        }

        if (m == NULL)
                next = drbr_dequeue(ifp, br);
        else if (drbr_needs_enqueue(ifp, br)) {
                if ((status = drbr_enqueue(ifp, br, m)) != 0)
                        return status;
                next = drbr_dequeue(ifp, br);
        } else
                next = m;

        while (next != NULL) {
                if (oce_tx(sc, &next, queue_index)) {
                        if (next != NULL) {
                                wq->tx_stats.tx_stops++;
                                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                                status = drbr_enqueue(ifp, br, next);
                        }
                        break;
                }
                drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
                ETHER_BPF_MTAP(ifp, next);
                next = drbr_dequeue(ifp, br);
        }

        return status;
}




/*****************************************************************************
 *                          Receive routines functions                       *
 *****************************************************************************/

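/*
 * Assemble one received frame: chain the RQ fragments into an mbuf,
 * fill in checksum and VLAN metadata from the CQE, and hand the packet
 * to LRO or straight to the stack.
 */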
static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
        uint32_t out;
        struct oce_packet_desc *pd;
        POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
        int i, len, frag_len;
        struct mbuf *m = NULL, *tail = NULL;
        uint16_t vtag;

        len = cqe->u0.s.pkt_size;
        if (!len) {
                /* partial DMA workaround for Lancer */
                oce_discard_rx_comp(rq, cqe);
                goto exit;
        }

        /* Get vlan_tag value */
        if (IS_BE(sc))
                vtag = BSWAP_16(cqe->u0.s.vlan_tag);
        else
                vtag = cqe->u0.s.vlan_tag;


        for (i = 0; i < cqe->u0.s.num_fragments; i++) {

                if (rq->packets_out == rq->packets_in) {
                        device_printf(sc->dev,
                                  "RQ receive descriptor missing\n");
                }
                out = rq->packets_out + 1;
                if (out == OCE_RQ_PACKET_ARRAY_SIZE)
                        out = 0;
                pd = &rq->pckts[rq->packets_out];
                rq->packets_out = out;

                bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(rq->tag, pd->map);
                rq->pending--;

                frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
                pd->mbuf->m_len = frag_len;

                if (tail != NULL) {
                        /* additional fragments */
                        pd->mbuf->m_flags &= ~M_PKTHDR;
                        tail->m_next = pd->mbuf;
                        tail = pd->mbuf;
                } else {
                        /* first fragment, fill out much of the packet header */
                        pd->mbuf->m_pkthdr.len = len;
                        pd->mbuf->m_pkthdr.csum_flags = 0;
                        if (IF_CSUM_ENABLED(sc)) {
                                if (cqe->u0.s.l4_cksum_pass) {
                                        pd->mbuf->m_pkthdr.csum_flags |=
                                            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                                        pd->mbuf->m_pkthdr.csum_data = 0xffff;
                                }
                                if (cqe->u0.s.ip_cksum_pass) {
                                        if (!cqe->u0.s.ip_ver) { /* IPV4 */
                                                pd->mbuf->m_pkthdr.csum_flags |=
                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
                                        }
                                }
                        }
                        m = tail = pd->mbuf;
                }
                pd->mbuf = NULL;
                len -= frag_len;
        }

        if (m) {
                if (!oce_cqe_portid_valid(sc, cqe)) {
                         m_freem(m);
                         goto exit;
                }

                m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
                m->m_pkthdr.flowid = rq->queue_index;
                m->m_flags |= M_FLOWID;
#endif
                /* This determines if the VLAN tag is valid */
                if (oce_cqe_vtp_valid(sc, cqe)) {
                        if (sc->function_mode & FNM_FLEX10_MODE) {
                                /* FLEX10. If QnQ is not set, neglect VLAN */
                                if (cqe->u0.s.qnq) {
                                        m->m_pkthdr.ether_vtag = vtag;
                                        m->m_flags |= M_VLANTAG;
                                }
                        } else if (sc->pvid != (vtag & VLAN_VID_MASK)) {
                                /* In UMC mode the pvid is generally stripped
                                   by hw, but in some cases we have seen it
                                   come through with the pvid. So if
                                   pvid == vlan, neglect the vlan.
                                 */
                                m->m_pkthdr.ether_vtag = vtag;
                                m->m_flags |= M_VLANTAG;
                        }
                }

                sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
                /* Try to queue to LRO */
                if (IF_LRO_ENABLED(sc) &&
                    !(m->m_flags & M_VLANTAG) &&
                    (cqe->u0.s.ip_cksum_pass) &&
                    (cqe->u0.s.l4_cksum_pass) &&
                    (!cqe->u0.s.ip_ver)       &&
                    (rq->lro.lro_cnt != 0)) {

                        if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
                                rq->lro_pkts_queued++;
                                goto post_done;
                        }
                        /* If LRO posting fails then try to post to STACK */
                }
#endif

                (*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
                /* Update rx stats per queue */
                rq->rx_stats.rx_pkts++;
                rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
                rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
                if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
                        rq->rx_stats.rx_mcast_pkts++;
                if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
                        rq->rx_stats.rx_ucast_pkts++;
        }
exit:
        return;
}


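/*
 * Drop a bad or partial receive completion: unmap and free every
 * fragment the CQE claims without passing anything to the stack.
 */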
static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
        uint32_t out, i = 0;
        struct oce_packet_desc *pd;
        POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
        int num_frags = cqe->u0.s.num_fragments;

        if (IS_XE201(sc) && cqe->u0.s.error) {
                /* Lancer A0 workaround:
                 * num_frags will be 1 more than actual in case of error
                 */
                if (num_frags)
                        num_frags -= 1;
        }
        for (i = 0; i < num_frags; i++) {
                if (rq->packets_out == rq->packets_in) {
                        device_printf(sc->dev,
                                "RQ receive descriptor missing\n");
                }
                out = rq->packets_out + 1;
                if (out == OCE_RQ_PACKET_ARRAY_SIZE)
                        out = 0;
                pd = &rq->pckts[rq->packets_out];
                rq->packets_out = out;

                bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(rq->tag, pd->map);
                rq->pending--;
                m_freem(pd->mbuf);
        }

}


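/*
 * The vlan_tag_present bit lives in a different CQE layout when the
 * adapter runs BE3-native mode, so pick the right structure before
 * reading it.
 */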
static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
        struct oce_nic_rx_cqe_v1 *cqe_v1;
        int vtp = 0;

        if (sc->be3_native) {
                cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
                vtp =  cqe_v1->u0.s.vlan_tag_present;
        } else
                vtp = cqe->u0.s.vlan_tag_present;

        return vtp;

}


static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
        struct oce_nic_rx_cqe_v1 *cqe_v1;
        int port_id = 0;

        if (sc->be3_native && IS_BE(sc)) {
                cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
                port_id =  cqe_v1->u0.s.port;
                if (sc->port_id != port_id)
                        return 0;
        } else
                ;/* For BE3 legacy and Lancer this is dummy */

        return 1;

}

#if defined(INET6) || defined(INET)
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
        struct lro_ctrl *lro = &rq->lro;
        struct lro_entry *queued;
        POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

        if (!IF_LRO_ENABLED(sc))
                return;

        while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, queued);
        }
        rq->lro_pkts_queued = 0;

        return;
}


static int
oce_init_lro(POCE_SOFTC sc)
{
        struct lro_ctrl *lro = NULL;
        int i = 0, rc = 0;

        for (i = 0; i < sc->nrqs; i++) {
                lro = &sc->rq[i]->lro;
                rc = tcp_lro_init(lro);
                if (rc != 0) {
                        device_printf(sc->dev, "LRO init failed\n");
                        return rc;
                }
                lro->ifp = sc->ifp;
        }

        return rc;
}


void
oce_free_lro(POCE_SOFTC sc)
{
        struct lro_ctrl *lro = NULL;
        int i = 0;

        for (i = 0; i < sc->nrqs; i++) {
                lro = &sc->rq[i]->lro;
                if (lro)
                        tcp_lro_free(lro);
        }
}
#endif /* INET6 || INET */

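/*
 * Replenish the receive queue: allocate cluster mbufs, DMA-map them,
 * post the RQEs and tell the hardware how many were posted via the
 * RXULP doorbell, in batches of at most OCE_MAX_RQ_POSTS.
 */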
1462 int
1463 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1464 {
1465         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1466         int i, in, rc;
1467         struct oce_packet_desc *pd;
1468         bus_dma_segment_t segs[6];
1469         int nsegs, added = 0;
1470         struct oce_nic_rqe *rqe;
1471         pd_rxulp_db_t rxdb_reg;
1472
1473
1474         for (i = 0; i < count; i++) {
1475                 in = rq->packets_in + 1;
1476                 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1477                         in = 0;
1478                 if (in == rq->packets_out)
1479                         break;  /* no more room */
1480
1481                 pd = &rq->pckts[rq->packets_in];
1482                 pd->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1483                 if (pd->mbuf == NULL)
1484                         break;
1485
1486                 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1487                 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1488                                              pd->map,
1489                                              pd->mbuf,
1490                                              segs, &nsegs, BUS_DMA_NOWAIT);
1491                 if (rc) {
1492                         m_free(pd->mbuf);
1493                         break;
1494                 }
1495
1496                 if (nsegs != 1) {
1497                         i--;
1498                         continue;
1499                 }
1500
1501                 rq->packets_in = in;
1502                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1503
1504                 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1505                 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1506                 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1507                 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1508                 RING_PUT(rq->ring, 1);
1509                 added++;
1510                 rq->pending++;
1511         }
1512         if (added != 0) {
1513                 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1514                         DELAY(1);
1515                         rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1516                         rxdb_reg.bits.qid = rq->rq_id;
1517                         OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1518                         added -= OCE_MAX_RQ_POSTS;
1519                 }
1520                 if (added > 0) {
1521                         DELAY(1);
1522                         rxdb_reg.bits.qid = rq->rq_id;
1523                         rxdb_reg.bits.num_posted = added;
1524                         OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1525                 }
1526         }
1527         
1528         return 0;       
1529 }
1530
1531
1532 /* Handle the Completion Queue for receive */
1533 uint16_t
1534 oce_rq_handler(void *arg)
1535 {
1536         struct oce_rq *rq = (struct oce_rq *)arg;
1537         struct oce_cq *cq = rq->cq;
1538         POCE_SOFTC sc = rq->parent;
1539         struct oce_nic_rx_cqe *cqe;
1540         int num_cqes = 0, rq_buffers_used = 0;
1541
1542
1543         LOCK(&rq->rx_lock);
1544         bus_dmamap_sync(cq->ring->dma.tag,
1545                         cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1546         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1547         while (cqe->u0.dw[2]) {
1548                 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1549
1550                 RING_GET(rq->ring, 1);
1551                 if (cqe->u0.s.error == 0) {
1552                         oce_rx(rq, cqe->u0.s.frag_index, cqe);
1553                 } else {
1554                         rq->rx_stats.rxcp_err++;
1555                         sc->ifp->if_ierrors++;
1556                         if (IS_XE201(sc)) 
1557                                 /* Lancer A0 no buffer workaround */
1558                                 oce_discard_rx_comp(rq, cqe);
1559                         else    
1560                                 /* Post L3/L4 errors to stack.*/
1561                                 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1562                         
1563                 }
1564                 rq->rx_stats.rx_compl++;
1565                 cqe->u0.dw[2] = 0;
1566
1567 #if defined(INET6) || defined(INET)
1568                 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1569                         oce_rx_flush_lro(rq);
1570                 }
1571 #endif
1572
1573                 RING_GET(cq->ring, 1);
1574                 bus_dmamap_sync(cq->ring->dma.tag,
1575                                 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1576                 cqe =
1577                     RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1578                 num_cqes++;
1579                 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1580                         break;
1581         }
1582
1583 #if defined(INET6) || defined(INET)
1584         if (IF_LRO_ENABLED(sc))
1585                 oce_rx_flush_lro(rq);
1586 #endif
1587         
1588         if (num_cqes) {
1589                 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
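                /* Replenish whatever the hardware has consumed since
                   the last refill (array size minus still-posted). */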
1590                 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1591                 if (rq_buffers_used > 1)
1592                         oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1593         }
1594
1595         UNLOCK(&rq->rx_lock);
1596         
1597         return 0;
1598
1599 }
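/*
 * The loop above follows the usual valid-bit completion protocol: the
 * hardware marks a CQE valid by writing non-zero into dw[2], the driver
 * consumes the entry, zeroes dw[2] so the slot reads empty on the next
 * ring wrap, and finally re-arms the CQ with the number of entries
 * consumed.  A minimal sketch (names illustrative only):
 *
 *      while (cqe->valid) {
 *              process(cqe);
 *              cqe->valid = 0;
 *              num++;
 *              cqe = next_cqe(cq);
 *      }
 *      if (num)
 *              arm_cq(cq, num);
 */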
1600
1601
1602
1603
/*****************************************************************************
 *                       Helper functions in this file                       *
 *****************************************************************************/
1607
1608 static int 
1609 oce_attach_ifp(POCE_SOFTC sc)
1610 {
1611
1612         sc->ifp = if_alloc(IFT_ETHER);
1613         if (!sc->ifp)
1614                 return ENOMEM;
1615
1616         ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1617         ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1618         ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1619
1620         sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1621         sc->ifp->if_ioctl = oce_ioctl;
1622         sc->ifp->if_start = oce_start;
1623         sc->ifp->if_init = oce_init;
1624         sc->ifp->if_mtu = ETHERMTU;
1625         sc->ifp->if_softc = sc;
1626 #if __FreeBSD_version >= 800000
1627         sc->ifp->if_transmit = oce_multiq_start;
1628         sc->ifp->if_qflush = oce_multiq_flush;
1629 #endif
1630
1631         if_initname(sc->ifp,
1632                     device_get_name(sc->dev), device_get_unit(sc->dev));
1633
1634         sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1635         IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1636         IFQ_SET_READY(&sc->ifp->if_snd);
1637
1638         sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1639         sc->ifp->if_hwassist |= CSUM_TSO;
1640         sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1641
1642         sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1643         sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1644         sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1645
1646 #if defined(INET6) || defined(INET)
1647         sc->ifp->if_capabilities |= IFCAP_TSO;
1648         sc->ifp->if_capabilities |= IFCAP_LRO;
1649         sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
1650 #endif
1651         
1652         sc->ifp->if_capenable = sc->ifp->if_capabilities;
1653         sc->ifp->if_baudrate = IF_Gbps(10UL);
1654
1655         ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
1656         
1657         return 0;
1658 }
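/*
 * Note: if_capenable starts out equal to if_capabilities above, so every
 * advertised offload is enabled by default.  Individual features can be
 * toggled later from userland, e.g. "ifconfig oce0 -lro" clears IFCAP_LRO
 * through the SIOCSIFCAP ioctl path.
 */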
1659
1660
1661 static void
1662 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1663 {
1664         POCE_SOFTC sc = ifp->if_softc;
1665
1666         if (ifp->if_softc !=  arg)
1667                 return;
1668         if ((vtag == 0) || (vtag > 4095))
1669                 return;
1670
1671         sc->vlan_tag[vtag] = 1;
1672         sc->vlans_added++;
1673         oce_vid_config(sc);
1674 }
1675
1676
1677 static void
1678 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1679 {
1680         POCE_SOFTC sc = ifp->if_softc;
1681
1682         if (ifp->if_softc !=  arg)
1683                 return;
1684         if ((vtag == 0) || (vtag > 4095))
1685                 return;
1686
1687         sc->vlan_tag[vtag] = 0;
1688         sc->vlans_added--;
1689         oce_vid_config(sc);
1690 }
1691
1692
/*
 * A maximum of 64 VLANs can be configured in BE.  If the user configures
 * more, place the card in VLAN promiscuous mode.
 */
1697 static int
1698 oce_vid_config(POCE_SOFTC sc)
1699 {
1700         struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1701         uint16_t ntags = 0, i;
1702         int status = 0;
1703
1704         if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) && 
1705                         (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1706                 for (i = 0; i < MAX_VLANS; i++) {
1707                         if (sc->vlan_tag[i]) {
1708                                 vtags[ntags].vtag = i;
1709                                 ntags++;
1710                         }
1711                 }
1712                 if (ntags)
1713                         status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1714                                                 vtags, ntags, 1, 0); 
1715         } else 
1716                 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1717                                                 NULL, 0, 1, 1);
1718         return status;
1719 }
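/*
 * Illustrative example: with IFCAP_VLAN_HWFILTER enabled and VLANs 10,
 * 20 and 30 registered, vtags[] becomes { 10, 20, 30 } with ntags == 3,
 * and only those tags pass the hardware filter.  Exceeding
 * MAX_VLANFILTER_SIZE registrations (or disabling the capability) takes
 * the else path, which programs VLAN promiscuous mode instead.
 */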
1720
1721
1722 static void
1723 oce_mac_addr_set(POCE_SOFTC sc)
1724 {
1725         uint32_t old_pmac_id = sc->pmac_id;
1726         int status = 0;
1727
1728         
1729         status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1730                          sc->macaddr.size_of_struct);
1731         if (!status)
1732                 return;
1733
1734         status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1735                                         sc->if_id, &sc->pmac_id);
1736         if (!status) {
1737                 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
1738                 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1739                                  sc->macaddr.size_of_struct); 
1740         }
        if (status)
                device_printf(sc->dev, "Failed to update MAC address\n");
1743
1744 }
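/*
 * The sequence above adds the new MAC before deleting the old pmac_id,
 * presumably so the port always retains a valid station address even if
 * one of the mailbox operations fails (make-before-break).
 */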
1745
1746
1747 static int
1748 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1749 {
1750         POCE_SOFTC sc = ifp->if_softc;
1751         struct ifreq *ifr = (struct ifreq *)data;
1752         int rc = ENXIO;
1753         char cookie[32] = {0};
1754         void *priv_data = (void *)ifr->ifr_data;
1755         void *ioctl_ptr;
1756         uint32_t req_size;
1757         struct mbx_hdr req;
1758         OCE_DMA_MEM dma_mem;
1759
1760
1761         if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1762                 return EFAULT;
1763         
1764         if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
1765                 return EINVAL;
1766         
1767         ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1768         if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
1769                 return EFAULT;
1770         
1771         req_size = le32toh(req.u0.req.request_length);
1772         if (req_size > 65536)
1773                 return EINVAL;
1774
1775         req_size += sizeof(struct mbx_hdr);
1776         rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1777         if (rc)
1778                 return ENOMEM;
1779
1780         if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1781                 rc = EFAULT;
1782                 goto dma_free;
1783         }
1784
1785         rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
1786         if (rc) {
1787                 rc = EIO;
1788                 goto dma_free;
1789         }
1790
1791         if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1792                 rc =  EFAULT;
1793
1794 dma_free:
1795         oce_dma_free(sc, &dma_mem);
1796         return rc;
1797
1798 }
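/*
 * Layout of the user buffer consumed above (sketch):
 *
 *     +--------------+----------------+------------------------------+
 *     | IOCTL_COOKIE | struct mbx_hdr | request payload              |
 *     +--------------+----------------+------------------------------+
 *                    ^ ioctl_ptr
 *
 * The header's request_length field (little endian) bounds the payload;
 * header plus payload are copied into DMA memory, passed through the
 * mailbox, and copied back out to the same user address on success.
 */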
1799
1800
1801 static void
1802 oce_local_timer(void *arg)
1803 {
1804         POCE_SOFTC sc = arg;
1805         int i = 0;
1806         
1807         oce_refresh_nic_stats(sc);
1808         oce_refresh_queue_stats(sc);
1809         oce_mac_addr_set(sc);
1810         
        /* TX watchdog */
1812         for (i = 0; i < sc->nwqs; i++)
1813                 oce_tx_restart(sc, sc->wq[i]);
1814         
1815         callout_reset(&sc->timer, hz, oce_local_timer, sc);
1816 }
1817
1818
1819 static void
1820 oce_if_deactivate(POCE_SOFTC sc)
1821 {
1822         int i, mtime = 0;
1823         int wait_req = 0;
1824         struct oce_rq *rq;
1825         struct oce_wq *wq;
1826         struct oce_eq *eq;
1827
1828         sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1829
        /* Wait briefly (a bounded poll, up to 400 iterations) for
           pending TX completions to drain. */
1831         while (mtime < 400) {
1832                 wait_req = 0;
1833                 for_all_wq_queues(sc, wq, i) {
1834                         if (wq->ring->num_used) {
1835                                 wait_req = 1;
1836                                 DELAY(1);
1837                                 break;
1838                         }
1839                 }
1840                 mtime += 1;
1841                 if (!wait_req)
1842                         break;
1843         }
1844
1845         /* Stop intrs and finish any bottom halves pending */
1846         oce_hw_intr_disable(sc);
1847
1848         for (i = 0; i < sc->intr_count; i++) {
1849                 if (sc->intrs[i].tq != NULL) {
1850                         taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
1851                 }
1852         }
1853
1854         /* Delete RX queue in card with flush param */
1855         oce_stop_rx(sc);
1856
        /* Invalidate any pending CQ and EQ entries. */
1858         for_all_evnt_queues(sc, eq, i)  
1859                 oce_drain_eq(eq);
1860         for_all_rq_queues(sc, rq, i)
1861                 oce_drain_rq_cq(rq);
1862         for_all_wq_queues(sc, wq, i)
1863                 oce_drain_wq_cq(wq);
1864
        /* We still need to receive MCC async events,
           so re-enable interrupts and arm the first EQ.
         */
1868         oce_hw_intr_enable(sc);
1869         oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
1870
1871         DELAY(10);
1872 }
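/*
 * Quiesce order used above (summary): mark the interface down, let
 * in-flight TX drain, mask interrupts and drain the taskqueues so no
 * bottom half is still running, flush the RX queues in hardware,
 * discard stale CQ/EQ entries, then unmask interrupts with only EQ 0
 * armed so that mailbox/MCC async events keep flowing.
 */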
1873
1874
1875 static void
1876 oce_if_activate(POCE_SOFTC sc)
1877 {
1878         struct oce_eq *eq;
1879         struct oce_rq *rq;
1880         struct oce_wq *wq;
1881         int i, rc = 0;
1882
1883         sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; 
1884         
1885         oce_hw_intr_disable(sc);
1886         
1887         oce_start_rx(sc);
1888
1889         for_all_rq_queues(sc, rq, i) {
1890                 rc = oce_start_rq(rq);
1891                 if (rc)
1892                         device_printf(sc->dev, "Unable to start RX\n");
1893         }
1894
1895         for_all_wq_queues(sc, wq, i) {
1896                 rc = oce_start_wq(wq);
1897                 if (rc)
1898                         device_printf(sc->dev, "Unable to start TX\n");
1899         }
1900
1901         
1902         for_all_evnt_queues(sc, eq, i)
1903                 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
1904
1905         oce_hw_intr_enable(sc);
1906
1907 }
1908
1909 static void
1910 process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
1911 {
1912         /* Update Link status */
1913         if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1914              ASYNC_EVENT_LINK_UP) {
1915                 sc->link_status = ASYNC_EVENT_LINK_UP;
1916                 if_link_state_change(sc->ifp, LINK_STATE_UP);
1917         } else {
1918                 sc->link_status = ASYNC_EVENT_LINK_DOWN;
1919                 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
1920         }
1921
1922         /* Update speed */
1923         sc->link_speed = acqe->u0.s.speed;
1924         sc->qos_link_speed = (uint32_t) acqe->u0.s.qos_link_speed * 10;
1925
1926 }
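/*
 * Example (assuming the firmware reports qos_link_speed in units of
 * 10 Mbps): a raw value of 1000 yields sc->qos_link_speed == 10000,
 * i.e. a 10 Gbps link expressed in Mbps.
 */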
1927
1928
1929 /* Handle the Completion Queue for the Mailbox/Async notifications */
1930 uint16_t
1931 oce_mq_handler(void *arg)
1932 {
1933         struct oce_mq *mq = (struct oce_mq *)arg;
1934         POCE_SOFTC sc = mq->parent;
1935         struct oce_cq *cq = mq->cq;
1936         int num_cqes = 0, evt_type = 0, optype = 0;
1937         struct oce_mq_cqe *cqe;
1938         struct oce_async_cqe_link_state *acqe;
1939         struct oce_async_event_grp5_pvid_state *gcqe;
1940
1941
1942         bus_dmamap_sync(cq->ring->dma.tag,
1943                         cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1944         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
1945
1946         while (cqe->u0.dw[3]) {
1947                 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
1948                 if (cqe->u0.s.async_event) {
1949                         evt_type = cqe->u0.s.event_type;
1950                         optype = cqe->u0.s.async_type;
1951                         if (evt_type  == ASYNC_EVENT_CODE_LINK_STATE) {
1952                                 /* Link status evt */
1953                                 acqe = (struct oce_async_cqe_link_state *)cqe;
1954                                 process_link_state(sc, acqe);
1955                         } else if ((evt_type == ASYNC_EVENT_GRP5) &&
1956                                    (optype == ASYNC_EVENT_PVID_STATE)) {
1957                                 /* GRP5 PVID */
1958                                 gcqe = 
1959                                 (struct oce_async_event_grp5_pvid_state *)cqe;
1960                                 if (gcqe->enabled)
1961                                         sc->pvid = gcqe->tag & VLAN_VID_MASK;
1962                                 else
1963                                         sc->pvid = 0;
1964                                 
1965                         }
1966                 }
1967                 cqe->u0.dw[3] = 0;
1968                 RING_GET(cq->ring, 1);
1969                 bus_dmamap_sync(cq->ring->dma.tag,
1970                                 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1971                 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
1972                 num_cqes++;
1973         }
1974
1975         if (num_cqes)
1976                 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1977
1978         return 0;
1979 }
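/*
 * Async CQE decode used above, by example: a CQE with async_event set
 * and event_type == ASYNC_EVENT_CODE_LINK_STATE is recast as a link
 * state ACQE and drives if_link_state_change(); one with event_type ==
 * ASYNC_EVENT_GRP5 and async_type == ASYNC_EVENT_PVID_STATE updates
 * sc->pvid from the announced port VLAN tag.
 */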
1980
1981
1982 static void
1983 setup_max_queues_want(POCE_SOFTC sc)
1984 {
1985         int max_rss = 0;
1986
        /* Check if it is a FLEX machine.  If so, don't use RSS. */
1988         if ((sc->function_mode & FNM_FLEX10_MODE) ||
1989             (sc->function_mode & FNM_UMC_MODE)    ||
1990             (sc->function_mode & FNM_VNIC_MODE)   ||
1991             (!sc->rss_enable)                     ||
1992             (sc->flags & OCE_FLAGS_BE2)) {
1993                 sc->nrqs = 1;
1994                 sc->nwqs = 1;
1995                 sc->rss_enable = 0;
1996         } else {
                /* For multiqueue, our design is to have the number of TX
                   rings equal the number of RSS rings, so that one RSS
                   ring and one TX ring can be paired on a single
                   interrupt, which improves CPU cache efficiency.
                 */
2001                 if (IS_BE(sc) && (!sc->be3_native))
2002                         max_rss = OCE_LEGACY_MODE_RSS;
2003                 else
2004                         max_rss = OCE_MAX_RSS;
2005
2006                 sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
2007                 sc->nwqs = MIN(OCE_NCPUS, max_rss);
2008         }
2009
2010 }
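/*
 * Worked example (illustrative): on an 8-CPU BE3-native adapter with
 * OCE_MAX_RSS >= 8, the RSS branch above requests nwqs == 8 TX rings
 * and nrqs == 9 RX rings (8 RSS rings plus the default ring), letting
 * each RSS/TX pair share one interrupt vector.
 */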
2011
2012
2013 static void
2014 update_queues_got(POCE_SOFTC sc)
2015 {
2016         if (sc->rss_enable) {
2017                 sc->nrqs = sc->intr_count + 1;
2018                 sc->nwqs = sc->intr_count;
2019         } else {
2020                 sc->nrqs = 1;
2021                 sc->nwqs = 1;
2022         }
2023 }
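/*
 * For instance (illustrative), if only 4 MSI-X vectors were granted,
 * the RSS case above trims the request to nwqs == 4 and nrqs == 5 so
 * the queue counts match the interrupts actually available.
 */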
2024