/*-
 * Copyright (C) 2012 Emulex
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

/* $FreeBSD$ */

#include "opt_inet6.h"
#include "opt_inet.h"

#include "oce_if.h"

/* Driver entry points prototypes */
static int  oce_probe(device_t dev);
static int  oce_attach(device_t dev);
static int  oce_detach(device_t dev);
static int  oce_shutdown(device_t dev);
static int  oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void oce_init(void *xsc);
static int  oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
static void oce_multiq_flush(struct ifnet *ifp);

/* Driver interrupt routine prototypes */
static void oce_intr(void *arg, int pending);
static int  oce_setup_intr(POCE_SOFTC sc);
static int  oce_fast_isr(void *arg);
static int  oce_alloc_intr(POCE_SOFTC sc, int vector,
                          void (*isr) (void *arg, int pending));

/* Media callbacks prototypes */
static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
static int  oce_media_change(struct ifnet *ifp);

/* Transmit routines prototypes */
static int  oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
                                        uint32_t status);
#if defined(INET6) || defined(INET)
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp,
                                        uint16_t *mss);
#endif
static int  oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
                                 struct oce_wq *wq);

/* Receive routines prototypes */
static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int  oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
#if defined(INET6) || defined(INET)
static void oce_rx_flush_lro(struct oce_rq *rq);
#endif
static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
                                                struct oce_nic_rx_cqe *cqe);

/* Helper function prototypes in this file */
static int  oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
static int  oce_vid_config(POCE_SOFTC sc);
static void oce_mac_addr_set(POCE_SOFTC sc);
static int  oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
static void oce_local_timer(void *arg);
#if defined(INET6) || defined(INET)
static int  oce_init_lro(POCE_SOFTC sc);
#endif
static void oce_if_deactivate(POCE_SOFTC sc);
static void oce_if_activate(POCE_SOFTC sc);
static void setup_max_queues_want(POCE_SOFTC sc);
static void update_queues_got(POCE_SOFTC sc);

static device_method_t oce_dispatch[] = {
        DEVMETHOD(device_probe, oce_probe),
        DEVMETHOD(device_attach, oce_attach),
        DEVMETHOD(device_detach, oce_detach),
        DEVMETHOD(device_shutdown, oce_shutdown),
        {0, 0}
};

static driver_t oce_driver = {
        "oce",
        oce_dispatch,
        sizeof(OCE_SOFTC)
};
static devclass_t oce_devclass;

DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
MODULE_DEPEND(oce, pci, 1, 1, 1);
MODULE_DEPEND(oce, ether, 1, 1, 1);
MODULE_VERSION(oce, 1);

/* global vars */
const char component_revision[32] = {"///" COMPONENT_REVISION "///"};

/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;

TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
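
/*
 * Both tunables are read when the module is loaded and can be set
 * from loader.conf, for example:
 *
 *	hw.oce.enable_rss="0"
 *	hw.oce.max_rsp_handled="64"
 */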

/* Supported devices table */
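/*
 * Each entry packs the PCI vendor id in the upper 16 bits and the
 * device id in the lower 16 bits; oce_probe() matches both halves
 * against the ids read from PCI config space.
 */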
static uint32_t supportedDevices[] =  {
        (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
        (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
        (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
};

/*****************************************************************************
 *                      Driver entry point functions                         *
 *****************************************************************************/

static int
oce_probe(device_t dev)
{
        uint16_t vendor;
        uint16_t device;
        int i;
        char str[80];
        POCE_SOFTC sc;

        sc = device_get_softc(dev);
        bzero(sc, sizeof(OCE_SOFTC));
        sc->dev = dev;

        vendor = pci_get_vendor(dev);
        device = pci_get_device(dev);

        for (i = 0; i < (sizeof(supportedDevices) / sizeof(supportedDevices[0])); i++) {
                if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
                        if (device == (supportedDevices[i] & 0xffff)) {
                                sprintf(str, "%s:%s",
                                        "Emulex CNA NIC function",
                                        component_revision);
                                device_set_desc_copy(dev, str);

                                switch (device) {
                                case PCI_PRODUCT_BE2:
                                        sc->flags |= OCE_FLAGS_BE2;
                                        break;
                                case PCI_PRODUCT_BE3:
                                        sc->flags |= OCE_FLAGS_BE3;
                                        break;
                                case PCI_PRODUCT_XE201:
                                case PCI_PRODUCT_XE201_VF:
                                        sc->flags |= OCE_FLAGS_XE201;
                                        break;
                                default:
                                        return ENXIO;
                                }
                                return BUS_PROBE_DEFAULT;
                        }
                }
        }

        return ENXIO;
}

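/*
 * Attach: allocate PCI resources, initialize the hardware, size and
 * set up interrupts and queues, create the ifnet, and start the
 * adapter; on any failure the labels below unwind in reverse order.
 */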
static int
oce_attach(device_t dev)
{
        POCE_SOFTC sc;
        int rc = 0;

        sc = device_get_softc(dev);

        rc = oce_hw_pci_alloc(sc);
        if (rc)
                return rc;

        sc->rss_enable   = oce_enable_rss;
        sc->tx_ring_size = OCE_TX_RING_SIZE;
        sc->rx_ring_size = OCE_RX_RING_SIZE;
        sc->rq_frag_size = OCE_RQ_BUF_SIZE;
        sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
        sc->promisc      = OCE_DEFAULT_PROMISCUOUS;

        LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
        LOCK_CREATE(&sc->dev_lock,  "Device_lock");

        /* initialise the hardware */
        rc = oce_hw_init(sc);
        if (rc)
                goto pci_res_free;

        setup_max_queues_want(sc);

        rc = oce_setup_intr(sc);
        if (rc)
                goto mbox_free;

        rc = oce_queue_init_all(sc);
        if (rc)
                goto intr_free;

        rc = oce_attach_ifp(sc);
        if (rc)
                goto queues_free;

#if defined(INET6) || defined(INET)
        rc = oce_init_lro(sc);
        if (rc)
                goto ifp_free;
#endif

        rc = oce_hw_start(sc);
        if (rc)
                goto lro_free;

        sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
                                oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
        sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
                                oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);

        rc = oce_stats_init(sc);
        if (rc)
                goto vlan_free;

        oce_add_sysctls(sc);

        callout_init(&sc->timer, CALLOUT_MPSAFE);
        rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
        if (rc)
                goto stats_free;

        return 0;

stats_free:
        callout_drain(&sc->timer);
        oce_stats_free(sc);
vlan_free:
        if (sc->vlan_attach)
                EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
        if (sc->vlan_detach)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
        oce_hw_intr_disable(sc);
lro_free:
#if defined(INET6) || defined(INET)
        oce_free_lro(sc);
ifp_free:
#endif
        ether_ifdetach(sc->ifp);
        if_free(sc->ifp);
queues_free:
        oce_queue_release_all(sc);
intr_free:
        oce_intr_free(sc);
mbox_free:
        oce_dma_free(sc, &sc->bsmbx);
pci_res_free:
        oce_hw_pci_free(sc);
        LOCK_DESTROY(&sc->dev_lock);
        LOCK_DESTROY(&sc->bmbx_lock);
        return rc;
}

static int
oce_detach(device_t dev)
{
        POCE_SOFTC sc = device_get_softc(dev);

        LOCK(&sc->dev_lock);
        oce_if_deactivate(sc);
        UNLOCK(&sc->dev_lock);

        callout_drain(&sc->timer);

        if (sc->vlan_attach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
        if (sc->vlan_detach != NULL)
                EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

        ether_ifdetach(sc->ifp);

        if_free(sc->ifp);

        oce_hw_shutdown(sc);

        bus_generic_detach(dev);

        return 0;
}

static int
oce_shutdown(device_t dev)
{
        int rc;

        rc = oce_detach(dev);

        return rc;
}

static int
oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ifreq *ifr = (struct ifreq *)data;
        POCE_SOFTC sc = ifp->if_softc;
        int rc = 0;
        uint32_t u;

        switch (command) {
        case SIOCGIFPSRCADDR_IN6:
        case SIOCGIFPSRCADDR:
        case SIOCGIFSTATUS:
        case SIOCSIFMEDIA:
        case SIOCGIFGENERIC:
        case SIOCGETMIFCNT_IN6:
                rc = ether_ioctl(ifp, command, data);
                break;

        case SIOCGIFMEDIA:
                rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
                break;

        case SIOCSIFMTU:
                if (ifr->ifr_mtu > OCE_MAX_MTU)
                        rc = EINVAL;
                else
                        ifp->if_mtu = ifr->ifr_mtu;
                break;

        case SIOCSIFFLAGS:
                if (ifp->if_flags & IFF_UP) {
                        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                                sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
                                oce_init(sc);
                        }
                        device_printf(sc->dev, "Interface Up\n");
                } else {
                        LOCK(&sc->dev_lock);

                        sc->ifp->if_drv_flags &=
                            ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
                        oce_if_deactivate(sc);

                        UNLOCK(&sc->dev_lock);

                        device_printf(sc->dev, "Interface Down\n");
                }

                if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
                        sc->promisc = TRUE;
                        oce_rxf_set_promiscuous(sc, sc->promisc);
                } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
                        sc->promisc = FALSE;
                        oce_rxf_set_promiscuous(sc, sc->promisc);
                }

                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                rc = oce_hw_update_multicast(sc);
                if (rc)
                        device_printf(sc->dev,
                                "Update multicast address failed\n");
                break;

        case SIOCSIFCAP:
                u = ifr->ifr_reqcap ^ ifp->if_capenable;

                if (u & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

                        if (IFCAP_TSO & ifp->if_capenable &&
                            !(IFCAP_TXCSUM & ifp->if_capenable)) {
                                ifp->if_capenable &= ~IFCAP_TSO;
                                ifp->if_hwassist &= ~CSUM_TSO;
                                if_printf(ifp,
                                         "TSO disabled due to -txcsum.\n");
                        }
                }

                if (u & IFCAP_RXCSUM)
                        ifp->if_capenable ^= IFCAP_RXCSUM;

                if (u & IFCAP_TSO4) {
                        ifp->if_capenable ^= IFCAP_TSO4;

                        if (IFCAP_TSO & ifp->if_capenable) {
                                if (IFCAP_TXCSUM & ifp->if_capenable)
                                        ifp->if_hwassist |= CSUM_TSO;
                                else {
                                        ifp->if_capenable &= ~IFCAP_TSO;
                                        ifp->if_hwassist &= ~CSUM_TSO;
                                        if_printf(ifp,
                                            "Enable txcsum first.\n");
                                        rc = EAGAIN;
                                }
                        } else
                                ifp->if_hwassist &= ~CSUM_TSO;
                }

                if (u & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

                if (u & IFCAP_VLAN_HWFILTER) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                        oce_vid_config(sc);
                }

#if defined(INET6) || defined(INET)
                if (u & IFCAP_LRO)
                        ifp->if_capenable ^= IFCAP_LRO;
#endif

                break;

        case SIOCGPRIVATE_0:
                rc = oce_handle_passthrough(ifp, data);
                break;
        default:
                rc = ether_ioctl(ifp, command, data);
                break;
        }

        return rc;
}

static void
oce_init(void *arg)
{
        POCE_SOFTC sc = arg;

        LOCK(&sc->dev_lock);

        if (sc->ifp->if_flags & IFF_UP) {
                oce_if_deactivate(sc);
                oce_if_activate(sc);
        }

        UNLOCK(&sc->dev_lock);
}

static int
oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
{
        POCE_SOFTC sc = ifp->if_softc;
        struct oce_wq *wq = NULL;
        int queue_index = 0;
        int status = 0;

        if ((m->m_flags & M_FLOWID) != 0)
                queue_index = m->m_pkthdr.flowid % sc->nwqs;

        wq = sc->wq[queue_index];

        if (TRY_LOCK(&wq->tx_lock)) {
                status = oce_multiq_transmit(ifp, m, wq);
                UNLOCK(&wq->tx_lock);
        } else {
                status = drbr_enqueue(ifp, wq->br, m);
        }
        return status;
}

static void
oce_multiq_flush(struct ifnet *ifp)
{
        POCE_SOFTC sc = ifp->if_softc;
        struct mbuf *m;
        int i = 0;

        for (i = 0; i < sc->nwqs; i++) {
                while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
                        m_freem(m);
        }
        if_qflush(ifp);
}

/*****************************************************************************
 *                      Driver interrupt routines                            *
 *****************************************************************************/

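/*
 * Deferred (taskqueue) interrupt handler.  It drains every posted EQE,
 * acknowledges them without re-arming, runs the handler of each CQ
 * attached to this EQ, then re-arms the CQs and finally the EQ itself.
 */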
static void
oce_intr(void *arg, int pending)
{
        POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
        POCE_SOFTC sc = ii->sc;
        struct oce_eq *eq = ii->eq;
        struct oce_eqe *eqe;
        struct oce_cq *cq = NULL;
        int i, num_eqes = 0;

        bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
                                 BUS_DMASYNC_POSTWRITE);
        do {
                eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
                if (eqe->evnt == 0)
                        break;
                eqe->evnt = 0;
                bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
                                        BUS_DMASYNC_POSTWRITE);
                RING_GET(eq->ring, 1);
                num_eqes++;
        } while (TRUE);

        if (!num_eqes)
                goto eq_arm; /* Spurious */

        /* Clear consumed EQ entries, but don't re-arm the EQ */
        oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);

        /* Process TX, RX and MCC completions, but don't arm the CQs yet */
        for (i = 0; i < eq->cq_valid; i++) {
                cq = eq->cq[i];
                (*cq->cq_handler)(cq->cb_arg);
        }

        /* Arm all cqs connected to this EQ */
        for (i = 0; i < eq->cq_valid; i++) {
                cq = eq->cq[i];
                oce_arm_cq(sc, cq->cq_id, 0, TRUE);
        }

eq_arm:
        oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
        return;
}

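/*
 * With RSS enabled one MSI-X vector is requested per queue (the larger
 * of the RQ count minus the default RQ and the WQ count); otherwise,
 * or if MSI-X allocation fails, a single INTx vector is used and the
 * queue counts are scaled down to match via update_queues_got().
 */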
static int
oce_setup_intr(POCE_SOFTC sc)
{
        int rc = 0, use_intx = 0;
        int vector = 0, req_vectors = 0;

        if (sc->rss_enable)
                req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
        else
                req_vectors = 1;

        if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
                sc->intr_count = req_vectors;
                rc = pci_alloc_msix(sc->dev, &sc->intr_count);
                if (rc != 0) {
                        use_intx = 1;
                        pci_release_msi(sc->dev);
                } else
                        sc->flags |= OCE_FLAGS_USING_MSIX;
        } else
                use_intx = 1;

        if (use_intx)
                sc->intr_count = 1;

        /* Scale number of queues based on intr we got */
        update_queues_got(sc);

        if (use_intx) {
                device_printf(sc->dev, "Using legacy interrupt\n");
                rc = oce_alloc_intr(sc, vector, oce_intr);
                if (rc)
                        goto error;
        } else {
                for (; vector < sc->intr_count; vector++) {
                        rc = oce_alloc_intr(sc, vector, oce_intr);
                        if (rc)
                                goto error;
                }
        }

        return 0;
error:
        oce_intr_free(sc);
        return rc;
}

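/*
 * Fast interrupt filter.  It only quiesces the EQ (the final TRUE
 * argument to oce_arm_eq() appears to clear the interrupt without
 * re-arming) and defers all completion work to the taskqueue.
 */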
static int
oce_fast_isr(void *arg)
{
        POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
        POCE_SOFTC sc = ii->sc;

        if (ii->eq == NULL)
                return FILTER_STRAY;

        oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);

        taskqueue_enqueue_fast(ii->tq, &ii->task);

        return FILTER_HANDLED;
}

static int
oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
{
        POCE_INTR_INFO ii = &sc->intrs[vector];
        int rc = 0, rr;

        if (vector >= OCE_MAX_EQ)
                return (EINVAL);

        /*
         * Set the resource id for the interrupt.
         * MSIx is vector + 1 for the resource id,
         * INTx is 0 for the resource id.
         */
        if (sc->flags & OCE_FLAGS_USING_MSIX)
                rr = vector + 1;
        else
                rr = 0;
        ii->intr_res = bus_alloc_resource_any(sc->dev,
                                              SYS_RES_IRQ,
                                              &rr, RF_ACTIVE|RF_SHAREABLE);
        ii->irq_rr = rr;
        if (ii->intr_res == NULL) {
                device_printf(sc->dev,
                          "Could not allocate interrupt\n");
                rc = ENXIO;
                return rc;
        }

        TASK_INIT(&ii->task, 0, isr, ii);
        ii->vector = vector;
        sprintf(ii->task_name, "oce_task[%d]", ii->vector);
        ii->tq = taskqueue_create_fast(ii->task_name,
                        M_NOWAIT,
                        taskqueue_thread_enqueue,
                        &ii->tq);
        taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
                        device_get_nameunit(sc->dev));

        ii->sc = sc;
        rc = bus_setup_intr(sc->dev,
                        ii->intr_res,
                        INTR_TYPE_NET,
                        oce_fast_isr, NULL, ii, &ii->tag);
        return rc;
}

void
oce_intr_free(POCE_SOFTC sc)
{
        int i = 0;

        for (i = 0; i < sc->intr_count; i++) {

                if (sc->intrs[i].tag != NULL)
                        bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
                                                sc->intrs[i].tag);
                if (sc->intrs[i].tq != NULL)
                        taskqueue_free(sc->intrs[i].tq);

                if (sc->intrs[i].intr_res != NULL)
                        bus_release_resource(sc->dev, SYS_RES_IRQ,
                                                sc->intrs[i].irq_rr,
                                                sc->intrs[i].intr_res);
                sc->intrs[i].tag = NULL;
                sc->intrs[i].intr_res = NULL;
        }

        if (sc->flags & OCE_FLAGS_USING_MSIX)
                pci_release_msi(sc->dev);
}

/******************************************************************************
 *                        Media callback functions                             *
 ******************************************************************************/

static void
oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
{
        POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;

        req->ifm_status = IFM_AVALID;
        req->ifm_active = IFM_ETHER;

        if (sc->link_status == 1)
                req->ifm_status |= IFM_ACTIVE;
        else
                return;

        switch (sc->link_speed) {
        case 1: /* 10 Mbps */
                req->ifm_active |= IFM_10_T | IFM_FDX;
                sc->speed = 10;
                break;
        case 2: /* 100 Mbps */
                req->ifm_active |= IFM_100_TX | IFM_FDX;
                sc->speed = 100;
                break;
        case 3: /* 1 Gbps */
                req->ifm_active |= IFM_1000_T | IFM_FDX;
                sc->speed = 1000;
                break;
        case 4: /* 10 Gbps */
                req->ifm_active |= IFM_10G_SR | IFM_FDX;
                sc->speed = 10000;
                break;
        }

        return;
}

int
oce_media_change(struct ifnet *ifp)
{
        return 0;
}

/*****************************************************************************
 *                           Transmit routines                               *
 *****************************************************************************/

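/*
 * Post one packet on work queue wq_index.  A request is one NIC header
 * WQE plus one fragment WQE per DMA segment; on BE adapters a zeroed
 * dummy WQE pads the request to an even WQE count.  The request is
 * handed to the adapter by writing (num_wqes << 16) | wq_id to the
 * PD_TXULP_DB doorbell.
 */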
static int
oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
        int rc = 0, i, retry_cnt = 0;
        bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
        struct mbuf *m, *m_temp;
        struct oce_wq *wq = sc->wq[wq_index];
        struct oce_packet_desc *pd;
        uint32_t out;
        struct oce_nic_hdr_wqe *nichdr;
        struct oce_nic_frag_wqe *nicfrag;
        int num_wqes;
        uint32_t reg_value;
#if defined(INET6) || defined(INET)
        uint16_t mss = 0;
#endif

        m = *mpp;
        if (!m)
                return EINVAL;

        if (!(m->m_flags & M_PKTHDR)) {
                rc = ENXIO;
                goto free_ret;
        }

        if (m->m_pkthdr.csum_flags & CSUM_TSO) {
#if defined(INET6) || defined(INET)
                /* consolidate packet buffers for TSO/LSO segment offload */
                m = oce_tso_setup(sc, mpp, &mss);
#else
                m = NULL;
#endif
                if (m == NULL) {
                        rc = ENXIO;
                        goto free_ret;
                }
        }

        out = wq->packets_out + 1;
        if (out == OCE_WQ_PACKET_ARRAY_SIZE)
                out = 0;
        if (out == wq->packets_in)
                return EBUSY;

        pd = &wq->pckts[wq->packets_out];
retry:
        rc = bus_dmamap_load_mbuf_sg(wq->tag,
                                     pd->map,
                                     m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
        if (rc == 0) {
                num_wqes = pd->nsegs + 1;
                if (IS_BE(sc)) {
                        /* Dummy required only for BE3. */
                        if (num_wqes & 1)
                                num_wqes++;
                }
                if (num_wqes >= RING_NUM_FREE(wq->ring)) {
                        bus_dmamap_unload(wq->tag, pd->map);
                        return EBUSY;
                }

                bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
                pd->mbuf = m;
                wq->packets_out = out;

                nichdr =
                    RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
                nichdr->u0.dw[0] = 0;
                nichdr->u0.dw[1] = 0;
                nichdr->u0.dw[2] = 0;
                nichdr->u0.dw[3] = 0;

                nichdr->u0.s.complete = 1;
                nichdr->u0.s.event = 1;
                nichdr->u0.s.crc = 1;
                nichdr->u0.s.forward = 0;
                nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
                nichdr->u0.s.udpcs =
                    (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
                nichdr->u0.s.tcpcs =
                    (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
                nichdr->u0.s.num_wqe = num_wqes;
                nichdr->u0.s.total_length = m->m_pkthdr.len;
                if (m->m_flags & M_VLANTAG) {
                        nichdr->u0.s.vlan = 1; /* VLAN present */
                        nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
                }
                if (m->m_pkthdr.csum_flags & CSUM_TSO) {
                        if (m->m_pkthdr.tso_segsz) {
                                nichdr->u0.s.lso = 1;
                                nichdr->u0.s.lso_mss  = m->m_pkthdr.tso_segsz;
                        }
                        if (!IS_BE(sc))
                                nichdr->u0.s.ipcs = 1;
                }

                RING_PUT(wq->ring, 1);
                wq->ring->num_used++;

                for (i = 0; i < pd->nsegs; i++) {
                        nicfrag =
                            RING_GET_PRODUCER_ITEM_VA(wq->ring,
                                                      struct oce_nic_frag_wqe);
                        nicfrag->u0.s.rsvd0 = 0;
                        nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
                        nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
                        nicfrag->u0.s.frag_len = segs[i].ds_len;
                        pd->wqe_idx = wq->ring->pidx;
                        RING_PUT(wq->ring, 1);
                        wq->ring->num_used++;
                }
                if (num_wqes > (pd->nsegs + 1)) {
                        nicfrag =
                            RING_GET_PRODUCER_ITEM_VA(wq->ring,
                                                      struct oce_nic_frag_wqe);
                        nicfrag->u0.dw[0] = 0;
                        nicfrag->u0.dw[1] = 0;
                        nicfrag->u0.dw[2] = 0;
                        nicfrag->u0.dw[3] = 0;
                        pd->wqe_idx = wq->ring->pidx;
                        RING_PUT(wq->ring, 1);
                        wq->ring->num_used++;
                        pd->nsegs++;
                }

                sc->ifp->if_opackets++;
                wq->tx_stats.tx_reqs++;
                wq->tx_stats.tx_wrbs += num_wqes;
                wq->tx_stats.tx_bytes += m->m_pkthdr.len;
                wq->tx_stats.tx_pkts++;

                bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
                                BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                reg_value = (num_wqes << 16) | wq->wq_id;
                OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);

        } else if (rc == EFBIG) {
                if (retry_cnt == 0) {
                        m_temp = m_defrag(m, M_DONTWAIT);
                        if (m_temp == NULL)
                                goto free_ret;
                        m = m_temp;
                        *mpp = m_temp;
                        retry_cnt = retry_cnt + 1;
                        goto retry;
                } else
                        goto free_ret;
        } else if (rc == ENOMEM)
                return rc;
        else
                goto free_ret;

        return 0;

free_ret:
        m_freem(*mpp);
        *mpp = NULL;
        return rc;
}

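/*
 * Reclaim the descriptor of a completed transmit, free its mbuf
 * chain, and restart a flow-blocked interface once the ring drains
 * to half of its capacity.
 */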
static void
oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
{
        uint32_t in;
        struct oce_packet_desc *pd;
        POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
        struct mbuf *m;

        if (wq->packets_out == wq->packets_in)
                device_printf(sc->dev, "WQ transmit descriptor missing\n");

        in = wq->packets_in + 1;
        if (in == OCE_WQ_PACKET_ARRAY_SIZE)
                in = 0;

        pd = &wq->pckts[wq->packets_in];
        wq->packets_in = in;
        wq->ring->num_used -= (pd->nsegs + 1);
        bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(wq->tag, pd->map);

        m = pd->mbuf;
        m_freem(m);
        pd->mbuf = NULL;

        if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
                if (wq->ring->num_used < (wq->ring->num_items / 2)) {
                        sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
                        oce_tx_restart(sc, wq);
                }
        }
}

static void
oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
{

        if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
                return;

#if __FreeBSD_version >= 800000
        if (!drbr_empty(sc->ifp, wq->br))
#else
        if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
#endif
                taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
}

#if defined(INET6) || defined(INET)
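/*
 * Prepare an mbuf chain for TSO: duplicate it when not writable, then
 * pull the Ethernet/IP(v6)/TCP headers into a single contiguous
 * buffer so the header WQE can be built from linear data.
 */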
static struct mbuf *
oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp, uint16_t *mss)
{
        struct mbuf *m;
#ifdef INET
        struct ip *ip;
#endif
#ifdef INET6
        struct ip6_hdr *ip6;
#endif
        struct ether_vlan_header *eh;
        struct tcphdr *th;
        int total_len = 0;
        uint16_t etype;
        int ehdrlen = 0;

        m = *mpp;
        *mss = m->m_pkthdr.tso_segsz;

        if (M_WRITABLE(m) == 0) {
                m = m_dup(*mpp, M_DONTWAIT);
                if (!m)
                        return NULL;
                m_freem(*mpp);
                *mpp = m;
        }

        eh = mtod(m, struct ether_vlan_header *);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                etype = ntohs(eh->evl_proto);
                ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        } else {
                etype = ntohs(eh->evl_encap_proto);
                ehdrlen = ETHER_HDR_LEN;
        }

        switch (etype) {
#ifdef INET
        case ETHERTYPE_IP:
                ip = (struct ip *)(m->m_data + ehdrlen);
                if (ip->ip_p != IPPROTO_TCP)
                        return NULL;
                th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));

                total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
                break;
#endif
#ifdef INET6
        case ETHERTYPE_IPV6:
                ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
                if (ip6->ip6_nxt != IPPROTO_TCP)
                        return NULL;
                th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));

                total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
                break;
#endif
        default:
                return NULL;
        }

        m = m_pullup(m, total_len);
        if (!m)
                return NULL;
        *mpp = m;
        return m;
}
#endif /* INET6 || INET */

void
oce_tx_task(void *arg, int npending)
{
        struct oce_wq *wq = arg;
        POCE_SOFTC sc = wq->parent;
        struct ifnet *ifp = sc->ifp;
        int rc = 0;

#if __FreeBSD_version >= 800000
        if (TRY_LOCK(&wq->tx_lock)) {
                rc = oce_multiq_transmit(ifp, NULL, wq);
                if (rc) {
                        device_printf(sc->dev,
                         "TX[%d] restart failed\n", wq->queue_index);
                }
                UNLOCK(&wq->tx_lock);
        }
#else
        oce_start(ifp);
#endif
}

void
oce_start(struct ifnet *ifp)
{
        POCE_SOFTC sc = ifp->if_softc;
        struct mbuf *m;
        int rc = 0;

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                        IFF_DRV_RUNNING)
                return;

        do {
                IF_DEQUEUE(&sc->ifp->if_snd, m);
                if (m == NULL)
                        break;
                /* oce_start always uses default TX queue 0 */
                LOCK(&sc->wq[0]->tx_lock);
                rc = oce_tx(sc, &m, 0);
                UNLOCK(&sc->wq[0]->tx_lock);
                if (rc) {
                        if (m != NULL) {
                                sc->wq[0]->tx_stats.tx_stops++;
                                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                                IFQ_DRV_PREPEND(&ifp->if_snd, m);
                                m = NULL;
                        }
                        break;
                }
                if (m != NULL)
                        ETHER_BPF_MTAP(ifp, m);
        } while (1);

        return;
}

/* Handle the Completion Queue for transmit */
uint16_t
oce_wq_handler(void *arg)
{
        struct oce_wq *wq = (struct oce_wq *)arg;
        POCE_SOFTC sc = wq->parent;
        struct oce_cq *cq = wq->cq;
        struct oce_nic_tx_cqe *cqe;
        int num_cqes = 0;

        LOCK(&wq->tx_lock);
        bus_dmamap_sync(cq->ring->dma.tag,
                        cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
        while (cqe->u0.dw[3]) {
                DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));

                wq->ring->cidx = cqe->u0.s.wqe_index + 1;
                if (wq->ring->cidx >= wq->ring->num_items)
                        wq->ring->cidx -= wq->ring->num_items;

                oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
                wq->tx_stats.tx_compl++;
                cqe->u0.dw[3] = 0;
                RING_GET(cq->ring, 1);
                bus_dmamap_sync(cq->ring->dma.tag,
                                cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
                cqe =
                    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
                num_cqes++;
        }

        if (num_cqes)
                oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
        UNLOCK(&wq->tx_lock);

        return 0;
}

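/*
 * Drain the per-WQ buf_ring under the caller-held tx lock.  When the
 * hardware ring fills, the current packet is re-queued and
 * IFF_DRV_OACTIVE is set until oce_tx_complete() restarts the queue.
 */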
static int
oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
{
        POCE_SOFTC sc = ifp->if_softc;
        int status = 0, queue_index = 0;
        struct mbuf *next = NULL;
        struct buf_ring *br = NULL;

        br  = wq->br;
        queue_index = wq->queue_index;

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
                IFF_DRV_RUNNING) {
                if (m != NULL)
                        status = drbr_enqueue(ifp, br, m);
                return status;
        }

        if (m == NULL)
                next = drbr_dequeue(ifp, br);
        else if (drbr_needs_enqueue(ifp, br)) {
                if ((status = drbr_enqueue(ifp, br, m)) != 0)
                        return status;
                next = drbr_dequeue(ifp, br);
        } else
                next = m;

        while (next != NULL) {
                if (oce_tx(sc, &next, queue_index)) {
                        if (next != NULL) {
                                wq->tx_stats.tx_stops++;
                                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                                status = drbr_enqueue(ifp, br, next);
                        }
                        break;
                }
                drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
                ETHER_BPF_MTAP(ifp, next);
                next = drbr_dequeue(ifp, br);
        }

        return status;
}

/*****************************************************************************
 *                           Receive routines                                *
 *****************************************************************************/

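/*
 * Assemble a received frame from its RQ fragments into an mbuf chain,
 * translate the CQE checksum/VLAN bits into mbuf metadata, and pass
 * the packet to LRO when it qualifies, otherwise straight to if_input.
 */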
static void
oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
{
        uint32_t out;
        struct oce_packet_desc *pd;
        POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
        int i, len, frag_len;
        struct mbuf *m = NULL, *tail = NULL;
        uint16_t vtag;

        len = cqe->u0.s.pkt_size;
        vtag = cqe->u0.s.vlan_tag;
        if (!len) {
                /* partial DMA workaround for Lancer */
                oce_discard_rx_comp(rq, cqe);
                goto exit;
        }

        for (i = 0; i < cqe->u0.s.num_fragments; i++) {

                if (rq->packets_out == rq->packets_in) {
                        device_printf(sc->dev,
                                  "RQ receive descriptor missing\n");
                }
                out = rq->packets_out + 1;
                if (out == OCE_RQ_PACKET_ARRAY_SIZE)
                        out = 0;
                pd = &rq->pckts[rq->packets_out];
                rq->packets_out = out;

                bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(rq->tag, pd->map);
                rq->pending--;

                frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
                pd->mbuf->m_len = frag_len;

                if (tail != NULL) {
                        /* additional fragments */
                        pd->mbuf->m_flags &= ~M_PKTHDR;
                        tail->m_next = pd->mbuf;
                        tail = pd->mbuf;
                } else {
                        /* first fragment, fill out much of the packet header */
                        pd->mbuf->m_pkthdr.len = len;
                        pd->mbuf->m_pkthdr.csum_flags = 0;
                        if (IF_CSUM_ENABLED(sc)) {
                                if (cqe->u0.s.l4_cksum_pass) {
                                        pd->mbuf->m_pkthdr.csum_flags |=
                                            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                                        pd->mbuf->m_pkthdr.csum_data = 0xffff;
                                }
                                if (cqe->u0.s.ip_cksum_pass) {
                                        if (!cqe->u0.s.ip_ver) { /* IPv4 */
                                                pd->mbuf->m_pkthdr.csum_flags |=
                                                (CSUM_IP_CHECKED|CSUM_IP_VALID);
                                        }
                                }
                        }
                        m = tail = pd->mbuf;
                }
                pd->mbuf = NULL;
                len -= frag_len;
        }

        if (m) {
                if (!oce_cqe_portid_valid(sc, cqe)) {
                         m_freem(m);
                         goto exit;
                }

                m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
                m->m_pkthdr.flowid = rq->queue_index;
                m->m_flags |= M_FLOWID;
#endif
                /* Set the VLAN tag in the mbuf if the CQE carries one */
                if (oce_cqe_vtp_valid(sc, cqe)) {
                        if (sc->function_mode & FNM_FLEX10_MODE) {
                                /* FLEX10: honor the VLAN tag only when QnQ is set */
                                if (cqe->u0.s.qnq) {
                                        if (IS_BE(sc))
                                                m->m_pkthdr.ether_vtag =
                                                                BSWAP_16(vtag);
                                        else
                                                m->m_pkthdr.ether_vtag = vtag;
                                        m->m_flags |= M_VLANTAG;
                                }
                        } else {
                                if (IS_BE(sc))
                                        m->m_pkthdr.ether_vtag = BSWAP_16(vtag);
                                else
                                        m->m_pkthdr.ether_vtag = vtag;
                                m->m_flags |= M_VLANTAG;
                        }
                }

                sc->ifp->if_ipackets++;
#if defined(INET6) || defined(INET)
                /* Try to queue to LRO */
                if (IF_LRO_ENABLED(sc) &&
                    !(m->m_flags & M_VLANTAG) &&
                    (cqe->u0.s.ip_cksum_pass) &&
                    (cqe->u0.s.l4_cksum_pass) &&
                    (!cqe->u0.s.ip_ver)       &&
                    (rq->lro.lro_cnt != 0)) {

                        if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
                                rq->lro_pkts_queued++;
                                goto post_done;
                        }
                        /* If LRO posting fails then try to post to STACK */
                }
#endif

                (*sc->ifp->if_input) (sc->ifp, m);
#if defined(INET6) || defined(INET)
post_done:
#endif
                /* Update rx stats per queue */
                rq->rx_stats.rx_pkts++;
                rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
                rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
                if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
                        rq->rx_stats.rx_mcast_pkts++;
                if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
                        rq->rx_stats.rx_ucast_pkts++;
        }
exit:
        return;
}

static void
oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
        uint32_t out, i = 0;
        struct oce_packet_desc *pd;
        POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
        int num_frags = cqe->u0.s.num_fragments;

        if (IS_XE201(sc) && cqe->u0.s.error) {
                /*
                 * Lancer A0 workaround:
                 * num_frags will be 1 more than actual in case of error
                 */
                if (num_frags)
                        num_frags -= 1;
        }
        for (i = 0; i < num_frags; i++) {
                if (rq->packets_out == rq->packets_in) {
                        device_printf(sc->dev,
                                "RQ receive descriptor missing\n");
                }
                out = rq->packets_out + 1;
                if (out == OCE_RQ_PACKET_ARRAY_SIZE)
                        out = 0;
                pd = &rq->pckts[rq->packets_out];
                rq->packets_out = out;

                bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(rq->tag, pd->map);
                rq->pending--;
                m_freem(pd->mbuf);
        }
}

static int
oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
        struct oce_nic_rx_cqe_v1 *cqe_v1;
        int vtp = 0;

        if (sc->be3_native) {
                cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
                vtp =  cqe_v1->u0.s.vlan_tag_present;
        } else {
                vtp = cqe->u0.s.vlan_tag_present;
        }

        return vtp;
}

static int
oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
{
        struct oce_nic_rx_cqe_v1 *cqe_v1;
        int port_id = 0;

        if (sc->be3_native && IS_BE(sc)) {
                cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
                port_id =  cqe_v1->u0.s.port;
                if (sc->port_id != port_id)
                        return 0;
        }
        /* For BE3 legacy and Lancer the port id check does not apply */

        return 1;
}

#if defined(INET6) || defined(INET)
static void
oce_rx_flush_lro(struct oce_rq *rq)
{
        struct lro_ctrl *lro = &rq->lro;
        struct lro_entry *queued;
        POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

        if (!IF_LRO_ENABLED(sc))
                return;

        while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, queued);
        }
        rq->lro_pkts_queued = 0;

        return;
}

static int
oce_init_lro(POCE_SOFTC sc)
{
        struct lro_ctrl *lro = NULL;
        int i = 0, rc = 0;

        for (i = 0; i < sc->nrqs; i++) {
                lro = &sc->rq[i]->lro;
                rc = tcp_lro_init(lro);
                if (rc != 0) {
                        device_printf(sc->dev, "LRO init failed\n");
                        return rc;
                }
                lro->ifp = sc->ifp;
        }

        return rc;
}
#endif /* INET6 || INET */

void
oce_free_lro(POCE_SOFTC sc)
{
#if defined(INET6) || defined(INET)
        struct lro_ctrl *lro = NULL;
        int i = 0;

        for (i = 0; i < sc->nrqs; i++) {
                lro = &sc->rq[i]->lro;
                if (lro)
                        tcp_lro_free(lro);
        }
#endif
}

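/*
 * Refill the receive ring with up to "count" mbuf clusters and
 * advertise them through the PD_RXULP_DB doorbell in batches of at
 * most OCE_MAX_RQ_POSTS buffers.
 */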
1503 int
1504 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1505 {
1506         POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1507         int i, in, rc;
1508         struct oce_packet_desc *pd;
1509         bus_dma_segment_t segs[6];
1510         int nsegs, added = 0;
1511         struct oce_nic_rqe *rqe;
1512         pd_rxulp_db_t rxdb_reg;
1513
1514
1515         for (i = 0; i < count; i++) {
1516                 in = rq->packets_in + 1;
1517                 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1518                         in = 0;
1519                 if (in == rq->packets_out)
1520                         break;  /* no more room */
1521
1522                 pd = &rq->pckts[rq->packets_in];
1523                 pd->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1524                 if (pd->mbuf == NULL)
1525                         break;
1526
1527                 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1528                 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1529                                              pd->map,
1530                                              pd->mbuf,
1531                                              segs, &nsegs, BUS_DMA_NOWAIT);
1532                 if (rc) {
1533                         m_free(pd->mbuf);
1534                         break;
1535                 }
1536
1537                 if (nsegs != 1) {
1538                         i--;
1539                         continue;
1540                 }
1541
1542                 rq->packets_in = in;
1543                 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
1544
1545                 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1546                 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1547                 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1548                 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1549                 RING_PUT(rq->ring, 1);
1550                 added++;
1551                 rq->pending++;
1552         }
1553         if (added != 0) {
1554                 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1555                         DELAY(1);
1556                         rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1557                         rxdb_reg.bits.qid = rq->rq_id;
1558                         OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1559                         added -= OCE_MAX_RQ_POSTS;
1560                 }
1561                 if (added > 0) {
1562                         DELAY(1);
1563                         rxdb_reg.bits.qid = rq->rq_id;
1564                         rxdb_reg.bits.num_posted = added;
1565                         OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1566                 }
1567         }
1568         
1569         return 0;       
1570 }
1571
1572
1573 /* Handle the Completion Queue for receive */
1574 uint16_t
1575 oce_rq_handler(void *arg)
1576 {
        struct oce_rq *rq = (struct oce_rq *)arg;
        struct oce_cq *cq = rq->cq;
        POCE_SOFTC sc = rq->parent;
        struct oce_nic_rx_cqe *cqe;
        int num_cqes = 0, rq_buffers_used = 0;


        LOCK(&rq->rx_lock);
        bus_dmamap_sync(cq->ring->dma.tag,
                        cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
        while (cqe->u0.dw[2]) {
                DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));

                RING_GET(rq->ring, 1);
                if (cqe->u0.s.error == 0) {
                        oce_rx(rq, cqe->u0.s.frag_index, cqe);
                } else {
                        rq->rx_stats.rxcp_err++;
                        sc->ifp->if_ierrors++;
                        if (IS_XE201(sc))
                                /* Lancer A0 no-buffer workaround */
                                oce_discard_rx_comp(rq, cqe);
                        else
                                /* Post L3/L4 errors to the stack. */
                                oce_rx(rq, cqe->u0.s.frag_index, cqe);
                }
                rq->rx_stats.rx_compl++;
                cqe->u0.dw[2] = 0;

#if defined(INET6) || defined(INET)
                if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
                        oce_rx_flush_lro(rq);
                }
#endif

                RING_GET(cq->ring, 1);
                bus_dmamap_sync(cq->ring->dma.tag,
                                cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
                cqe =
                    RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
                num_cqes++;
                if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
                        break;
        }
#if defined(INET6) || defined(INET)
        if (IF_LRO_ENABLED(sc))
                oce_rx_flush_lro(rq);
#endif

        if (num_cqes) {
                oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
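                /*
                 * rq->pending counts buffers posted to the hardware but
                 * not yet consumed, so the difference below is the number
                 * of free slots available for replenishing (one slot is
                 * held back).
                 */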
                rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
                if (rq_buffers_used > 1)
                        oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
        }

        UNLOCK(&rq->rx_lock);

        return 0;
}




/*****************************************************************************
 *                      Helper functions in this file                        *
 *****************************************************************************/

static int
oce_attach_ifp(POCE_SOFTC sc)
{

        sc->ifp = if_alloc(IFT_ETHER);
        if (!sc->ifp)
                return ENOMEM;

        ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
        ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

        sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
        sc->ifp->if_ioctl = oce_ioctl;
        sc->ifp->if_start = oce_start;
        sc->ifp->if_init = oce_init;
        sc->ifp->if_mtu = ETHERMTU;
        sc->ifp->if_softc = sc;
#if __FreeBSD_version >= 800000
        sc->ifp->if_transmit = oce_multiq_start;
        sc->ifp->if_qflush = oce_multiq_flush;
#endif

        if_initname(sc->ifp,
                    device_get_name(sc->dev), device_get_unit(sc->dev));

        sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
        IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
        IFQ_SET_READY(&sc->ifp->if_snd);

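        /*
         * Advertise checksum and TSO assistance to the stack along with
         * the matching interface capabilities; everything advertised here
         * is enabled by default via if_capenable below.
         */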
        sc->ifp->if_hwassist = OCE_IF_HWASSIST;
        sc->ifp->if_hwassist |= CSUM_TSO;
        sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);

        sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
        sc->ifp->if_capabilities |= IFCAP_HWCSUM;
        sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
#if defined(INET6) || defined(INET)
        sc->ifp->if_capabilities |= IFCAP_TSO;
        sc->ifp->if_capabilities |= IFCAP_LRO;
#endif

        sc->ifp->if_capenable = sc->ifp->if_capabilities;
        sc->ifp->if_baudrate = IF_Gbps(10UL);

        ether_ifattach(sc->ifp, sc->macaddr.mac_addr);

        return 0;
}


static void
oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
        POCE_SOFTC sc = ifp->if_softc;

        if (ifp->if_softc != arg)
                return;
        if ((vtag == 0) || (vtag > 4095))
                return;

        sc->vlan_tag[vtag] = 1;
        sc->vlans_added++;
        oce_vid_config(sc);
}


static void
oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
{
        POCE_SOFTC sc = ifp->if_softc;

        if (ifp->if_softc != arg)
                return;
        if ((vtag == 0) || (vtag > 4095))
                return;

        sc->vlan_tag[vtag] = 0;
        sc->vlans_added--;
        oce_vid_config(sc);
}


/*
 * A maximum of 64 VLANs can be filtered in hardware on BE. If the user
 * configures more than that, place the port in VLAN promiscuous mode.
 */
static int
oce_vid_config(POCE_SOFTC sc)
{
        struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
        uint16_t ntags = 0, i;
        int status = 0;

        if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
                        (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
                for (i = 0; i < MAX_VLANS; i++) {
                        if (sc->vlan_tag[i]) {
                                vtags[ntags].vtag = i;
                                ntags++;
                        }
                }
                if (ntags)
                        status = oce_config_vlan(sc, (uint8_t) sc->if_id,
                                                vtags, ntags, 1, 0);
        } else
                status = oce_config_vlan(sc, (uint8_t) sc->if_id,
                                                NULL, 0, 1, 1);
        return status;
}
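
/*
 * Worked example (assuming MAX_VLANFILTER_SIZE is 64, per the comment
 * above): with up to 64 VLANs added and IFCAP_VLAN_HWFILTER enabled,
 * the collected tag list is programmed into the hardware filter.
 * Adding a 65th VLAN takes the else branch, which passes a NULL tag
 * list and asks firmware for VLAN promiscuous behaviour instead.
 */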


static void
oce_mac_addr_set(POCE_SOFTC sc)
{
        uint32_t old_pmac_id = sc->pmac_id;
        int status = 0;

        status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
                         sc->macaddr.size_of_struct);
        if (!status)
                return;

        status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
                                        sc->if_id, &sc->pmac_id);
        if (!status) {
                status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
                bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
                                 sc->macaddr.size_of_struct);
        }
        if (status)
                device_printf(sc->dev, "Failed to update MAC address\n");
}


static int
oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
{
        POCE_SOFTC sc = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        int rc = ENXIO;
        char cookie[32] = {0};
        void *priv_data = (void *)ifr->ifr_data;
        void *ioctl_ptr;
        uint32_t req_size;
        struct mbx_hdr req;
        OCE_DMA_MEM dma_mem;


        if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
                return EFAULT;

        if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
                return EINVAL;

        ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
        if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
                return EFAULT;

        req_size = le32toh(req.u0.req.request_length);
        if (req_size > 65536)
                return EINVAL;

        req_size += sizeof(struct mbx_hdr);
        rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
        if (rc)
                return ENOMEM;

        if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
                rc = EFAULT;
                goto dma_free;
        }

        rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
        if (rc) {
                rc = EIO;
                goto dma_free;
        }

        if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
                rc = EFAULT;

dma_free:
        oce_dma_free(sc, &dma_mem);
        return rc;
}
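
/*
 * Layout of the user buffer the passthrough handler above expects
 * (an illustrative sketch, not a structure defined by the driver):
 * ifr_data points at the IOCTL_COOKIE string, immediately followed
 * by a mailbox request that starts with struct mbx_hdr; the response
 * is copied back out over the same region on completion.
 *
 *   +------------------+--------------------+---------------------+
 *   |   IOCTL_COOKIE   |   struct mbx_hdr   |   request payload   |
 *   | (strlen() bytes) | (length in header, | (request_length     |
 *   |                  |   little endian)   |   bytes, <= 64KB)   |
 *   +------------------+--------------------+---------------------+
 */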


static void
oce_local_timer(void *arg)
{
        POCE_SOFTC sc = arg;
        int i = 0;

        oce_refresh_nic_stats(sc);
        oce_refresh_queue_stats(sc);
        oce_mac_addr_set(sc);

        /* TX watchdog: kick the transmit queues so stalled TX resumes */
        for (i = 0; i < sc->nwqs; i++)
                oce_tx_restart(sc, sc->wq[i]);

        callout_reset(&sc->timer, hz, oce_local_timer, sc);
}


static void
oce_if_deactivate(POCE_SOFTC sc)
{
        int i, mtime = 0;
        int wait_req = 0;
        struct oce_rq *rq;
        struct oce_wq *wq;
        struct oce_eq *eq;

        sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

        /* Wait a maximum of 400ms for TX completions to be done */
        while (mtime < 400) {
                wait_req = 0;
                for_all_wq_queues(sc, wq, i) {
                        if (wq->ring->num_used) {
                                wait_req = 1;
                                DELAY(1000);    /* 1ms per pass, 400 passes */
                                break;
                        }
                }
                mtime += 1;
                if (!wait_req)
                        break;
        }

        /* Stop intrs and finish any bottom halves pending */
        oce_hw_intr_disable(sc);

        for (i = 0; i < sc->intr_count; i++) {
                if (sc->intrs[i].tq != NULL) {
                        taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
                }
        }

        /* Delete RX queue in card with flush param */
        oce_stop_rx(sc);

        /* Invalidate any pending cq and eq entries */
        for_all_evnt_queues(sc, eq, i)
                oce_drain_eq(eq);
        for_all_rq_queues(sc, rq, i)
                oce_drain_rq_cq(rq);
        for_all_wq_queues(sc, wq, i)
                oce_drain_wq_cq(wq);

        /*
         * We still need to receive MCC async events, so re-enable
         * interrupts and arm the first EQ.
         */
        oce_hw_intr_enable(sc);
        oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);

        DELAY(10);
}


static void
oce_if_activate(POCE_SOFTC sc)
{
        struct oce_eq *eq;
        struct oce_rq *rq;
        struct oce_wq *wq;
        int i, rc = 0;

        sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

        oce_hw_intr_disable(sc);

        oce_start_rx(sc);

        for_all_rq_queues(sc, rq, i) {
                rc = oce_start_rq(rq);
                if (rc)
                        device_printf(sc->dev, "Unable to start RX\n");
        }

        for_all_wq_queues(sc, wq, i) {
                rc = oce_start_wq(wq);
                if (rc)
                        device_printf(sc->dev, "Unable to start TX\n");
        }

        for_all_evnt_queues(sc, eq, i)
                oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);

        oce_hw_intr_enable(sc);
}

/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
{
        struct oce_mq *mq = (struct oce_mq *)arg;
        POCE_SOFTC sc = mq->parent;
        struct oce_cq *cq = mq->cq;
        int num_cqes = 0;
        struct oce_mq_cqe *cqe;
        struct oce_async_cqe_link_state *acqe;

        bus_dmamap_sync(cq->ring->dma.tag,
                        cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
        while (cqe->u0.dw[3]) {
                DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
                if (cqe->u0.s.async_event) {
                        acqe = (struct oce_async_cqe_link_state *)cqe;
                        if (acqe->u0.s.event_code ==
                            ASYNC_EVENT_CODE_LINK_STATE) {
                                /* Only link-state async CQEs carry valid
                                   link status and speed fields; other
                                   async events are consumed untouched. */
                                if ((acqe->u0.s.link_status &
                                    ~ASYNC_EVENT_LOGICAL) ==
                                    ASYNC_EVENT_LINK_UP) {
                                        sc->link_status =
                                            ASYNC_EVENT_LINK_UP;
                                        if_link_state_change(sc->ifp,
                                            LINK_STATE_UP);
                                } else {
                                        sc->link_status =
                                            ASYNC_EVENT_LINK_DOWN;
                                        if_link_state_change(sc->ifp,
                                            LINK_STATE_DOWN);
                                }
                                sc->link_speed = acqe->u0.s.speed;
                                sc->qos_link_speed =
                                    (uint32_t)acqe->u0.s.qos_link_speed * 10;
                        }
                }
                cqe->u0.dw[3] = 0;
                RING_GET(cq->ring, 1);
                RING_GET(mq->ring, 1);
                bus_dmamap_sync(cq->ring->dma.tag,
                                cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
                cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
                num_cqes++;
        }

        if (num_cqes)
                oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);

        return 0;
}


static void
setup_max_queues_want(POCE_SOFTC sc)
{
        int max_rss = 0;

        /* Check if this is a Flex10 machine; if so, don't use RSS */
        if ((sc->function_mode & FNM_FLEX10_MODE) ||
                (!sc->rss_enable) ||
                (sc->flags & OCE_FLAGS_BE2)) {
                sc->nrqs = 1;
                sc->nwqs = 1;
                sc->rss_enable = 0;
        } else {
                /* For multiqueue, the design is to have as many TX rings
                   as RSS rings, so that each RSS ring and its TX ring can
                   be paired on a single interrupt, which improves CPU
                   cache efficiency.
                 */
                if (IS_BE(sc) && (!sc->be3_native))
                        max_rss = OCE_LEGACY_MODE_RSS;
                else
                        max_rss = OCE_MAX_RSS;

                sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for default RX */
                sc->nwqs = MIN(OCE_NCPUS, max_rss);

                /* Hardware issue: turn off multi-TX for BE2 */
                if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
                        sc->nwqs = 1;
        }
}
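
/*
 * Worked example (illustrative numbers, assuming OCE_MAX_RSS is 4): on
 * a BE3-native adapter with 8 CPUs, setup_max_queues_want() asks for
 * nrqs = MIN(8, 4) + 1 = 5 (four RSS rings plus the default RX ring)
 * and nwqs = 4, pairing one TX ring with each RSS ring.
 */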


static void
update_queues_got(POCE_SOFTC sc)
{
        if (sc->rss_enable) {
                sc->nrqs = sc->intr_count + 1;
                sc->nwqs = sc->intr_count;
                if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
                        sc->nwqs = 1;
        } else {
                sc->nrqs = 1;
                sc->nwqs = 1;
        }
}