2 * Copyright (C) 2012 Emulex
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * Contact Information:
32 * freebsd-drivers@emulex.com
36 * Costa Mesa, CA 92626
42 #include "opt_inet6.h"
/*
 * Forward declarations for all file-local (static) routines.
 * NOTE(review): this view of the file is sampled — some continuation
 * lines of multi-line prototypes and the matching #endif lines for the
 * INET/INET6 guards are elided here.
 */
48 /* Driver entry points prototypes */
49 static int oce_probe(device_t dev);
50 static int oce_attach(device_t dev);
51 static int oce_detach(device_t dev);
52 static int oce_shutdown(device_t dev);
53 static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
54 static void oce_init(void *xsc);
55 static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m);
56 static void oce_multiq_flush(struct ifnet *ifp);
58 /* Driver interrupt routines prototypes */
59 static void oce_intr(void *arg, int pending);
60 static int oce_setup_intr(POCE_SOFTC sc);
61 static int oce_fast_isr(void *arg);
62 static int oce_alloc_intr(POCE_SOFTC sc, int vector,
63 void (*isr) (void *arg, int pending));
65 /* Media callbacks prototypes */
66 static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req);
67 static int oce_media_change(struct ifnet *ifp);
69 /* Transmit routines prototypes */
70 static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
71 static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
72 static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
74 #if defined(INET6) || defined(INET)
75 static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp,
78 static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
81 /* Receive routines prototypes */
82 static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
83 static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
84 static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
85 #if defined(INET6) || defined(INET)
86 static void oce_rx_flush_lro(struct oce_rq *rq);
88 static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
89 struct oce_nic_rx_cqe *cqe);
91 /* Helper function prototypes in this file */
92 static int oce_attach_ifp(POCE_SOFTC sc);
93 static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
94 static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag);
95 static int oce_vid_config(POCE_SOFTC sc);
96 static void oce_mac_addr_set(POCE_SOFTC sc);
97 static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data);
98 static void oce_local_timer(void *arg);
99 #if defined(INET6) || defined(INET)
100 static int oce_init_lro(POCE_SOFTC sc);
102 static void oce_if_deactivate(POCE_SOFTC sc);
103 static void oce_if_activate(POCE_SOFTC sc);
104 static void setup_max_queues_want(POCE_SOFTC sc);
105 static void update_queues_got(POCE_SOFTC sc);
/*
 * newbus glue: device method table, driver declaration and module
 * registration/dependencies for the oce(4) PCI NIC driver.
 */
107 static device_method_t oce_dispatch[] = {
108 DEVMETHOD(device_probe, oce_probe),
109 DEVMETHOD(device_attach, oce_attach),
110 DEVMETHOD(device_detach, oce_detach),
111 DEVMETHOD(device_shutdown, oce_shutdown),
115 static driver_t oce_driver = {
120 static devclass_t oce_devclass;
123 DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0);
124 MODULE_DEPEND(oce, pci, 1, 1, 1);
125 MODULE_DEPEND(oce, ether, 1, 1, 1);
126 MODULE_VERSION(oce, 1);
/* Embedded version string, greppable in the compiled object. */
130 const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
132 /* Module capabilities and parameters */
133 uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
134 uint32_t oce_enable_rss = OCE_MODCAP_RSS;
/* Loader tunables: hw.oce.max_rsp_handled / hw.oce.enable_rss. */
137 TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
138 TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
141 /* Supported devices table */
/* Each entry packs (vendor << 16) | device for quick probe matching. */
142 static uint32_t supportedDevices[] = {
143 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2,
144 (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3,
145 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3,
146 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201,
147 (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF,
153 /*****************************************************************************
154 * Driver entry points functions *
155 *****************************************************************************/
/*
 * Device probe: match PCI vendor/device against supportedDevices[] and
 * set the ASIC-family flag (BE2/BE3/XE201) in the softc.
 * Returns BUS_PROBE_DEFAULT on a match.
 */
158 oce_probe(device_t dev)
166 sc = device_get_softc(dev);
167 bzero(sc, sizeof(OCE_SOFTC));
170 vendor = pci_get_vendor(dev);
171 device = pci_get_device(dev);
/*
 * NOTE(review): element count is computed with sizeof(uint16_t) but the
 * table entries are uint32_t, so this loop bound is twice the actual
 * array length and reads past the end of supportedDevices[]. It should
 * be sizeof(supportedDevices) / sizeof(supportedDevices[0]).
 */
173 for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint16_t)); i++) {
174 if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
175 if (device == (supportedDevices[i] & 0xffff)) {
176 sprintf(str, "%s:%s",
177 "Emulex CNA NIC function",
179 device_set_desc_copy(dev, str);
/* switch head and break statements elided in this view */
182 case PCI_PRODUCT_BE2:
183 sc->flags |= OCE_FLAGS_BE2;
185 case PCI_PRODUCT_BE3:
186 sc->flags |= OCE_FLAGS_BE3;
188 case PCI_PRODUCT_XE201:
189 case PCI_PRODUCT_XE201_VF:
190 sc->flags |= OCE_FLAGS_XE201;
195 return BUS_PROBE_DEFAULT;
/*
 * Device attach: allocate PCI resources, bring up the hardware,
 * create interrupt vectors, queues and the ifnet, register VLAN
 * event handlers, start stats and the periodic timer.
 * The tail of the function (from callout_drain below) is the
 * error-unwind ladder; its goto labels are elided in this view.
 */
205 oce_attach(device_t dev)
210 sc = device_get_softc(dev);
212 rc = oce_hw_pci_alloc(sc);
/* Default operating parameters (tunable oce_enable_rss feeds rss_enable). */
216 sc->rss_enable = oce_enable_rss;
217 sc->tx_ring_size = OCE_TX_RING_SIZE;
218 sc->rx_ring_size = OCE_RX_RING_SIZE;
219 sc->rq_frag_size = OCE_RQ_BUF_SIZE;
220 sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
221 sc->promisc = OCE_DEFAULT_PROMISCUOUS;
223 LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock");
224 LOCK_CREATE(&sc->dev_lock, "Device_lock");
226 /* initialise the hardware */
227 rc = oce_hw_init(sc);
232 setup_max_queues_want(sc);
235 rc = oce_setup_intr(sc);
240 rc = oce_queue_init_all(sc);
245 rc = oce_attach_ifp(sc);
250 #if defined(INET6) || defined(INET)
251 rc = oce_init_lro(sc);
257 rc = oce_hw_start(sc);
/* Track VLAN add/remove so the HW filter table can be kept in sync. */
262 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
263 oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST);
264 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
265 oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST);
267 rc = oce_stats_init(sc);
/* Periodic housekeeping timer, re-armed every 2 seconds. */
274 callout_init(&sc->timer, CALLOUT_MPSAFE);
275 rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
/* ---- error-unwind path (reverse order of the setup above) ---- */
282 callout_drain(&sc->timer);
286 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
288 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
289 oce_hw_intr_disable(sc);
291 #if defined(INET6) || defined(INET)
295 ether_ifdetach(sc->ifp);
298 oce_queue_release_all(sc);
302 oce_dma_free(sc, &sc->bsmbx);
305 LOCK_DESTROY(&sc->dev_lock);
306 LOCK_DESTROY(&sc->bmbx_lock);
/*
 * Device detach: quiesce the interface, stop the timer, unhook VLAN
 * event handlers, detach the ifnet and let newbus detach children.
 */
313 oce_detach(device_t dev)
315 POCE_SOFTC sc = device_get_softc(dev);
319 oce_if_deactivate(sc);
321 UNLOCK(&sc->dev_lock);
/* Must drain (not just stop) so a running timer callback finishes. */
323 callout_drain(&sc->timer);
325 if (sc->vlan_attach != NULL)
326 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
327 if (sc->vlan_detach != NULL)
328 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
330 ether_ifdetach(sc->ifp);
336 bus_generic_detach(dev);
/* System shutdown hook — simply performs a full detach. */
343 oce_shutdown(device_t dev)
347 rc = oce_detach(dev);
/*
 * ifnet ioctl handler. Address/MTU/media requests are largely delegated
 * to ether_ioctl()/ifmedia_ioctl(); SIOCSIFFLAGS drives interface
 * up/down and promiscuous mode; SIOCSIFCAP toggles offload capabilities
 * with the TSO-requires-TXCSUM dependency enforced below.
 * (case labels for several branches are elided in this view)
 */
354 oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
356 struct ifreq *ifr = (struct ifreq *)data;
357 POCE_SOFTC sc = ifp->if_softc;
362 case SIOCGIFPSRCADDR_IN6:
363 rc = ether_ioctl(ifp, command, data);
366 case SIOCGIFPSRCADDR:
367 rc = ether_ioctl(ifp, command, data);
371 rc = ether_ioctl(ifp, command, data);
/* Media get/set goes through the ifmedia layer. */
375 rc = ifmedia_ioctl(ifp, ifr, &sc->media, command);
379 rc = ether_ioctl(ifp, command, data);
383 rc = ether_ioctl(ifp, command, data);
386 case SIOCGETMIFCNT_IN6:
387 rc = ether_ioctl(ifp, command, data);
/* SIOCSIFMTU: reject anything above the hardware maximum. */
391 if (ifr->ifr_mtu > OCE_MAX_MTU)
394 ifp->if_mtu = ifr->ifr_mtu;
/* SIOCSIFFLAGS: bring the interface up or down as requested. */
398 if (ifp->if_flags & IFF_UP) {
399 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
400 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
403 device_printf(sc->dev, "Interface Up\n");
407 sc->ifp->if_drv_flags &=
408 ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
409 oce_if_deactivate(sc);
411 UNLOCK(&sc->dev_lock);
413 device_printf(sc->dev, "Interface Down\n");
/* Sync HW promiscuous state with the IFF_PROMISC flag edge. */
416 if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) {
418 oce_rxf_set_promiscuous(sc, sc->promisc);
419 } else if (!(ifp->if_flags & IFF_PROMISC) && sc->promisc) {
421 oce_rxf_set_promiscuous(sc, sc->promisc);
/* SIOCADDMULTI/SIOCDELMULTI: reprogram the HW multicast filter. */
428 rc = oce_hw_update_multicast(sc);
430 device_printf(sc->dev,
431 "Update multicast address failed\n");
/* SIOCSIFCAP: u holds the set of capability bits being toggled. */
435 u = ifr->ifr_reqcap ^ ifp->if_capenable;
437 if (u & IFCAP_TXCSUM) {
438 ifp->if_capenable ^= IFCAP_TXCSUM;
439 ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
/* TSO cannot stay enabled once TX checksumming is turned off. */
441 if (IFCAP_TSO & ifp->if_capenable &&
442 !(IFCAP_TXCSUM & ifp->if_capenable)) {
443 ifp->if_capenable &= ~IFCAP_TSO;
444 ifp->if_hwassist &= ~CSUM_TSO;
446 "TSO disabled due to -txcsum.\n");
450 if (u & IFCAP_RXCSUM)
451 ifp->if_capenable ^= IFCAP_RXCSUM;
453 if (u & IFCAP_TSO4) {
454 ifp->if_capenable ^= IFCAP_TSO4;
456 if (IFCAP_TSO & ifp->if_capenable) {
457 if (IFCAP_TXCSUM & ifp->if_capenable)
458 ifp->if_hwassist |= CSUM_TSO;
460 ifp->if_capenable &= ~IFCAP_TSO;
461 ifp->if_hwassist &= ~CSUM_TSO;
463 "Enable txcsum first.\n");
467 ifp->if_hwassist &= ~CSUM_TSO;
470 if (u & IFCAP_VLAN_HWTAGGING)
471 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
473 if (u & IFCAP_VLAN_HWFILTER) {
474 ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
478 #if defined(INET6) || defined(INET)
480 ifp->if_capenable ^= IFCAP_LRO;
/* Private passthrough ioctl (management/diagnostics path). */
486 rc = oce_handle_passthrough(ifp, data);
489 rc = ether_ioctl(ifp, command, data);
/*
 * NOTE(review): fragment — the enclosing function signature is elided in
 * this view. By original line numbering (504-509) this appears to belong
 * to oce_init(): if the interface is marked up, quiesce it first before
 * (re)initialization — confirm against the full source.
 */
504 if (sc->ifp->if_flags & IFF_UP) {
505 oce_if_deactivate(sc);
509 UNLOCK(&sc->dev_lock);
/*
 * if_transmit entry (multi-queue TX). Picks a WQ from the mbuf's flow id
 * when M_FLOWID is set; transmits directly when the WQ lock is
 * uncontended, otherwise defers the mbuf to the WQ's buf_ring.
 */
515 oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
517 POCE_SOFTC sc = ifp->if_softc;
518 struct oce_wq *wq = NULL;
522 if ((m->m_flags & M_FLOWID) != 0)
523 queue_index = m->m_pkthdr.flowid % sc->nwqs;
525 wq = sc->wq[queue_index];
527 if (TRY_LOCK(&wq->tx_lock)) {
528 status = oce_multiq_transmit(ifp, m, wq);
529 UNLOCK(&wq->tx_lock);
/* Lock contended: queue for later transmission by the holder/task. */
531 status = drbr_enqueue(ifp, wq->br, m);
/* if_qflush entry: drop every mbuf still queued on each WQ's buf_ring. */
539 oce_multiq_flush(struct ifnet *ifp)
541 POCE_SOFTC sc = ifp->if_softc;
545 for (i = 0; i < sc->nwqs; i++) {
546 while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL)
554 /*****************************************************************************
555 * Driver interrupt routines functions *
556 *****************************************************************************/
/*
 * Deferred (taskqueue) interrupt handler for one EQ. Drains event queue
 * entries, dispatches each associated CQ handler, then re-arms the CQs
 * and finally the EQ. Enqueued by oce_fast_isr().
 */
559 oce_intr(void *arg, int pending)
562 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
563 POCE_SOFTC sc = ii->sc;
564 struct oce_eq *eq = ii->eq;
566 struct oce_cq *cq = NULL;
/* Sync EQ ring DMA memory before reading entries. */
570 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
571 BUS_DMASYNC_POSTWRITE);
573 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
577 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
578 BUS_DMASYNC_POSTWRITE);
579 RING_GET(eq->ring, 1);
/* No valid events seen: just re-arm the EQ. */
585 goto eq_arm; /* Spurious */
587 /* Clear EQ entries, but dont arm */
588 oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE);
590 /* Process TX, RX and MCC. But dont arm CQ*/
591 for (i = 0; i < eq->cq_valid; i++) {
593 (*cq->cq_handler)(cq->cb_arg);
596 /* Arm all cqs connected to this EQ */
597 for (i = 0; i < eq->cq_valid; i++) {
599 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
/* Re-enable event delivery on the EQ. */
603 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
/*
 * Allocate interrupt vectors: try MSI-X with one vector per queue pair,
 * fall back to a single legacy INTx vector, then attach ISRs.
 */
609 oce_setup_intr(POCE_SOFTC sc)
611 int rc = 0, use_intx = 0;
612 int vector = 0, req_vectors = 0;
/* One vector per TX/RX queue pair (nrqs includes the default RQ). */
615 req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
619 if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
620 sc->intr_count = req_vectors;
621 rc = pci_alloc_msix(sc->dev, &sc->intr_count);
/* Release partial/failed MSI allocation before falling back. */
624 pci_release_msi(sc->dev);
626 sc->flags |= OCE_FLAGS_USING_MSIX;
633 /* Scale number of queues based on intr we got */
634 update_queues_got(sc);
637 device_printf(sc->dev, "Using legacy interrupt\n");
638 rc = oce_alloc_intr(sc, vector, oce_intr);
/* MSI-X path: one oce_intr task per allocated vector. */
642 for (; vector < sc->intr_count; vector++) {
643 rc = oce_alloc_intr(sc, vector, oce_intr);
/*
 * Interrupt filter (runs in interrupt context): mask further events on
 * the EQ and hand the real work to the per-vector taskqueue.
 */
657 oce_fast_isr(void *arg)
659 POCE_INTR_INFO ii = (POCE_INTR_INFO) arg;
660 POCE_SOFTC sc = ii->sc;
/* clear_int=TRUE, rearm=FALSE: silence the EQ until the task re-arms it. */
665 oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE);
667 taskqueue_enqueue_fast(ii->tq, &ii->task);
669 return FILTER_HANDLED;
/*
 * Allocate one interrupt resource and its fast taskqueue, then hook
 * oce_fast_isr as the filter with `isr` as the deferred task handler.
 */
674 oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending))
676 POCE_INTR_INFO ii = &sc->intrs[vector];
/* Refuse vectors beyond the per-adapter EQ array. */
679 if (vector >= OCE_MAX_EQ)
682 /* Set the resource id for the interrupt.
683 * MSIx is vector + 1 for the resource id,
684 * INTx is 0 for the resource id.
686 if (sc->flags & OCE_FLAGS_USING_MSIX)
690 ii->intr_res = bus_alloc_resource_any(sc->dev,
692 &rr, RF_ACTIVE|RF_SHAREABLE);
694 if (ii->intr_res == NULL) {
695 device_printf(sc->dev,
696 "Could not allocate interrupt\n");
/* Deferred-work task and a dedicated fast taskqueue per vector. */
701 TASK_INIT(&ii->task, 0, isr, ii);
703 sprintf(ii->task_name, "oce_task[%d]", ii->vector);
704 ii->tq = taskqueue_create_fast(ii->task_name,
706 taskqueue_thread_enqueue,
708 taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq",
709 device_get_nameunit(sc->dev));
/* Filter-only hookup; all real processing happens in the task. */
712 rc = bus_setup_intr(sc->dev,
715 oce_fast_isr, NULL, ii, &ii->tag);
/*
 * Tear down everything oce_alloc_intr()/oce_setup_intr() created:
 * ISR hooks, taskqueues, IRQ resources, and the MSI-X allocation.
 */
722 oce_intr_free(POCE_SOFTC sc)
726 for (i = 0; i < sc->intr_count; i++) {
728 if (sc->intrs[i].tag != NULL)
729 bus_teardown_intr(sc->dev, sc->intrs[i].intr_res,
731 if (sc->intrs[i].tq != NULL)
732 taskqueue_free(sc->intrs[i].tq);
734 if (sc->intrs[i].intr_res != NULL)
735 bus_release_resource(sc->dev, SYS_RES_IRQ,
737 sc->intrs[i].intr_res);
/* NULL the stale handles so a second free is harmless. */
738 sc->intrs[i].tag = NULL;
739 sc->intrs[i].intr_res = NULL;
742 if (sc->flags & OCE_FLAGS_USING_MSIX)
743 pci_release_msi(sc->dev);
749 /******************************************************************************
750 * Media callbacks functions *
751 ******************************************************************************/
/*
 * ifmedia status callback: report link state and map the firmware's
 * link_speed code (1..4) to the matching full-duplex media type.
 */
754 oce_media_status(struct ifnet *ifp, struct ifmediareq *req)
756 POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc;
759 req->ifm_status = IFM_AVALID;
760 req->ifm_active = IFM_ETHER;
762 if (sc->link_status == 1)
763 req->ifm_status |= IFM_ACTIVE;
767 switch (sc->link_speed) {
768 case 1: /* 10 Mbps */
769 req->ifm_active |= IFM_10_T | IFM_FDX;
772 case 2: /* 100 Mbps */
773 req->ifm_active |= IFM_100_TX | IFM_FDX;
/* case 3: 1 Gbps (label elided in this view) */
777 req->ifm_active |= IFM_1000_T | IFM_FDX;
780 case 4: /* 10 Gbps */
781 req->ifm_active |= IFM_10G_SR | IFM_FDX;
/*
 * ifmedia change callback registered in oce_attach_ifp(); body elided in
 * this view — presumably a no-op returning 0, confirm against full source.
 */
791 oce_media_change(struct ifnet *ifp)
799 /*****************************************************************************
800 * Transmit routines functions *
801 *****************************************************************************/
/*
 * Transmit one mbuf chain on WQ `wq_index`:
 *   - optionally coalesce for TSO (oce_tso_setup),
 *   - DMA-map the chain, build one NIC header WQE plus one fragment WQE
 *     per DMA segment (plus a zeroed dummy WQE when required),
 *   - ring the TX doorbell with the WQE count.
 * On EFBIG the chain is defragmented once and retried; callers hold the
 * WQ tx_lock. Returns 0 on success or an errno.
 */
804 oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
806 int rc = 0, i, retry_cnt = 0;
807 bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
808 struct mbuf *m, *m_temp;
809 struct oce_wq *wq = sc->wq[wq_index];
810 struct oce_packet_desc *pd;
812 struct oce_nic_hdr_wqe *nichdr;
813 struct oce_nic_frag_wqe *nicfrag;
816 #if defined(INET6) || defined(INET)
/* A chain without a packet header cannot be transmitted. */
824 if (!(m->m_flags & M_PKTHDR)) {
829 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
830 #if defined(INET6) || defined(INET)
831 /* consolidate packet buffers for TSO/LSO segment offload */
832 m = oce_tso_setup(sc, mpp, &mss);
/* Reserve the next slot in the per-WQ packet descriptor ring. */
842 out = wq->packets_out + 1;
843 if (out == OCE_WQ_PACKET_ARRAY_SIZE)
845 if (out == wq->packets_in)
848 pd = &wq->pckts[wq->packets_out];
850 rc = bus_dmamap_load_mbuf_sg(wq->tag,
852 m, segs, &pd->nsegs, BUS_DMA_NOWAIT);
/* +1 accounts for the NIC header WQE preceding the fragments. */
854 num_wqes = pd->nsegs + 1;
856 /*Dummy required only for BE3.*/
/* Not enough free WQEs: unmap and bail out (ring full). */
860 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
861 bus_dmamap_unload(wq->tag, pd->map);
865 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE);
867 wq->packets_out = out;
/* ---- build the NIC header WQE ---- */
870 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
871 nichdr->u0.dw[0] = 0;
872 nichdr->u0.dw[1] = 0;
873 nichdr->u0.dw[2] = 0;
874 nichdr->u0.dw[3] = 0;
876 nichdr->u0.s.complete = 1;
877 nichdr->u0.s.event = 1;
878 nichdr->u0.s.crc = 1;
879 nichdr->u0.s.forward = 0;
/* Checksum-offload request bits mirror the mbuf csum flags. */
880 nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0;
882 (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0;
884 (m->m_pkthdr.csum_flags & CSUM_TCP) ? 1 : 0;
885 nichdr->u0.s.num_wqe = num_wqes;
886 nichdr->u0.s.total_length = m->m_pkthdr.len;
887 if (m->m_flags & M_VLANTAG) {
888 nichdr->u0.s.vlan = 1; /*Vlan present*/
889 nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag;
891 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
892 if (m->m_pkthdr.tso_segsz) {
893 nichdr->u0.s.lso = 1;
894 nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz;
/* LSO implies IP checksum insertion by the hardware. */
897 nichdr->u0.s.ipcs = 1;
900 RING_PUT(wq->ring, 1);
901 wq->ring->num_used++;
/* ---- one fragment WQE per DMA segment ---- */
903 for (i = 0; i < pd->nsegs; i++) {
905 RING_GET_PRODUCER_ITEM_VA(wq->ring,
906 struct oce_nic_frag_wqe);
907 nicfrag->u0.s.rsvd0 = 0;
908 nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr);
909 nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr);
910 nicfrag->u0.s.frag_len = segs[i].ds_len;
911 pd->wqe_idx = wq->ring->pidx;
912 RING_PUT(wq->ring, 1);
913 wq->ring->num_used++;
/* BE3 padding: emit a zeroed dummy WQE when num_wqes was rounded up. */
915 if (num_wqes > (pd->nsegs + 1)) {
917 RING_GET_PRODUCER_ITEM_VA(wq->ring,
918 struct oce_nic_frag_wqe);
919 nicfrag->u0.dw[0] = 0;
920 nicfrag->u0.dw[1] = 0;
921 nicfrag->u0.dw[2] = 0;
922 nicfrag->u0.dw[3] = 0;
923 pd->wqe_idx = wq->ring->pidx;
924 RING_PUT(wq->ring, 1);
925 wq->ring->num_used++;
929 sc->ifp->if_opackets++;
930 wq->tx_stats.tx_reqs++;
931 wq->tx_stats.tx_wrbs += num_wqes;
932 wq->tx_stats.tx_bytes += m->m_pkthdr.len;
933 wq->tx_stats.tx_pkts++;
/* Flush WQE writes, then ring the TX doorbell (count | queue id). */
935 bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
936 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
937 reg_value = (num_wqes << 16) | wq->wq_id;
938 OCE_WRITE_REG32(sc, db, PD_TXULP_DB, reg_value);
/* Too many segments: defragment once and retry the mapping. */
940 } else if (rc == EFBIG) {
941 if (retry_cnt == 0) {
942 m_temp = m_defrag(m, M_DONTWAIT);
947 retry_cnt = retry_cnt + 1;
951 } else if (rc == ENOMEM)
/*
 * Reclaim one completed TX packet: advance the packet-descriptor
 * consumer index, return the WQEs to the ring, unmap the DMA buffer,
 * and clear OACTIVE / restart transmission once the ring drains below
 * half full. (mbuf free and index-wrap lines elided in this view)
 */
966 oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
969 struct oce_packet_desc *pd;
970 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
/* A completion with no outstanding packet indicates ring corruption. */
973 if (wq->packets_out == wq->packets_in)
974 device_printf(sc->dev, "WQ transmit descriptor missing\n");
976 in = wq->packets_in + 1;
977 if (in == OCE_WQ_PACKET_ARRAY_SIZE)
980 pd = &wq->pckts[wq->packets_in];
/* Free the header WQE plus one WQE per segment. */
982 wq->ring->num_used -= (pd->nsegs + 1);
983 bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
984 bus_dmamap_unload(wq->tag, pd->map);
/* Un-throttle the stack once at least half the ring is free again. */
990 if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) {
991 if (wq->ring->num_used < (wq->ring->num_items / 2)) {
992 sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE);
993 oce_tx_restart(sc, wq);
/*
 * Kick the TX path after congestion clears: if anything is still queued
 * (buf_ring on >=8.0, classic if_snd otherwise) schedule the WQ's
 * transmit task on the SWI taskqueue.
 */
1000 oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq)
1003 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING)
1006 #if __FreeBSD_version >= 800000
1007 if (!drbr_empty(sc->ifp, wq->br))
1009 if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
1011 taskqueue_enqueue_fast(taskqueue_swi, &wq->txtask);
1015 #if defined(INET6) || defined(INET)
/*
 * Prepare an mbuf chain for TSO: extract the MSS, ensure the chain is
 * writable (dup if not), parse Ethernet/VLAN + IPv4/IPv6 + TCP headers,
 * and pull the full header span into the first mbuf so the hardware can
 * read it contiguously. Returns the (possibly replaced) mbuf.
 */
1016 static struct mbuf *
1017 oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp, uint16_t *mss)
1024 struct ip6_hdr *ip6;
1026 struct ether_vlan_header *eh;
1033 *mss = m->m_pkthdr.tso_segsz;
/* TSO requires header writes; duplicate read-only chains. */
1035 if (M_WRITABLE(m) == 0) {
1036 m = m_dup(*mpp, M_DONTWAIT);
/* Determine ethertype and header length, accounting for a VLAN tag. */
1043 eh = mtod(m, struct ether_vlan_header *);
1044 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1045 etype = ntohs(eh->evl_proto);
1046 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1048 etype = ntohs(eh->evl_encap_proto);
1049 ehdrlen = ETHER_HDR_LEN;
/* IPv4: only TCP segments qualify for TSO. */
1056 ip = (struct ip *)(m->m_data + ehdrlen);
1057 if (ip->ip_p != IPPROTO_TCP)
1059 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
1061 total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2);
1065 case ETHERTYPE_IPV6:
1066 ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen);
1067 if (ip6->ip6_nxt != IPPROTO_TCP)
/* NOTE(review): assumes TCP follows the fixed IPv6 header directly,
 * i.e. no IPv6 extension headers — confirm against full source. */
1069 th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr));
1071 total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2);
/* Make the whole L2..L4 header contiguous in the first mbuf. */
1078 m = m_pullup(m, total_len);
1085 #endif /* INET6 || INET */
/*
 * Deferred TX-restart task (scheduled from oce_tx_restart): drains the
 * WQ's buf_ring by calling oce_multiq_transmit with a NULL mbuf.
 */
1089 oce_tx_task(void *arg, int npending)
1091 struct oce_wq *wq = arg;
1092 POCE_SOFTC sc = wq->parent;
1093 struct ifnet *ifp = sc->ifp;
1096 #if __FreeBSD_version >= 800000
1097 if (TRY_LOCK(&wq->tx_lock)) {
/* NULL mbuf means "just drain what is already queued". */
1098 rc = oce_multiq_transmit(ifp, NULL, wq);
1100 device_printf(sc->dev,
1101 "TX[%d] restart failed\n", wq->queue_index);
1103 UNLOCK(&wq->tx_lock);
/*
 * Legacy if_start entry (single-queue path): dequeue from if_snd and
 * transmit on WQ 0; on ring-full, set OACTIVE and requeue the mbuf at
 * the head so ordering is preserved.
 */
1113 oce_start(struct ifnet *ifp)
1115 POCE_SOFTC sc = ifp->if_softc;
1119 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1124 IF_DEQUEUE(&sc->ifp->if_snd, m);
1127 /* oce_start always uses default TX queue 0 */
1128 LOCK(&sc->wq[0]->tx_lock);
1129 rc = oce_tx(sc, &m, 0);
1130 UNLOCK(&sc->wq[0]->tx_lock);
/* TX failed (e.g. ring full): throttle and put the mbuf back in front. */
1133 sc->wq[0]->tx_stats.tx_stops ++;
1134 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1135 IFQ_DRV_PREPEND(&ifp->if_snd, m);
/* Hand a copy to BPF listeners for the successfully queued packet. */
1141 ETHER_BPF_MTAP(ifp, m);
1149 /* Handle the Completion Queue for transmit */
/*
 * TX CQ handler (invoked from oce_intr): walks valid completion entries,
 * reclaims the corresponding packets via oce_tx_complete(), then arms
 * the CQ with the number of entries consumed.
 */
1151 oce_wq_handler(void *arg)
1153 struct oce_wq *wq = (struct oce_wq *)arg;
1154 POCE_SOFTC sc = wq->parent;
1155 struct oce_cq *cq = wq->cq;
1156 struct oce_nic_tx_cqe *cqe;
1160 bus_dmamap_sync(cq->ring->dma.tag,
1161 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1162 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
/* dw[3] non-zero marks a valid (not yet consumed) completion entry. */
1163 while (cqe->u0.dw[3]) {
1164 DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe));
/* Advance the WQ consumer index past the completed WQE, with wrap. */
1166 wq->ring->cidx = cqe->u0.s.wqe_index + 1;
1167 if (wq->ring->cidx >= wq->ring->num_items)
1168 wq->ring->cidx -= wq->ring->num_items;
1170 oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
1171 wq->tx_stats.tx_compl++;
1173 RING_GET(cq->ring, 1);
1174 bus_dmamap_sync(cq->ring->dma.tag,
1175 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1177 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
/* Acknowledge the consumed entries to the hardware. */
1182 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1183 UNLOCK(&wq->tx_lock);
/*
 * Core multi-queue transmit: called with the WQ tx_lock held. Enqueues
 * `m` (may be NULL when only draining) onto the WQ's buf_ring as needed,
 * then transmits queued mbufs until the ring fills; on failure the
 * current mbuf is re-queued and OACTIVE is set.
 */
1190 oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
1192 POCE_SOFTC sc = ifp->if_softc;
1193 int status = 0, queue_index = 0;
1194 struct mbuf *next = NULL;
1195 struct buf_ring *br = NULL;
1198 queue_index = wq->queue_index;
/* Not running (or throttled): just park the mbuf on the buf_ring. */
1200 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
1203 status = drbr_enqueue(ifp, br, m);
1208 next = drbr_dequeue(ifp, br);
1209 else if (drbr_needs_enqueue(ifp, br)) {
1210 if ((status = drbr_enqueue(ifp, br, m)) != 0)
1212 next = drbr_dequeue(ifp, br);
/* Drain loop: send until the ring rejects a packet or it runs empty. */
1216 while (next != NULL) {
1217 if (oce_tx(sc, &next, queue_index)) {
1219 wq->tx_stats.tx_stops ++;
1220 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1221 status = drbr_enqueue(ifp, br, next);
1225 drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
1226 ETHER_BPF_MTAP(ifp, next);
1227 next = drbr_dequeue(ifp, br);
1236 /*****************************************************************************
1237 * Receive routines functions *
1238 *****************************************************************************/
/*
 * Assemble one received packet from its RQ fragments and push it up the
 * stack: chain per-fragment mbufs, set checksum-offload results and the
 * VLAN tag (with Flex10/QnQ handling), then hand the packet to LRO when
 * eligible or to if_input otherwise, updating per-queue RX statistics.
 */
1241 oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
1244 struct oce_packet_desc *pd;
1245 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1246 int i, len, frag_len;
1247 struct mbuf *m = NULL, *tail = NULL;
1250 len = cqe->u0.s.pkt_size;
1251 vtag = cqe->u0.s.vlan_tag;
/* Zero-length completion: discard the fragments (Lancer workaround). */
1253 /*partial DMA workaround for Lancer*/
1254 oce_discard_rx_comp(rq, cqe);
/* Walk the fragments, consuming one packet descriptor per fragment. */
1258 for (i = 0; i < cqe->u0.s.num_fragments; i++) {
1260 if (rq->packets_out == rq->packets_in) {
/* NOTE(review): message says "transmit" but this is the receive
 * path; runtime string left untouched here. */
1261 device_printf(sc->dev,
1262 "RQ transmit descriptor missing\n");
1264 out = rq->packets_out + 1;
1265 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1267 pd = &rq->pckts[rq->packets_out];
1268 rq->packets_out = out;
1270 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1271 bus_dmamap_unload(rq->tag, pd->map);
/* Each fragment carries at most frag_size bytes of the packet. */
1274 frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
1275 pd->mbuf->m_len = frag_len;
1278 /* additional fragments */
1279 pd->mbuf->m_flags &= ~M_PKTHDR;
1280 tail->m_next = pd->mbuf;
1283 /* first fragment, fill out much of the packet header */
1284 pd->mbuf->m_pkthdr.len = len;
1285 pd->mbuf->m_pkthdr.csum_flags = 0;
1286 if (IF_CSUM_ENABLED(sc)) {
1287 if (cqe->u0.s.l4_cksum_pass) {
1288 pd->mbuf->m_pkthdr.csum_flags |=
1289 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1290 pd->mbuf->m_pkthdr.csum_data = 0xffff;
1292 if (cqe->u0.s.ip_cksum_pass) {
1293 if (!cqe->u0.s.ip_ver) { //IPV4
1294 pd->mbuf->m_pkthdr.csum_flags |=
1295 (CSUM_IP_CHECKED|CSUM_IP_VALID);
1299 m = tail = pd->mbuf;
/* Drop packets that arrived on a port not belonging to this function. */
1306 if (!oce_cqe_portid_valid(sc, cqe)) {
1311 m->m_pkthdr.rcvif = sc->ifp;
1312 #if __FreeBSD_version >= 800000
/* Flow id = RX queue index, used for RSS-aware upper layers. */
1313 m->m_pkthdr.flowid = rq->queue_index;
1314 m->m_flags |= M_FLOWID;
// This determines if a vlan tag is present
1317 if (oce_cqe_vtp_valid(sc, cqe)) {
1318 if (sc->function_mode & FNM_FLEX10_MODE) {
1320 if (cqe->u0.s.qnq) {
1321 /* If QnQ is not set, neglect VLAN */
1323 m->m_pkthdr.ether_vtag =
1326 m->m_pkthdr.ether_vtag = vtag;
1327 m->m_flags |= M_VLANTAG;
/* Non-Flex10 BE: tag arrives byte-swapped. */
1331 m->m_pkthdr.ether_vtag = BSWAP_16(vtag);
1333 m->m_pkthdr.ether_vtag = vtag;
1334 m->m_flags |= M_VLANTAG;
1338 sc->ifp->if_ipackets++;
1339 #if defined(INET6) || defined(INET)
1340 /* Try to queue to LRO */
/* Only plain, checksum-verified IPv4 TCP with LRO enabled qualifies. */
1341 if (IF_LRO_ENABLED(sc) &&
1342 !(m->m_flags & M_VLANTAG) &&
1343 (cqe->u0.s.ip_cksum_pass) &&
1344 (cqe->u0.s.l4_cksum_pass) &&
1345 (!cqe->u0.s.ip_ver) &&
1346 (rq->lro.lro_cnt != 0)) {
1348 if (tcp_lro_rx(&rq->lro, m, 0) == 0) {
1349 rq->lro_pkts_queued ++;
1352 /* If LRO posting fails then try to post to STACK */
1356 (*sc->ifp->if_input) (sc->ifp, m);
1357 #if defined(INET6) || defined(INET)
1360 /* Update rx stats per queue */
1361 rq->rx_stats.rx_pkts++;
1362 rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size;
1363 rq->rx_stats.rx_frags += cqe->u0.s.num_fragments;
1364 if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET)
1365 rq->rx_stats.rx_mcast_pkts++;
1366 if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET)
1367 rq->rx_stats.rx_ucast_pkts++;
/*
 * Drop all fragments of an RX completion without delivering the packet:
 * consume the packet descriptors and unmap/free their buffers. Used for
 * error completions and the Lancer partial-DMA workaround.
 */
1375 oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
1377 uint32_t out, i = 0;
1378 struct oce_packet_desc *pd;
1379 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1380 int num_frags = cqe->u0.s.num_fragments;
1382 if (IS_XE201(sc) && cqe->u0.s.error) {
1383 /* Lancer A0 workaround
1384 * num_frags will be 1 more than actual in case of error
1389 for (i = 0; i < num_frags; i++) {
1390 if (rq->packets_out == rq->packets_in) {
/* NOTE(review): "transmit" in this RX-path message is misleading;
 * runtime string left untouched here. */
1391 device_printf(sc->dev,
1392 "RQ transmit descriptor missing\n");
1394 out = rq->packets_out + 1;
1395 if (out == OCE_RQ_PACKET_ARRAY_SIZE)
1397 pd = &rq->pckts[rq->packets_out];
1398 rq->packets_out = out;
1400 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1401 bus_dmamap_unload(rq->tag, pd->map);
/*
 * Return whether the RX completion carries a VLAN tag. BE3 native mode
 * uses the v1 CQE layout; legacy mode reads the v0 field.
 */
1410 oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1412 struct oce_nic_rx_cqe_v1 *cqe_v1;
1415 if (sc->be3_native) {
1416 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1417 vtp = cqe_v1->u0.s.vlan_tag_present;
1419 vtp = cqe->u0.s.vlan_tag_present;
/*
 * Validate that an RX completion's port matches this function's port.
 * Only meaningful for BE3 in native mode; legacy BE3 and Lancer always
 * pass (the CQE port field is not applicable there).
 */
1428 oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
1430 struct oce_nic_rx_cqe_v1 *cqe_v1;
1433 if (sc->be3_native && IS_BE(sc)) {
1434 cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe;
1435 port_id = cqe_v1->u0.s.port;
1436 if (sc->port_id != port_id)
1439 ;/* For BE3 legacy and Lancer this is dummy */
1446 #if defined(INET6) || defined(INET)
/*
 * Flush all LRO sessions accumulated on this RQ up the stack and reset
 * the queued-packet counter.
 */
1448 oce_rx_flush_lro(struct oce_rq *rq)
1450 struct lro_ctrl *lro = &rq->lro;
1451 struct lro_entry *queued;
1452 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1454 if (!IF_LRO_ENABLED(sc))
1457 while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
1458 SLIST_REMOVE_HEAD(&lro->lro_active, next);
1459 tcp_lro_flush(lro, queued);
1461 rq->lro_pkts_queued = 0;
/*
 * Initialize one LRO control block per RX queue; returns non-zero from
 * the first tcp_lro_init() failure.
 */
1468 oce_init_lro(POCE_SOFTC sc)
1470 struct lro_ctrl *lro = NULL;
1473 for (i = 0; i < sc->nrqs; i++) {
1474 lro = &sc->rq[i]->lro;
1475 rc = tcp_lro_init(lro);
1477 device_printf(sc->dev, "LRO init failed\n");
1485 #endif /* INET6 || INET */
/* Release the per-RQ LRO state created by oce_init_lro(). */
1488 oce_free_lro(POCE_SOFTC sc)
1490 #if defined(INET6) || defined(INET)
1491 struct lro_ctrl *lro = NULL;
1494 for (i = 0; i < sc->nrqs; i++) {
1495 lro = &sc->rq[i]->lro;
/*
 * Replenish up to `count` receive buffers on an RQ: allocate cluster
 * mbufs, DMA-map them, write one RQE per buffer, then notify the
 * hardware via the RXULP doorbell in batches of OCE_MAX_RQ_POSTS.
 */
1504 oce_alloc_rx_bufs(struct oce_rq *rq, int count)
1506 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
1508 struct oce_packet_desc *pd;
1509 bus_dma_segment_t segs[6];
1510 int nsegs, added = 0;
1511 struct oce_nic_rqe *rqe;
1512 pd_rxulp_db_t rxdb_reg;
1515 for (i = 0; i < count; i++) {
/* Stop when the packet-descriptor ring would wrap onto the consumer. */
1516 in = rq->packets_in + 1;
1517 if (in == OCE_RQ_PACKET_ARRAY_SIZE)
1519 if (in == rq->packets_out)
1520 break; /* no more room */
1522 pd = &rq->pckts[rq->packets_in];
1523 pd->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
1524 if (pd->mbuf == NULL)
1527 pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
1528 rc = bus_dmamap_load_mbuf_sg(rq->tag,
1531 segs, &nsegs, BUS_DMA_NOWAIT);
1542 rq->packets_in = in;
1543 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
/* Publish the buffer's physical address in the next RQE. */
1545 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
1546 rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr);
1547 rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr);
1548 DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe));
1549 RING_PUT(rq->ring, 1);
/* Doorbell can report at most OCE_MAX_RQ_POSTS buffers per write. */
1554 for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
1556 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
1557 rxdb_reg.bits.qid = rq->rq_id;
1558 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1559 added -= OCE_MAX_RQ_POSTS;
/* Post the remainder (if any). */
1563 rxdb_reg.bits.qid = rq->rq_id;
1564 rxdb_reg.bits.num_posted = added;
1565 OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
1573 /* Handle the Completion Queue for receive */
/*
 * RX CQ handler (invoked from oce_intr): processes up to a bounded
 * number of completions per invocation, delivering good packets via
 * oce_rx() and discarding/accounting errors, periodically flushing LRO,
 * then arms the CQ and replenishes RX buffers.
 */
1575 oce_rq_handler(void *arg)
1577 struct oce_rq *rq = (struct oce_rq *)arg;
1578 struct oce_cq *cq = rq->cq;
1579 POCE_SOFTC sc = rq->parent;
1580 struct oce_nic_rx_cqe *cqe;
1581 int num_cqes = 0, rq_buffers_used = 0;
1585 bus_dmamap_sync(cq->ring->dma.tag,
1586 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1587 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
/* dw[2] non-zero marks a valid completion entry. */
1588 while (cqe->u0.dw[2]) {
1589 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1591 RING_GET(rq->ring, 1);
1592 if (cqe->u0.s.error == 0) {
1593 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1595 rq->rx_stats.rxcp_err++;
1596 sc->ifp->if_ierrors++;
1598 /* Lancer A0 no buffer workaround */
1599 oce_discard_rx_comp(rq, cqe);
1601 /* Post L3/L4 errors to stack.*/
1602 oce_rx(rq, cqe->u0.s.frag_index, cqe);
1605 rq->rx_stats.rx_compl++;
1608 #if defined(INET6) || defined(INET)
/* Bound LRO batching latency: flush every 16 queued packets. */
1609 if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) {
1610 oce_rx_flush_lro(rq);
1614 RING_GET(cq->ring, 1);
1615 bus_dmamap_sync(cq->ring->dma.tag,
1616 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1618 RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
/* Budget per invocation (tighter on Lancer/XE201). */
1620 if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
1623 #if defined(INET6) || defined(INET)
1624 if (IF_LRO_ENABLED(sc))
1625 oce_rx_flush_lro(rq);
1629 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
/* Refill the RQ with as many buffers as were consumed (minus one). */
1630 rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
1631 if (rq_buffers_used > 1)
1632 oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
1635 UNLOCK(&rq->rx_lock);
1644 /*****************************************************************************
1645 * Helper function prototypes in this file *
1646 *****************************************************************************/
/*
 * Create and configure the ifnet: media (fixed autoselect), driver
 * entry points, queue limits, checksum/TSO/LRO/VLAN capabilities, then
 * attach it to the network stack with the adapter's MAC address.
 */
1649 oce_attach_ifp(POCE_SOFTC sc)
1652 sc->ifp = if_alloc(IFT_ETHER);
/* Only "auto" media is exposed; oce_media_status reports real speed. */
1656 ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
1657 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1658 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1660 sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST;
1661 sc->ifp->if_ioctl = oce_ioctl;
1662 sc->ifp->if_start = oce_start;
1663 sc->ifp->if_init = oce_init;
1664 sc->ifp->if_mtu = ETHERMTU;
1665 sc->ifp->if_softc = sc;
1666 #if __FreeBSD_version >= 800000
/* Multi-queue transmit path on 8.0+. */
1667 sc->ifp->if_transmit = oce_multiq_start;
1668 sc->ifp->if_qflush = oce_multiq_flush;
1671 if_initname(sc->ifp,
1672 device_get_name(sc->dev), device_get_unit(sc->dev));
1674 sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1;
1675 IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen);
1676 IFQ_SET_READY(&sc->ifp->if_snd);
/* Hardware-assisted checksums and TSO. */
1678 sc->ifp->if_hwassist = OCE_IF_HWASSIST;
1679 sc->ifp->if_hwassist |= CSUM_TSO;
1680 sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
1682 sc->ifp->if_capabilities = OCE_IF_CAPABILITIES;
1683 sc->ifp->if_capabilities |= IFCAP_HWCSUM;
1684 sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
1685 #if defined(INET6) || defined(INET)
1686 sc->ifp->if_capabilities |= IFCAP_TSO;
1687 sc->ifp->if_capabilities |= IFCAP_LRO;
/* All advertised capabilities enabled by default. */
1690 sc->ifp->if_capenable = sc->ifp->if_capabilities;
1691 sc->ifp->if_baudrate = IF_Gbps(10UL);
1693 ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
/*
 * VLAN-config event callback: record a newly configured VLAN tag in
 * the soft-state table.  Events for other interfaces (arg mismatch)
 * and out-of-range tags are ignored — the two guard ifs presumably
 * return early; their bodies are elided in this listing.  The mailbox
 * reprogramming of the HW filter is also elided here.
 */
1700 oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1702 POCE_SOFTC sc = ifp->if_softc;
1704 if (ifp->if_softc != arg)
1706 if ((vtag == 0) || (vtag > 4095))
1709 sc->vlan_tag[vtag] = 1;
/*
 * VLAN-unconfig event callback: clear a VLAN tag from the soft-state
 * table.  Mirrors oce_add_vlan(): foreign-interface events and
 * out-of-range tags are ignored (guard bodies elided in this listing).
 */
1716 oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag)
1718 POCE_SOFTC sc = ifp->if_softc;
1720 if (ifp->if_softc != arg)
1722 if ((vtag == 0) || (vtag > 4095))
1725 sc->vlan_tag[vtag] = 0;
1732 * A max of 64 vlans can be configured in BE. If the user configures
1733 * more, place the card in vlan promiscuous mode.
1736 oce_vid_config(POCE_SOFTC sc)
1738 struct normal_vlan vtags[MAX_VLANFILTER_SIZE];
1739 uint16_t ntags = 0, i;
/*
 * If the configured VLAN count fits the HW filter and HW VLAN
 * filtering is enabled, collect the active tags and program an exact
 * filter (untagged promisc flag 1, vlan promisc 0).
 */
1742 if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) &&
1743 (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) {
1744 for (i = 0; i < MAX_VLANS; i++) {
1745 if (sc->vlan_tag[i]) {
1746 vtags[ntags].vtag = i;
1751 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
1752 vtags, ntags, 1, 0);
/* Otherwise fall back — presumably vlan-promiscuous mode; the
 * remaining arguments of this call are elided in this listing. */
1754 status = oce_config_vlan(sc, (uint8_t) sc->if_id,
/*
 * Sync the MAC programmed in the adapter with the stack's lladdr.
 * When they differ: add the new MAC (obtaining a fresh pmac_id),
 * delete the old pmac, and cache the new address in sc->macaddr.
 * Logs to the console on failure.  (Error-branch lines are elided
 * in this listing.)
 */
1761 oce_mac_addr_set(POCE_SOFTC sc)
1763 uint32_t old_pmac_id = sc->pmac_id;
/* Non-zero bcmp() means the stack lladdr changed since last sync. */
1767 status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1768 sc->macaddr.size_of_struct);
/* Add before delete so the port is never left without a valid MAC. */
1772 status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)),
1773 sc->if_id, &sc->pmac_id);
1775 status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id);
/* Remember what is now programmed, for the next comparison. */
1776 bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr,
1777 sc->macaddr.size_of_struct);
1780 device_printf(sc->dev, "Failed update macaddress\n");
/*
 * Privileged passthrough ioctl: forward a raw mailbox command from
 * userland to the firmware.  Validates a magic cookie at the head of
 * the user buffer, reads the mbx header to size the request (capped
 * at 64KB), bounces the payload through DMA-able memory, executes it
 * via oce_pass_through_mbox(), and copies the response back out.
 * Error-return lines between the checks are elided in this listing.
 */
1786 oce_handle_passthrough(struct ifnet *ifp, caddr_t data)
1788 POCE_SOFTC sc = ifp->if_softc;
1789 struct ifreq *ifr = (struct ifreq *)data;
1791 char cookie[32] = {0};
1792 void *priv_data = (void *)ifr->ifr_data;
1796 OCE_DMA_MEM dma_mem;
/* User buffer must begin with the well-known ioctl cookie. */
1799 if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE)))
1802 if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE)))
/* The mailbox request proper follows the cookie. */
1805 ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE);
1806 if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr)))
/* Bound the user-supplied length before allocating DMA memory. */
1809 req_size = le32toh(req.u0.req.request_length);
1810 if (req_size > 65536)
1813 req_size += sizeof(struct mbx_hdr);
1814 rc = oce_dma_alloc(sc, req_size, &dma_mem, 0);
1818 if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) {
1823 rc = oce_pass_through_mbox(sc, &dma_mem, req_size);
/* Firmware wrote its response into the same buffer; return it. */
1829 if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size))
1833 oce_dma_free(sc, &dma_mem);
/*
 * Once-per-second housekeeping callout: refresh HW and per-queue
 * statistics, re-sync the programmed MAC with the ifnet lladdr,
 * restart any stalled TX queues, then reschedule itself.
 */
1840 oce_local_timer(void *arg)
1842 POCE_SOFTC sc = arg;
1845 oce_refresh_nic_stats(sc);
1846 oce_refresh_queue_stats(sc);
1847 oce_mac_addr_set(sc);
/* Kick transmit on every work queue in case one wedged. */
1850 for (i = 0; i < sc->nwqs; i++)
1851 oce_tx_restart(sc, sc->wq[i]);
/* Re-arm for the next tick (hz ticks = 1 second). */
1853 callout_reset(&sc->timer, hz, oce_local_timer, sc);
/*
 * Quiesce the interface without tearing down its queues: clear the
 * RUNNING/OACTIVE flags, wait a bounded time for the TX rings to
 * drain, disable interrupts and drain their taskqueues, drain pending
 * EQ/CQ entries, then re-enable interrupts and arm EQ[0] so MCC async
 * events (e.g. link state) keep flowing while the interface is down.
 * (Several loop bodies and the RX-queue flush are elided in this
 * listing.)
 */
1858 oce_if_deactivate(POCE_SOFTC sc)
1866 sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1868 /* Wait a maximum of 400ms for outstanding TX completions. */
1869 while (mtime < 400) {
1871 for_all_wq_queues(sc, wq, i) {
1872 if (wq->ring->num_used) {
1883 /* Stop intrs and finish any bottom halves pending */
1884 oce_hw_intr_disable(sc);
1886 for (i = 0; i < sc->intr_count; i++) {
1887 if (sc->intrs[i].tq != NULL) {
1888 taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task);
1892 /* Delete RX queue in card with flush param */
1895 /* Invalidate any pending cq and eq entries*/
1896 for_all_evnt_queues(sc, eq, i)
1898 for_all_rq_queues(sc, rq, i)
1899 oce_drain_rq_cq(rq);
1900 for_all_wq_queues(sc, wq, i)
1901 oce_drain_wq_cq(wq);
1903 /* But still we need to get MCC async events.
1904 So enable intrs and also arm first EQ
1906 oce_hw_intr_enable(sc);
1907 oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE);
/*
 * Bring the interface (back) up: mark it RUNNING, and with interrupts
 * masked start every RX and TX queue, arm all event queues, then
 * unmask interrupts.  Failures to start a queue are logged (the
 * surrounding error-handling lines are elided in this listing).
 */
1914 oce_if_activate(POCE_SOFTC sc)
1921 sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
/* Keep interrupts off while the rings are being (re)started. */
1923 oce_hw_intr_disable(sc);
1927 for_all_rq_queues(sc, rq, i) {
1928 rc = oce_start_rq(rq);
1930 device_printf(sc->dev, "Unable to start RX\n");
1933 for_all_wq_queues(sc, wq, i) {
1934 rc = oce_start_wq(wq);
1936 device_printf(sc->dev, "Unable to start TX\n");
/* Arm every EQ so the hardware can raise events again. */
1940 for_all_evnt_queues(sc, eq, i)
1941 oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE);
1943 oce_hw_intr_enable(sc);
1947 /* Handle the Completion Queue for the Mailbox/Async notifications */
/*
 * Consumes MQ CQEs until an empty (zero-valid) entry is reached.
 * Async link-state events update sc->link_status and are pushed to
 * the stack via if_link_state_change(); link-state events also record
 * the speed fields (qos_link_speed appears to be in units of 10 Mbps,
 * hence the * 10 — confirm against the firmware spec).  Finally the
 * CQ is re-armed with the count of consumed entries (num_cqes is
 * declared on a line elided from this listing).
 */
1949 oce_mq_handler(void *arg)
1951 struct oce_mq *mq = (struct oce_mq *)arg;
1952 POCE_SOFTC sc = mq->parent;
1953 struct oce_cq *cq = mq->cq;
1955 struct oce_mq_cqe *cqe;
1956 struct oce_async_cqe_link_state *acqe;
1958 bus_dmamap_sync(cq->ring->dma.tag,
1959 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1960 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
/* dw[3] non-zero marks a valid, not-yet-consumed CQE. */
1961 while (cqe->u0.dw[3]) {
1962 DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe));
1963 if (cqe->u0.s.async_event) {
/* Reinterpret the CQE as an async link-state notification. */
1964 acqe = (struct oce_async_cqe_link_state *)cqe;
1965 if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) ==
1966 ASYNC_EVENT_LINK_UP) {
1967 sc->link_status = ASYNC_EVENT_LINK_UP;
1968 if_link_state_change(sc->ifp, LINK_STATE_UP);
1970 sc->link_status = ASYNC_EVENT_LINK_DOWN;
1971 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
1974 if (acqe->u0.s.event_code ==
1975 ASYNC_EVENT_CODE_LINK_STATE) {
1976 sc->link_speed = acqe->u0.s.speed;
1977 sc->qos_link_speed =
1978 (uint32_t )acqe->u0.s.qos_link_speed * 10;
/* Advance both the CQ and the MQ ring, then fetch the next CQE. */
1982 RING_GET(cq->ring, 1);
1983 RING_GET(mq->ring, 1);
1984 bus_dmamap_sync(cq->ring->dma.tag,
1985 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1986 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
/* Tell the HW how many entries we consumed; do not rearm (FALSE). */
1991 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
/*
 * Decide how many RX/TX queues to request, before interrupt
 * allocation possibly trims them (see update_queues_got()).
 * Flex10 / no-RSS / BE2 configurations skip RSS entirely (that
 * branch's body is elided in this listing).
 */
1998 setup_max_queues_want(POCE_SOFTC sc)
2002 /* Check if it is a FLEX machine. If so, don't use RSS. */
2003 if ((sc->function_mode & FNM_FLEX10_MODE) ||
2004 (!sc->rss_enable) ||
2005 (sc->flags & OCE_FLAGS_BE2)) {
2010 /* For multiq, our design is to have TX rings equal to
2011 RSS rings. So that we can pair up one RSS ring and TX
2012 to a single intr, which improves CPU cache efficiency.
2014 if (IS_BE(sc) && (!sc->be3_native))
2015 max_rss = OCE_LEGACY_MODE_RSS;
2017 max_rss = OCE_MAX_RSS;
2019 sc->nrqs = MIN(OCE_NCPUS, max_rss) + 1; /* 1 for def RX */
2020 sc->nwqs = MIN(OCE_NCPUS, max_rss);
2022 /* Hardware issue: turn off multi TX for BE2. */
2023 if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))
2032 update_queues_got(POCE_SOFTC sc)
2034 if (sc->rss_enable) {
2035 sc->nrqs = sc->intr_count + 1;
2036 sc->nwqs = sc->intr_count;
2037 if (IS_BE(sc) && (sc->flags & OCE_FLAGS_BE2))