2 * Copyright (C) 2013 Emulex
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * Contact Information:
32 * freebsd-drivers@emulex.com
36 * Costa Mesa, CA 92626
43 /*****************************************************
44 * local queue functions
45 *****************************************************/
47 static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
48 uint32_t q_len, uint32_t wq_type);
49 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
50 static void oce_wq_free(struct oce_wq *wq);
51 static void oce_wq_del(struct oce_wq *wq);
52 static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
55 uint32_t mtu, uint32_t rss);
56 static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
57 static void oce_rq_free(struct oce_rq *rq);
58 static void oce_rq_del(struct oce_rq *rq);
59 static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
64 static void oce_eq_del(struct oce_eq *eq);
65 static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
66 struct oce_eq *eq, uint32_t q_len);
67 static void oce_mq_free(struct oce_mq *mq);
68 static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
69 *mbx, size_t req_size, enum qtype qtype, int version);
70 struct oce_cq *oce_cq_create(POCE_SOFTC sc,
75 uint32_t is_eventable,
76 uint32_t nodelay, uint32_t ncoalesce);
77 static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
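/*
 * Queue topology built by this file: each interrupt vector owns an event
 * queue (EQ); every WQ, RQ and MQ is given its own completion queue (CQ)
 * that is chained onto one of those EQs via eq->cq[], and the per-queue
 * cq_handler attached below is what services completions when that EQ fires.
 */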
82 * @brief Create and initialize all the queues on the board
83 * @param sc software handle to the device
84 * @returns 0 if successful, or error
87 oce_queue_init_all(POCE_SOFTC sc)
89 int rc = 0, i, vector;
92 struct oce_aic_obj *aic;
94 /* alloc TX/RX queues */
95 for_all_wq_queues(sc, wq, i) {
96 sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
97 NIC_WQ_TYPE_STANDARD);
103 for_all_rq_queues(sc, rq, i) {
104 sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
105 OCE_MAX_JUMBO_FRAME_SIZE,
106 (i == 0) ? 0 : is_rss_enabled(sc));
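/*
 * rq[0] is the default (non-RSS) receive queue; the remaining RQs are
 * flagged as RSS queues only when RSS is enabled.
 */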
111 /* Create network interface on card */
112 if (oce_create_nw_interface(sc))
115 /* create all of the event queues */
116 for (vector = 0; vector < sc->intr_count; vector++) {
117 /* setup aic defaults for each event queue */
118 aic = &sc->aic_obj[vector];
119 aic->max_eqd = OCE_MAX_EQD;
120 aic->min_eqd = OCE_MIN_EQD;
121 aic->et_eqd = OCE_MIN_EQD;
124 sc->eq[vector] = oce_eq_create(sc, sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
125 EQE_SIZE_4,0, vector);
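/*
 * One EQ per interrupt vector.  The adaptive interrupt coalescing (aic)
 * state starts at the minimum EQ delay, and the EQ is sized to 2048
 * entries (instead of 1024) when hardware LRO is enabled.
 */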
131 /* create Tx, Rx and mcc queues */
132 for_all_wq_queues(sc, wq, i) {
133 rc = oce_wq_create(wq, sc->eq[i]);
137 TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
140 for_all_rq_queues(sc, rq, i) {
141 rc = oce_rq_create(rq, sc->if_id,
142 sc->eq[(i == 0) ? 0:(i-1)]);
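/*
 * EQ placement: WQ i completes on EQ i, the default RQ shares EQ 0, and
 * RSS RQ i is placed on EQ i-1.
 */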
148 sc->mq = oce_mq_create(sc, sc->eq[0], 64);
155 oce_queue_release_all(sc);
162 * @brief Releases all the queues created
163 * @param sc software handle to the device
166 oce_queue_release_all(POCE_SOFTC sc)
173 /* before deleting lro queues, we have to disable hwlro */
175 oce_mbox_nic_set_iface_lro_config(sc, 0);
177 for_all_rq_queues(sc, rq, i) {
179 oce_rq_del(sc->rq[i]);
180 oce_rq_free(sc->rq[i]);
184 for_all_wq_queues(sc, wq, i) {
186 oce_wq_del(sc->wq[i]);
187 oce_wq_free(sc->wq[i]);
194 for_all_evnt_queues(sc, eq, i) {
196 oce_eq_del(sc->eq[i]);
203 * @brief Function to create a WQ for NIC Tx
204 * @param sc software handle to the device
205 * @param q_len number of entries in the queue
206 * @param wq_type work queue type
207 * @returns the pointer to the WQ created or NULL on failure
210 oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
215 /* q_len must be min 256 and max 2k */
216 if (q_len < 256 || q_len > 2048) {
217 device_printf(sc->dev,
218 "Invalid q length. Must be "
219 "[256, 2000]: 0x%x\n", q_len);
224 wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
228 /* Set the wq config */
229 wq->cfg.q_len = q_len;
230 wq->cfg.wq_type = (uint8_t) wq_type;
231 wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
232 wq->cfg.nbufs = 2 * wq->cfg.q_len;
233 wq->cfg.nhdl = 2 * wq->cfg.q_len;
235 wq->parent = (void *)sc;
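/*
 * nbufs/nhdl size the per-WQ packet bookkeeping at twice the ring length,
 * presumably to leave headroom for multi-fragment transmits.
 */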
237 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
244 PAGE_SIZE, 0, NULL, NULL, &wq->tag);
250 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
251 rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
256 wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
261 LOCK_CREATE(&wq->tx_lock, "TX_lock");
262 LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");
264 #if __FreeBSD_version >= 800000
265 /* Allocate buf ring for multiqueue */
266 wq->br = buf_ring_alloc(4096, M_DEVBUF,
267 M_WAITOK, &wq->tx_lock.mutex);
275 device_printf(sc->dev, "Create WQ failed\n");
283 * @brief Frees the work queue
284 * @param wq pointer to work queue to free
287 oce_wq_free(struct oce_wq *wq)
289 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
292 taskqueue_drain(taskqueue_swi, &wq->txtask);
294 if (wq->ring != NULL) {
295 oce_destroy_ring_buffer(sc, wq->ring);
299 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
300 if (wq->pckts[i].map != NULL) {
301 bus_dmamap_unload(wq->tag, wq->pckts[i].map);
302 bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
303 wq->pckts[i].map = NULL;
308 bus_dma_tag_destroy(wq->tag);
310 buf_ring_free(wq->br, M_DEVBUF);
312 LOCK_DESTROY(&wq->tx_lock);
313 LOCK_DESTROY(&wq->tx_compl_lock);
320 * @brief Create a work queue
321 * @param wq pointer to work queue
322 * @param eq pointer to associated event queue
325 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
327 POCE_SOFTC sc = wq->parent;
332 cq = oce_cq_create(sc,
335 sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
342 rc = oce_mbox_create_wq(wq);
346 wq->qstate = QCREATED;
347 wq->wq_free = wq->cfg.q_len;
351 eq->cq[eq->cq_valid] = cq;
354 cq->cq_handler = oce_wq_handler;
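/*
 * The TX completion handler is attached here; completions for this WQ are
 * delivered through the CQ chained onto the parent EQ above
 * (eq->cq[eq->cq_valid] = cq).
 */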
359 device_printf(sc->dev, "WQ create failed\n");
368 * @brief Delete a work queue
369 * @param wq pointer to work queue
372 oce_wq_del(struct oce_wq *wq)
375 struct mbx_delete_nic_wq *fwcmd;
376 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
378 if (wq->qstate == QCREATED) {
379 bzero(&mbx, sizeof(struct oce_mbx));
380 /* now fill the command */
381 fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
382 fwcmd->params.req.wq_id = wq->wq_id;
383 (void)oce_destroy_q(sc, &mbx,
384 sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
385 wq->qstate = QDELETED;
388 if (wq->cq != NULL) {
389 oce_cq_del(sc, wq->cq);
397 * @brief function to allocate receive queue resources
398 * @param sc software handle to the device
399 * @param q_len length of receive queue
400 * @param frag_size size of a receive queue fragment
401 * @param mtu maximum transmission unit
402 * @param rss is-rss-queue flag
403 * @returns the pointer to the RQ created or NULL on failure
406 oce_rq *oce_rq_init(POCE_SOFTC sc,
409 uint32_t mtu, uint32_t rss)
414 if (OCE_LOG2(frag_size) <= 0)
417 if ((q_len == 0) || (q_len > 1024))
420 /* allocate the rq */
421 rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
426 rq->cfg.q_len = q_len;
427 rq->cfg.frag_size = frag_size;
430 rq->lro_pkts_queued = 0;
431 rq->cfg.is_rss_queue = rss;
434 rq->parent = (void *)sc;
436 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
442 1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);
446 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
447 rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
452 /* create the ring buffer */
453 rq->ring = oce_create_ring_buffer(sc, q_len,
454 sizeof(struct oce_nic_rqe));
458 LOCK_CREATE(&rq->rx_lock, "RX_lock");
463 device_printf(sc->dev, "Create RQ failed\n");
472 * @brief Free a receive queue
473 * @param rq pointer to receive queue
476 oce_rq_free(struct oce_rq *rq)
478 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
481 if (rq->ring != NULL) {
482 oce_destroy_ring_buffer(sc, rq->ring);
485 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
486 if (rq->pckts[i].map != NULL) {
487 bus_dmamap_unload(rq->tag, rq->pckts[i].map);
488 bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
489 rq->pckts[i].map = NULL;
491 if (rq->pckts[i].mbuf) {
492 m_free(rq->pckts[i].mbuf);
493 rq->pckts[i].mbuf = NULL;
498 bus_dma_tag_destroy(rq->tag);
500 LOCK_DESTROY(&rq->rx_lock);
508 * @brief Create a receive queue
509 * @param rq receive queue
510 * @param if_id interface identifier index
511 * @param eq pointer to event queue
514 oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
516 POCE_SOFTC sc = rq->parent;
519 cq = oce_cq_create(sc, eq,
520 sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
521 sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
527 rq->cfg.if_id = if_id;
529 /* Don't create the RQ here; it is created in if_activate */
533 eq->cq[eq->cq_valid] = cq;
536 cq->cq_handler = oce_rq_handler;
546 * @brief Delete a receive queue
547 * @param rq receive queue
550 oce_rq_del(struct oce_rq *rq)
552 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
554 struct mbx_delete_nic_rq *fwcmd;
555 struct mbx_delete_nic_rq_v1 *fwcmd1;
557 if (rq->qstate == QCREATED) {
558 bzero(&mbx, sizeof(mbx));
560 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
561 fwcmd->params.req.rq_id = rq->rq_id;
562 (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
564 fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
565 fwcmd1->params.req.rq_id = rq->rq_id;
566 fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
567 (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
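/*
 * Unlike the plain (v0) delete, the v1 command also carries rq_flags so
 * the firmware tears down the RSS/LRO state associated with the queue.
 */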
569 rq->qstate = QDELETED;
572 if (rq->cq != NULL) {
573 oce_cq_del(sc, rq->cq);
581 * @brief function to create an event queue
582 * @param sc software handle to the device
583 * @param q_len length of event queue
584 * @param item_size size of an event queue item
585 * @param eq_delay event queue delay
586 * @retval eq success, pointer to event queue
587 * @retval NULL failure
590 oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
599 eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
603 eq->parent = (void *)sc;
605 eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
609 eq->eq_cfg.q_len = q_len;
610 eq->eq_cfg.item_size = item_size;
611 eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
613 rc = oce_mbox_create_eq(eq);
617 sc->intrs[sc->neqs++].eq = eq;
630 * @brief Function to delete an event queue
631 * @param eq pointer to an event queue
634 oce_eq_del(struct oce_eq *eq)
637 struct mbx_destroy_common_eq *fwcmd;
638 POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
640 if (eq->eq_id != 0xffff) {
641 bzero(&mbx, sizeof(mbx));
642 fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
643 fwcmd->params.req.id = eq->eq_id;
644 (void)oce_destroy_q(sc, &mbx,
645 sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
648 if (eq->ring != NULL) {
649 oce_destroy_ring_buffer(sc, eq->ring);
661 * @brief Function to create an MQ
662 * @param sc software handle to the device
663 * @param eq the EQ to associate with the MQ for event notification
664 * @param q_len the number of entries to create in the MQ
665 * @returns pointer to the created MQ, or NULL on failure
667 static struct oce_mq *
668 oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
671 struct mbx_create_common_mq_ex *fwcmd = NULL;
672 struct oce_mq *mq = NULL;
675 oce_mq_ext_ctx_t *ctx;
680 cq = oce_cq_create(sc, eq, CQ_LEN_256,
681 sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
685 /* allocate the mq */
686 mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
694 mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
698 bzero(&mbx, sizeof(struct oce_mbx));
700 IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0);
701 fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
702 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
703 MBX_SUBSYSTEM_COMMON,
704 OPCODE_COMMON_CREATE_MQ_EXT,
706 sizeof(struct mbx_create_common_mq_ex),
709 num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
710 page_size = mq->ring->num_items * mq->ring->item_size;
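/*
 * The ring size handed to the firmware below is encoded as
 * OCE_LOG2(q_len) + 1.  A minimal sketch of that encoding, assuming
 * OCE_LOG2() is an integer (floor) log2:
 *
 *	uint32_t enc = 0, n = q_len;
 *	while (n > 1) {
 *		n >>= 1;	// floor(log2(q_len))
 *		enc++;
 *	}
 *	enc += 1;		// e.g. q_len = 64 gives enc = 7
 */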
712 ctx = &fwcmd->params.req.context;
715 ctx->v1.num_pages = num_pages;
716 ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
717 ctx->v1.cq_id = cq->cq_id;
719 ctx->v1.async_cq_id = cq->cq_id;
720 ctx->v1.async_cq_valid = 1;
721 /* Subscribe to Link State and Group 5 Events (bits 1 & 5 set) */
722 ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
723 ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
724 ctx->v1.async_evt_bitmap |=
725 LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
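/*
 * 0x00000022 == (1 << 1) | (1 << 5), i.e. bits 1 and 5 as the comment
 * above says; the two ORs that follow additionally subscribe to the
 * DEBUG and SLIPORT async event codes.
 */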
728 ctx->v0.num_pages = num_pages;
729 ctx->v0.cq_id = cq->cq_id;
730 ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
732 /* v0 context: subscribe to all async events (full event bitmap) */
733 ctx->v0.async_evt_bitmap = 0xffffffff;
736 mbx.u0.s.embedded = 1;
737 mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
738 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
740 rc = oce_mbox_post(sc, &mbx, NULL);
742 rc = fwcmd->hdr.u0.rsp.status;
744 device_printf(sc->dev,"%s failed - cmd status: %d\n",
748 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
750 eq->cq[eq->cq_valid] = cq;
753 mq->cfg.q_len = (uint8_t) q_len;
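/*
 * q_len is stored through a uint8_t cast, so values above 255 would be
 * truncated; oce_queue_init_all() creates the MQ with 64 entries.
 */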
755 mq->qstate = QCREATED;
758 mq->cq->cq_handler = oce_mq_handler;
763 device_printf(sc->dev, "MQ create failed\n");
774 * @brief Function to free a mailbox queue
775 * @param mq pointer to a mailbox queue
778 oce_mq_free(struct oce_mq *mq)
780 POCE_SOFTC sc = (POCE_SOFTC) mq->parent;
782 struct mbx_destroy_common_mq *fwcmd;
787 if (mq->ring != NULL) {
788 oce_destroy_ring_buffer(sc, mq->ring);
790 if (mq->qstate == QCREATED) {
791 bzero(&mbx, sizeof (struct oce_mbx));
792 fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
793 fwcmd->params.req.id = mq->mq_id;
794 (void) oce_destroy_q(sc, &mbx,
795 sizeof (struct mbx_destroy_common_mq),
798 mq->qstate = QDELETED;
801 if (mq->cq != NULL) {
802 oce_cq_del(sc, mq->cq);
813 * @brief Function to delete an EQ, CQ, MQ, WQ or RQ
814 * @param sc software handle to the device
815 * @param mbx mailbox command to send to the fw to delete the queue
816 * (mbx contains the queue information to delete)
817 * @param req_size the size of the mbx payload dependent on the qtype
818 * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
819 * @returns 0 on success, failure otherwise
822 oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
823 enum qtype qtype, int version)
825 struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
832 opcode = OPCODE_COMMON_DESTROY_EQ;
833 subsys = MBX_SUBSYSTEM_COMMON;
836 opcode = OPCODE_COMMON_DESTROY_CQ;
837 subsys = MBX_SUBSYSTEM_COMMON;
840 opcode = OPCODE_COMMON_DESTROY_MQ;
841 subsys = MBX_SUBSYSTEM_COMMON;
844 opcode = NIC_DELETE_WQ;
845 subsys = MBX_SUBSYSTEM_NIC;
848 opcode = NIC_DELETE_RQ;
849 subsys = MBX_SUBSYSTEM_NIC;
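/*
 * EQ, CQ and MQ teardown uses COMMON-subsystem opcodes, while WQ and RQ
 * deletes are NIC-subsystem commands; the pair chosen here is stamped
 * into the request header below.
 */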
855 mbx_common_req_hdr_init(hdr, 0, 0, subsys,
856 opcode, MBX_TIMEOUT_SEC, req_size,
859 mbx->u0.s.embedded = 1;
860 mbx->payload_length = (uint32_t) req_size;
861 DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
863 rc = oce_mbox_post(sc, mbx, NULL);
865 rc = hdr->u0.rsp.status;
867 device_printf(sc->dev,"%s failed - cmd status: %d\n",
875 * @brief Function to create a completion queue
876 * @param sc software handle to the device
877 * @param eq optional eq to be associated with the cq
878 * @param q_len length of completion queue
879 * @param item_size size of completion queue items
880 * @param sol_event command context event
881 * @param is_eventable eventable flag
882 * @param nodelay no delay flag
883 * @param ncoalesce coalescing setting for the CQ
884 * @returns pointer to the cq created, NULL on failure
887 oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
891 uint32_t is_eventable,
892 uint32_t nodelay, uint32_t ncoalesce)
894 struct oce_cq *cq = NULL;
897 cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
901 cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
907 cq->cq_cfg.q_len = q_len;
908 cq->cq_cfg.item_size = item_size;
909 cq->cq_cfg.nodelay = (uint8_t) nodelay;
911 rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
915 sc->cq[sc->ncqs++] = cq;
920 device_printf(sc->dev, "CQ create failed\n");
928 * @brief Deletes the completion queue
929 * @param sc software handle to the device
930 * @param cq pointer to a completion queue
933 oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
936 struct mbx_destroy_common_cq *fwcmd;
938 if (cq->ring != NULL) {
940 bzero(&mbx, sizeof(struct oce_mbx));
941 /* now fill the command */
942 fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
943 fwcmd->params.req.id = cq->cq_id;
944 (void)oce_destroy_q(sc, &mbx,
945 sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
946 /* Now destroy the ring */
947 oce_destroy_ring_buffer(sc, cq->ring);
958 * @brief Start a receive queue
959 * @param rq pointer to a receive queue
962 oce_start_rq(struct oce_rq *rq)
964 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
968 rc = oce_alloc_rx_bufs(rq, 960);
970 rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);
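/*
 * The receive ring is pre-filled above: one path posts 960 buffers, the
 * other fills the ring up to q_len - 1 entries, depending on whether
 * hardware LRO is in use.
 */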
973 oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
981 * @brief Start a work queue
982 * @param wq pointer to a work queue
985 oce_start_wq(struct oce_wq *wq)
987 oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
994 * @brief Start a mailbox queue
995 * @param mq pointer to a mailbox queue
998 oce_start_mq(struct oce_mq *mq)
1000 oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
1007 * @brief Function to arm an EQ so that it can generate events
1008 * @param sc software handle to the device
1009 * @param qid id of the EQ returned by the fw at the time of creation
1010 * @param npopped number of EQEs to arm
1011 * @param rearm rearm bit enable/disable
1012 * @param clearint bit to clear the interrupt condition because of which
1013 * EQEs are generated
1016 oce_arm_eq(POCE_SOFTC sc,
1017 int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
1019 eq_db_t eq_db = { 0 };
1021 eq_db.bits.rearm = rearm;
1022 eq_db.bits.event = 1;
1023 eq_db.bits.num_popped = npopped;
1024 eq_db.bits.clrint = clearint;
1025 eq_db.bits.qid = qid;
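/*
 * Arming the EQ is a single 32-bit write to the PD_EQ_DB doorbell below:
 * it acknowledges 'npopped' consumed EQEs and, through the rearm and
 * clrint bits, tells the hardware whether to keep raising events and
 * whether to clear the pending interrupt condition.
 */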
1026 OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
1034 * @brief Function to arm a CQ with CQEs
1035 * @param sc software handle to the device
1036 * @param qid id of the CQ returned by the fw at the time of creation
1037 * @param npopped number of CQEs to arm
1038 * @param rearm rearm bit enable/disable
1040 void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
1042 cq_db_t cq_db = { 0 };
1044 cq_db.bits.rearm = rearm;
1045 cq_db.bits.num_popped = npopped;
1046 cq_db.bits.event = 0;
1047 cq_db.bits.qid = qid;
1048 OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
1056 * @brief function to clean up an EQ during interface stop
1057 * @param eq pointer to event queue structure
1058 * @returns the number of EQEs processed
1061 oce_drain_eq(struct oce_eq *eq)
1064 struct oce_eqe *eqe;
1065 uint16_t num_eqe = 0;
1066 POCE_SOFTC sc = eq->parent;
1069 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1073 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
1074 BUS_DMASYNC_POSTWRITE);
1076 RING_GET(eq->ring, 1);
1080 oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
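/*
 * The acknowledgement above uses rearm = FALSE and clearint = TRUE, so
 * the EQ stays unarmed while the port is being stopped.
 */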
1087 oce_drain_wq_cq(struct oce_wq *wq)
1089 POCE_SOFTC sc = wq->parent;
1090 struct oce_cq *cq = wq->cq;
1091 struct oce_nic_tx_cqe *cqe;
1094 bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1095 BUS_DMASYNC_POSTWRITE);
1098 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
1099 if (cqe->u0.dw[3] == 0)
1102 bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1103 BUS_DMASYNC_POSTWRITE);
1104 RING_GET(cq->ring, 1);
1109 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1115 * @brief function to drain an MCQ and process its CQEs
1116 * @param arg opaque pointer identifying the CQ to drain
1118 * @returns the number of CQEs processed
1121 oce_drain_mq_cq(void *arg)
1123 /* TODO: additional code. */
1130 * @brief function to process a Receive queue
1131 * @param rq pointer to the RQ to drain
1132 * @return number of cqes processed
1135 oce_drain_rq_cq(struct oce_rq *rq)
1137 struct oce_nic_rx_cqe *cqe;
1138 uint16_t num_cqe = 0;
1144 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1145 /* dequeue till you reach an invalid cqe */
1146 while (RQ_CQE_VALID(cqe)) {
1147 RQ_CQE_INVALIDATE(cqe);
1148 RING_GET(cq->ring, 1);
1149 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
1150 struct oce_nic_rx_cqe);
1153 oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);
1160 oce_free_posted_rxbuf(struct oce_rq *rq)
1162 struct oce_packet_desc *pd;
1164 while (rq->pending) {
1166 pd = &rq->pckts[rq->ring->cidx];
1167 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1168 bus_dmamap_unload(rq->tag, pd->map);
1169 if (pd->mbuf != NULL) {
1174 RING_GET(rq->ring,1);
1181 oce_rx_cq_clean_hwlro(struct oce_rq *rq)
1183 struct oce_cq *cq = rq->cq;
1184 POCE_SOFTC sc = rq->parent;
1185 struct nic_hwlro_singleton_cqe *cqe;
1186 struct nic_hwlro_cqe_part2 *cqe2;
1188 int flush_compl = 0;
1192 bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1193 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
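/*
 * A hardware-LRO RX CQ carries three CQE flavours: a singleton CQE
 * (type 0) describing a complete packet, and a part1/part2 pair
 * (types 1 and 2) describing a coalesced burst; the cleanup below
 * discards the RX fragments each of them references.
 */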
1195 if(cqe->cqe_type == 0) { /* singleton cqe */
1196 /* we should not get singleton cqe after cqe1 on same rq */
1197 if(rq->cqe_firstpart != NULL) {
1198 device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
1199 goto exit_rx_cq_clean_hwlro;
1201 num_frags = cqe->pkt_size / rq->cfg.frag_size;
1202 if(cqe->pkt_size % rq->cfg.frag_size)
1204 oce_discard_rx_comp(rq, num_frags);
1205 /* Check if CQE is flush completion */
1209 RING_GET(cq->ring, 1);
1210 }else if(cqe->cqe_type == 0x1) { /* first part */
1211 /* we should not get cqe1 after cqe1 on same rq */
1212 if(rq->cqe_firstpart != NULL) {
1213 device_printf(sc->dev, "Got cqe1 after cqe1 \n");
1214 goto exit_rx_cq_clean_hwlro;
1216 rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
1217 RING_GET(cq->ring, 1);
1218 }else if(cqe->cqe_type == 0x2) { /* second part */
1219 cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
1220 /* We should not get cqe2 without cqe1 */
1221 if(rq->cqe_firstpart == NULL) {
1222 device_printf(sc->dev, "Got cqe2 without cqe1 \n");
1223 goto exit_rx_cq_clean_hwlro;
1225 num_frags = cqe2->coalesced_size / rq->cfg.frag_size;
1226 if(cqe2->coalesced_size % rq->cfg.frag_size)
1229 /* Flush completion will always come in singleton CQE */
1230 oce_discard_rx_comp(rq, num_frags);
1232 rq->cqe_firstpart->valid = 0;
1234 rq->cqe_firstpart = NULL;
1235 RING_GET(cq->ring, 1);
1237 oce_arm_cq(sc, cq->cq_id, 1, FALSE);
1241 if (flush_wait++ > 100) {
1242 device_printf(sc->dev, "did not receive hwlro flush compl\n");
1245 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
1250 /* After cleanup, leave the CQ in unarmed state */
1251 oce_arm_cq(sc, cq->cq_id, 0, FALSE);
1253 exit_rx_cq_clean_hwlro:
1259 oce_rx_cq_clean(struct oce_rq *rq)
1261 struct oce_nic_rx_cqe *cqe;
1265 int flush_compl = 0;
1270 bus_dmamap_sync(cq->ring->dma.tag,
1271 cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
1272 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1273 if(RQ_CQE_VALID(cqe)) {
1274 DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
1275 oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
1276 /* Check if CQE is flush completion */
1277 if((cqe->u0.s.num_fragments==0)&&(cqe->u0.s.pkt_size == 0)&&(cqe->u0.s.error == 0))
1280 RQ_CQE_INVALIDATE(cqe);
1281 RING_GET(cq->ring, 1);
1282 #if defined(INET6) || defined(INET)
1283 if (IF_LRO_ENABLED(sc))
1284 oce_rx_flush_lro(rq);
1286 oce_arm_cq(sc, cq->cq_id, 1, FALSE);
1290 if (flush_wait++ > 100) {
1291 device_printf(sc->dev, "did not receive flush compl\n");
1294 oce_arm_cq(sc, cq->cq_id, 0, TRUE);
1299 /* After cleanup, leave the CQ in unarmed state */
1300 oce_arm_cq(sc, cq->cq_id, 0, FALSE);
1304 oce_stop_rx(POCE_SOFTC sc)
1307 struct mbx_delete_nic_rq *fwcmd;
1308 struct mbx_delete_nic_rq_v1 *fwcmd1;
1312 /* before deleting the RQs, disable hwlro */
1313 if(sc->enable_hwlro)
1314 oce_mbox_nic_set_iface_lro_config(sc, 0);
1316 for_all_rq_queues(sc, rq, i) {
1317 if (rq->qstate == QCREATED) {
1318 /* Delete rxq in firmware */
1321 bzero(&mbx, sizeof(mbx));
1323 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1324 fwcmd->params.req.rq_id = rq->rq_id;
1325 (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
1327 fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
1328 fwcmd1->params.req.rq_id = rq->rq_id;
1329 fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
1331 (void)oce_destroy_q(sc,&mbx,sizeof(struct mbx_delete_nic_rq_v1),QTYPE_RQ,1);
1333 rq->qstate = QDELETED;
1338 oce_rx_cq_clean(rq);
1340 oce_rx_cq_clean_hwlro(rq);
1342 /* Free posted RX buffers that are not used */
1343 oce_free_posted_rxbuf(rq);
1344 UNLOCK(&rq->rx_lock);
1352 oce_start_rx(POCE_SOFTC sc)
1357 for_all_rq_queues(sc, rq, i) {
1358 if (rq->qstate == QCREATED)
1360 if((i == 0) || (!sc->enable_hwlro)) {
1361 rc = oce_mbox_create_rq(rq);
1366 rc = oce_mbox_create_rq_v2(rq);
1371 /* reset queue pointers */
1372 rq->qstate = QCREATED;
1378 if(sc->enable_hwlro) {
1379 rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
1387 if (is_rss_enabled(sc)) {
1388 rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1397 device_printf(sc->dev, "Start RX failed\n");