 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2013 Emulex
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the Emulex Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * freebsd-drivers@emulex.com
 * Costa Mesa, CA 92626
/*****************************************************
 * local queue functions
 *****************************************************/

static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
				  uint32_t q_len, uint32_t wq_type);
static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
static void oce_wq_free(struct oce_wq *wq);
static void oce_wq_del(struct oce_wq *wq);
static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
				  uint32_t q_len, uint32_t frag_size,
				  uint32_t mtu, uint32_t rss);
static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
static void oce_rq_free(struct oce_rq *rq);
static void oce_rq_del(struct oce_rq *rq);
static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
				    uint32_t q_len, uint32_t item_size,
				    uint32_t eq_delay, uint32_t vector);
static void oce_eq_del(struct oce_eq *eq);
static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
				    struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
			 *mbx, size_t req_size, enum qtype qtype, int version);
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
			     struct oce_eq *eq,
			     uint32_t q_len, uint32_t item_size,
			     uint32_t sol_event,
			     uint32_t is_eventable,
			     uint32_t nodelay, uint32_t ncoalesce);
static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
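
/*
 * Queue bring-up order, as implemented below: the per-vector event queues
 * (EQs) are created first; each WQ, RQ and the MQ then creates its own
 * completion queue (CQ), attaches it to one of the EQs and finally creates
 * the ring itself.  oce_queue_release_all() tears everything down in the
 * opposite order, deleting the EQs last.
 */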
 * @brief Create and initialize all the queues on the board
 * @param sc software handle to the device
 * @returns 0 if successful, or error
oce_queue_init_all(POCE_SOFTC sc)
	int rc = 0, i, vector;
	struct oce_aic_obj *aic;

	/* alloc TX/RX queues */
	for_all_wq_queues(sc, wq, i) {
		sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
					NIC_WQ_TYPE_STANDARD);

	for_all_rq_queues(sc, rq, i) {
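		/*
		 * RQ 0 is the default (non-RSS) receive queue; the remaining
		 * RQs are marked as RSS queues only when RSS is enabled.
		 */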
		sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
					OCE_MAX_JUMBO_FRAME_SIZE,
					(i == 0) ? 0 : is_rss_enabled(sc));

	/* Create network interface on card */
	if (oce_create_nw_interface(sc))

	/* create all of the event queues */
	for (vector = 0; vector < sc->intr_count; vector++) {
		/* setup aic defaults for each event queue */
		aic = &sc->aic_obj[vector];
		aic->max_eqd = OCE_MAX_EQD;
		aic->min_eqd = OCE_MIN_EQD;
		aic->et_eqd = OCE_MIN_EQD;
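		/*
		 * Adaptive interrupt coalescing starts at the minimum EQ
		 * delay; the runtime logic may later move et_eqd anywhere
		 * between min_eqd and max_eqd.
		 */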
		sc->eq[vector] = oce_eq_create(sc, sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
						EQE_SIZE_4, 0, vector);

	/* create Tx, Rx and mcc queues */
	for_all_wq_queues(sc, wq, i) {
		rc = oce_wq_create(wq, sc->eq[i]);

		TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);

	for_all_rq_queues(sc, rq, i) {
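		/*
		 * The default RQ (index 0) shares EQ 0 with WQ 0; each RSS
		 * RQ i is attached to EQ i-1, spreading receive completions
		 * across the remaining vectors.
		 */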
		rc = oce_rq_create(rq, sc->if_id,
					sc->eq[(i == 0) ? 0 : (i - 1)]);

	sc->mq = oce_mq_create(sc, sc->eq[0], 64);

	oce_queue_release_all(sc);
 * @brief Releases all the queues created on the board
 * @param sc software handle to the device
oce_queue_release_all(POCE_SOFTC sc)

	/* before deleting lro queues, we have to disable hwlro */
	oce_mbox_nic_set_iface_lro_config(sc, 0);

	for_all_rq_queues(sc, rq, i) {
		oce_rq_del(sc->rq[i]);
		oce_rq_free(sc->rq[i]);

	for_all_wq_queues(sc, wq, i) {
		oce_wq_del(sc->wq[i]);
		oce_wq_free(sc->wq[i]);

	for_all_evnt_queues(sc, eq, i) {
		oce_eq_del(sc->eq[i]);
 * @brief Function to create a WQ for NIC Tx
 * @param sc software handle to the device
 * @param q_len number of entries in the queue
 * @param wq_type work queue type
 * @returns the pointer to the WQ created or NULL on failure
oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
	/* q_len must be min 256 and max 2k */
	if (q_len < 256 || q_len > 2048) {
		device_printf(sc->dev,
			"Invalid q length. Must be "
			"[256, 2048]: 0x%x\n", q_len);
	wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);

	/* Set the wq config */
	wq->cfg.q_len = q_len;
	wq->cfg.wq_type = (uint8_t) wq_type;
	wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
	wq->cfg.nbufs = 2 * wq->cfg.q_len;
	wq->cfg.nhdl = 2 * wq->cfg.q_len;

	wq->parent = (void *)sc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				PAGE_SIZE, 0, NULL, NULL, &wq->tag);

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);

	wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);

	LOCK_CREATE(&wq->tx_lock, "TX_lock");
	LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");

	/* Allocate buf ring for multiqueue */
	wq->br = buf_ring_alloc(4096, M_DEVBUF,
			M_WAITOK, &wq->tx_lock.mutex);
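	/*
	 * The buf ring is a 4096-entry software transmit ring, protected by
	 * tx_lock, from which the multiqueue transmit path drains mbufs onto
	 * the hardware WQ.
	 */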
	device_printf(sc->dev, "Create WQ failed\n");

 * @brief Frees the work queue
 * @param wq pointer to work queue to free
oce_wq_free(struct oce_wq *wq)
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;

	taskqueue_drain(taskqueue_swi, &wq->txtask);

	if (wq->ring != NULL) {
		oce_destroy_ring_buffer(sc, wq->ring);

	for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
		if (wq->pckts[i].map != NULL) {
			bus_dmamap_unload(wq->tag, wq->pckts[i].map);
			bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
			wq->pckts[i].map = NULL;

	bus_dma_tag_destroy(wq->tag);

	buf_ring_free(wq->br, M_DEVBUF);

	LOCK_DESTROY(&wq->tx_lock);
	LOCK_DESTROY(&wq->tx_compl_lock);

 * @brief Create a work queue
 * @param wq pointer to work queue
 * @param eq pointer to associated event queue
oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
	POCE_SOFTC sc = wq->parent;

	cq = oce_cq_create(sc,
			   sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
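	/*
	 * In oce_cq_create()'s parameter order the trailing arguments are
	 * sol_event = 0, is_eventable = 1, nodelay = 0 and ncoalesce = 3.
	 */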
	rc = oce_mbox_create_wq(wq);

	wq->qstate = QCREATED;
	wq->wq_free = wq->cfg.q_len;

	eq->cq[eq->cq_valid] = cq;
	cq->cq_handler = oce_wq_handler;

	device_printf(sc->dev, "WQ create failed\n");

 * @brief Delete a work queue
 * @param wq pointer to work queue
oce_wq_del(struct oce_wq *wq)
	struct mbx_delete_nic_wq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) wq->parent;

	if (wq->qstate == QCREATED) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
		fwcmd->params.req.wq_id = wq->wq_id;
		(void)oce_destroy_q(sc, &mbx,
				sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
		wq->qstate = QDELETED;

	if (wq->cq != NULL) {
		oce_cq_del(sc, wq->cq);
 * @brief function to allocate receive queue resources
 * @param sc software handle to the device
 * @param q_len length of receive queue
 * @param frag_size size of a receive queue fragment
 * @param mtu maximum transmission unit
 * @param rss is-rss-queue flag
 * @returns the pointer to the RQ created or NULL on failure
oce_rq *oce_rq_init(POCE_SOFTC sc,
		    uint32_t q_len, uint32_t frag_size,
		    uint32_t mtu, uint32_t rss)
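	/*
	 * frag_size is expected to be a power of two (it is presumably
	 * log2-encoded when handed to the firmware); the check below only
	 * verifies that OCE_LOG2() yields a positive value.
	 */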
	if (OCE_LOG2(frag_size) <= 0)

	if ((q_len == 0) || (q_len > 1024))

	/* allocate the rq */
	rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);

	rq->cfg.q_len = q_len;
	rq->cfg.frag_size = frag_size;
	rq->lro_pkts_queued = 0;
	rq->cfg.is_rss_queue = rss;

	rq->parent = (void *)sc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
				1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);

	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);

	/* create the ring buffer */
	rq->ring = oce_create_ring_buffer(sc, q_len,
				sizeof(struct oce_nic_rqe));

	LOCK_CREATE(&rq->rx_lock, "RX_lock");

	device_printf(sc->dev, "Create RQ failed\n");

 * @brief Free a receive queue
 * @param rq pointer to receive queue
oce_rq_free(struct oce_rq *rq)
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	if (rq->ring != NULL) {
		oce_destroy_ring_buffer(sc, rq->ring);

	for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
		if (rq->pckts[i].map != NULL) {
			bus_dmamap_unload(rq->tag, rq->pckts[i].map);
			bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
			rq->pckts[i].map = NULL;
		if (rq->pckts[i].mbuf) {
			m_free(rq->pckts[i].mbuf);
			rq->pckts[i].mbuf = NULL;

	bus_dma_tag_destroy(rq->tag);

	LOCK_DESTROY(&rq->rx_lock);
 * @brief Create a receive queue
 * @param rq receive queue
 * @param if_id interface identifier index
 * @param eq pointer to event queue
oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
	POCE_SOFTC sc = rq->parent;

	cq = oce_cq_create(sc, eq,
			   sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
			   sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);

	rq->cfg.if_id = if_id;

	/* Don't create RQ here. Create in if_activate */

	eq->cq[eq->cq_valid] = cq;
	cq->cq_handler = oce_rq_handler;
 * @brief Delete a receive queue
 * @param rq receive queue
oce_rq_del(struct oce_rq *rq)
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;

	if (rq->qstate == QCREATED) {
		bzero(&mbx, sizeof(mbx));

		fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
		fwcmd->params.req.rq_id = rq->rq_id;
		(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);

		fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
		fwcmd1->params.req.rq_id = rq->rq_id;
		fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
		(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);

		rq->qstate = QDELETED;

	if (rq->cq != NULL) {
		oce_cq_del(sc, rq->cq);

 * @brief function to create an event queue
 * @param sc software handle to the device
 * @param q_len length of event queue
 * @param item_size size of an event queue item
 * @param eq_delay event queue delay
 * @retval eq success, pointer to event queue
 * @retval NULL failure
oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
		      uint32_t item_size, uint32_t eq_delay, uint32_t vector)
	eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);

	eq->parent = (void *)sc;

	eq->ring = oce_create_ring_buffer(sc, q_len, item_size);

	eq->eq_cfg.q_len = q_len;
	eq->eq_cfg.item_size = item_size;
	eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;

	rc = oce_mbox_create_eq(eq);

	sc->intrs[sc->neqs++].eq = eq;

 * @brief Function to delete an event queue
 * @param eq pointer to an event queue
oce_eq_del(struct oce_eq *eq)
	struct mbx_destroy_common_eq *fwcmd;
	POCE_SOFTC sc = (POCE_SOFTC) eq->parent;

	if (eq->eq_id != 0xffff) {
		bzero(&mbx, sizeof(mbx));
		fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
		fwcmd->params.req.id = eq->eq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);

	if (eq->ring != NULL) {
		oce_destroy_ring_buffer(sc, eq->ring);

 * @brief Function to create an MQ
 * @param sc software handle to the device
 * @param eq the EQ to associate with the MQ for event notification
 * @param q_len the number of entries to create in the MQ
 * @returns pointer to the created MQ, or NULL on failure
static struct oce_mq *
oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
	struct mbx_create_common_mq_ex *fwcmd = NULL;
	struct oce_mq *mq = NULL;
	oce_mq_ext_ctx_t *ctx;

	cq = oce_cq_create(sc, eq, CQ_LEN_256,
			sizeof(struct oce_mq_cqe), 1, 1, 0, 0);

	/* allocate the mq */
	mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);

	mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));

	bzero(&mbx, sizeof(struct oce_mbx));
	version = IS_XE201(sc) ? OCE_MBX_VER_V1 : OCE_MBX_VER_V0;
	fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
	mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
				MBX_SUBSYSTEM_COMMON,
				OPCODE_COMMON_CREATE_MQ_EXT,
				sizeof(struct mbx_create_common_mq_ex),

	num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);

	ctx = &fwcmd->params.req.context;

	ctx->v1.num_pages = num_pages;
	ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
	ctx->v1.cq_id = cq->cq_id;
	ctx->v1.async_cq_id = cq->cq_id;
	ctx->v1.async_cq_valid = 1;
	/* Subscribe to Link State and Group 5 Events (bits 1 & 5 set) */
	ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
	ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
	ctx->v1.async_evt_bitmap |=
		LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
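	/*
	 * Each bit position in the bitmap selects one async event code, so
	 * 0x22 (bits 1 and 5) covers link-state and Group 5 events, with the
	 * debug and SLI port event codes OR-ed in on top.
	 */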
	ctx->v0.num_pages = num_pages;
	ctx->v0.cq_id = cq->cq_id;
	ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
	/* Subscribe to all async events (including Link State and Group 5) */
	ctx->v0.async_evt_bitmap = 0xffffffff;
	mbx.u0.s.embedded = 1;
	mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
	DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);

	rc = oce_mbox_post(sc, &mbx, NULL);
	rc = fwcmd->hdr.u0.rsp.status;
	device_printf(sc->dev, "%s failed - cmd status: %d\n",

	mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);

	eq->cq[eq->cq_valid] = cq;

	mq->cfg.q_len = (uint8_t) q_len;

	mq->qstate = QCREATED;

	mq->cq->cq_handler = oce_mq_handler;

	device_printf(sc->dev, "MQ create failed\n");

 * @brief Function to free a mailbox queue
 * @param mq pointer to a mailbox queue
oce_mq_free(struct oce_mq *mq)
	POCE_SOFTC sc = (POCE_SOFTC) mq->parent;
	struct mbx_destroy_common_mq *fwcmd;

	if (mq->ring != NULL) {
		oce_destroy_ring_buffer(sc, mq->ring);

	if (mq->qstate == QCREATED) {
		bzero(&mbx, sizeof (struct oce_mbx));
		fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
		fwcmd->params.req.id = mq->mq_id;
		(void) oce_destroy_q(sc, &mbx,
				sizeof (struct mbx_destroy_common_mq),

		mq->qstate = QDELETED;

	if (mq->cq != NULL) {
		oce_cq_del(sc, mq->cq);
 * @brief Function to delete an EQ, CQ, MQ, WQ or RQ
 * @param sc software handle to the device
 * @param mbx mailbox command to send to the fw to delete the queue
 *            (mbx contains the queue information to delete)
 * @param req_size the size of the mbx payload dependent on the qtype
 * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 * @returns 0 on success, non-zero otherwise
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
		enum qtype qtype, int version)
	struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;

	opcode = OPCODE_COMMON_DESTROY_EQ;
	subsys = MBX_SUBSYSTEM_COMMON;

	opcode = OPCODE_COMMON_DESTROY_CQ;
	subsys = MBX_SUBSYSTEM_COMMON;

	opcode = OPCODE_COMMON_DESTROY_MQ;
	subsys = MBX_SUBSYSTEM_COMMON;

	opcode = NIC_DELETE_WQ;
	subsys = MBX_SUBSYSTEM_NIC;

	opcode = NIC_DELETE_RQ;
	subsys = MBX_SUBSYSTEM_NIC;

	mbx_common_req_hdr_init(hdr, 0, 0, subsys,
				opcode, MBX_TIMEOUT_SEC, req_size,

	mbx->u0.s.embedded = 1;
	mbx->payload_length = (uint32_t) req_size;
	DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
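	/*
	 * The destroy request is small enough to be sent embedded in the
	 * mailbox itself; DW_SWAP is assumed to be a no-op on little-endian
	 * hosts and only byte-swaps the payload on big-endian ones.
	 */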
	rc = oce_mbox_post(sc, mbx, NULL);
	rc = hdr->u0.rsp.status;
	device_printf(sc->dev, "%s failed - cmd status: %d\n",
 * @brief Function to create a completion queue
 * @param sc software handle to the device
 * @param eq optional eq to be associated with the cq
 * @param q_len length of completion queue
 * @param item_size size of completion queue items
 * @param sol_event command context event
 * @param is_eventable flag indicating whether the cq is eventable
 * @param nodelay no delay flag
 * @param ncoalesce no coalescence flag
 * @returns pointer to the cq created, NULL on failure
oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
	      uint32_t q_len, uint32_t item_size, uint32_t sol_event,
	      uint32_t is_eventable,
	      uint32_t nodelay, uint32_t ncoalesce)
	struct oce_cq *cq = NULL;

	cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);

	cq->ring = oce_create_ring_buffer(sc, q_len, item_size);

	cq->cq_cfg.q_len = q_len;
	cq->cq_cfg.item_size = item_size;
	cq->cq_cfg.nodelay = (uint8_t) nodelay;

	rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);

	sc->cq[sc->ncqs++] = cq;

	device_printf(sc->dev, "CQ create failed\n");

 * @brief Deletes the completion queue
 * @param sc software handle to the device
 * @param cq pointer to a completion queue
oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
	struct mbx_destroy_common_cq *fwcmd;

	if (cq->ring != NULL) {
		bzero(&mbx, sizeof(struct oce_mbx));
		/* now fill the command */
		fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
		fwcmd->params.req.id = cq->cq_id;
		(void)oce_destroy_q(sc, &mbx,
			sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
		/* now destroy the ring */
		oce_destroy_ring_buffer(sc, cq->ring);
 * @brief Start a receive queue
 * @param rq pointer to a receive queue
oce_start_rq(struct oce_rq *rq)
	POCE_SOFTC sc = (POCE_SOFTC) rq->parent;

	rc = oce_alloc_rx_bufs(rq, 960);
	rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);

	oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
 * @brief Start a work queue
 * @param wq pointer to a work queue
oce_start_wq(struct oce_wq *wq)
	oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);

 * @brief Start a mailbox queue
 * @param mq pointer to a mailbox queue
oce_start_mq(struct oce_mq *mq)
	oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);

 * @brief Function to arm an EQ so that it can generate events
 * @param sc software handle to the device
 * @param qid id of the EQ returned by the fw at the time of creation
 * @param npopped number of EQEs to arm
 * @param rearm rearm bit enable/disable
 * @param clearint bit to clear the interrupt condition
oce_arm_eq(POCE_SOFTC sc,
	   int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
	eq_db_t eq_db = { 0 };

	eq_db.bits.rearm = rearm;
	eq_db.bits.event = 1;
	eq_db.bits.num_popped = npopped;
	eq_db.bits.clrint = clearint;
	eq_db.bits.qid = qid;
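	/*
	 * A single doorbell write acknowledges npopped consumed EQEs,
	 * optionally clears the interrupt condition and, when rearm is set,
	 * re-enables event generation for this EQ.
	 */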
	OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);

 * @brief Function to arm a CQ with CQEs
 * @param sc software handle to the device
 * @param qid id of the CQ returned by the fw at the time of creation
 * @param npopped number of CQEs to arm
 * @param rearm rearm bit enable/disable
void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
	cq_db_t cq_db = { 0 };

	cq_db.bits.rearm = rearm;
	cq_db.bits.num_popped = npopped;
	cq_db.bits.event = 0;
	cq_db.bits.qid = qid;
	OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
 * @brief function to clean up the EQs used during stop
 * @param eq pointer to event queue structure
 * @returns the number of EQEs processed
oce_drain_eq(struct oce_eq *eq)
	uint16_t num_eqe = 0;
	POCE_SOFTC sc = eq->parent;

	eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
	bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
			BUS_DMASYNC_POSTWRITE);
	RING_GET(eq->ring, 1);

	oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
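	/*
	 * Consumed entries are acknowledged with rearm == FALSE and
	 * clearint == TRUE, so the EQ stays quiesced after the drain.
	 */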
oce_drain_wq_cq(struct oce_wq *wq)
	POCE_SOFTC sc = wq->parent;
	struct oce_cq *cq = wq->cq;
	struct oce_nic_tx_cqe *cqe;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
			BUS_DMASYNC_POSTWRITE);

	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	if (cqe->u0.dw[3] == 0)

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
			BUS_DMASYNC_POSTWRITE);
	RING_GET(cq->ring, 1);

	oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
 * @brief function to drain an MCQ and process its CQEs
 * @param dev software handle to the device
 * @param cq pointer to the cq to drain
 * @returns the number of CQEs processed
oce_drain_mq_cq(void *arg)
	/* TODO: additional code. */
 * @brief function to process a Receive queue
 * @param arg pointer to the RQ to drain
 * @return number of cqes processed
oce_drain_rq_cq(struct oce_rq *rq)
	struct oce_nic_rx_cqe *cqe;
	uint16_t num_cqe = 0;

	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
						struct oce_nic_rx_cqe);

	oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);

oce_free_posted_rxbuf(struct oce_rq *rq)
	struct oce_packet_desc *pd;

	while (rq->pending) {
		pd = &rq->pckts[rq->ring->cidx];
		bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(rq->tag, pd->map);
		if (pd->mbuf != NULL) {

		RING_GET(rq->ring, 1);
oce_rx_cq_clean_hwlro(struct oce_rq *rq)
	struct oce_cq *cq = rq->cq;
	POCE_SOFTC sc = rq->parent;
	struct nic_hwlro_singleton_cqe *cqe;
	struct nic_hwlro_cqe_part2 *cqe2;
	int flush_compl = 0;

	bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);

	if (cqe->cqe_type == 0) { /* singleton cqe */
		/* we should not get singleton cqe after cqe1 on same rq */
		if (rq->cqe_firstpart != NULL) {
			device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
			goto exit_rx_cq_clean_hwlro;

		num_frags = cqe->pkt_size / rq->cfg.frag_size;
		if (cqe->pkt_size % rq->cfg.frag_size)

		oce_discard_rx_comp(rq, num_frags);
		/* Check if CQE is flush completion */

		RING_GET(cq->ring, 1);
	} else if (cqe->cqe_type == 0x1) { /* first part */
		/* we should not get cqe1 after cqe1 on same rq */
		if (rq->cqe_firstpart != NULL) {
			device_printf(sc->dev, "Got cqe1 after cqe1 \n");
			goto exit_rx_cq_clean_hwlro;

		rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
		RING_GET(cq->ring, 1);
	} else if (cqe->cqe_type == 0x2) { /* second part */
		cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
		/* We should not get cqe2 without cqe1 */
		if (rq->cqe_firstpart == NULL) {
			device_printf(sc->dev, "Got cqe2 without cqe1 \n");
			goto exit_rx_cq_clean_hwlro;

		num_frags = cqe2->coalesced_size / rq->cfg.frag_size;
		if (cqe2->coalesced_size % rq->cfg.frag_size)

		/* Flush completion will always come in singleton CQE */
		oce_discard_rx_comp(rq, num_frags);

		rq->cqe_firstpart->valid = 0;
		rq->cqe_firstpart = NULL;
		RING_GET(cq->ring, 1);

	oce_arm_cq(sc, cq->cq_id, 1, FALSE);

	if (flush_wait++ > 100) {
		device_printf(sc->dev, "did not receive hwlro flush compl\n");

	oce_arm_cq(sc, cq->cq_id, 0, TRUE);

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);

exit_rx_cq_clean_hwlro:
oce_rx_cq_clean(struct oce_rq *rq)
	struct oce_nic_rx_cqe *cqe;
	int flush_compl = 0;

	bus_dmamap_sync(cq->ring->dma.tag,
			cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	if (RQ_CQE_VALID(cqe)) {
		DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
		oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
		/* Check if CQE is flush completion */
		if ((cqe->u0.s.num_fragments == 0) && (cqe->u0.s.pkt_size == 0) && (cqe->u0.s.error == 0))

		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
#if defined(INET6) || defined(INET)
		if (IF_LRO_ENABLED(sc))
			oce_rx_flush_lro(rq);

	oce_arm_cq(sc, cq->cq_id, 1, FALSE);

	if (flush_wait++ > 100) {
		device_printf(sc->dev, "did not receive flush compl\n");

	oce_arm_cq(sc, cq->cq_id, 0, TRUE);

	/* After cleanup, leave the CQ in unarmed state */
	oce_arm_cq(sc, cq->cq_id, 0, FALSE);
oce_stop_rx(POCE_SOFTC sc)
	struct epoch_tracker et;
	struct mbx_delete_nic_rq *fwcmd;
	struct mbx_delete_nic_rq_v1 *fwcmd1;

	NET_EPOCH_ENTER(et);
	/* before deleting the RQs, disable hwlro */
	if (sc->enable_hwlro)
		oce_mbox_nic_set_iface_lro_config(sc, 0);

	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED) {
			/* Delete rxq in firmware */

			bzero(&mbx, sizeof(mbx));

			fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
			fwcmd->params.req.rq_id = rq->rq_id;
			(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);

			fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
			fwcmd1->params.req.rq_id = rq->rq_id;
			fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
			(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);

			rq->qstate = QDELETED;

			oce_rx_cq_clean(rq);
			oce_rx_cq_clean_hwlro(rq);

			/* Free posted RX buffers that are not used */
			oce_free_posted_rxbuf(rq);
			UNLOCK(&rq->rx_lock);
oce_start_rx(POCE_SOFTC sc)
	for_all_rq_queues(sc, rq, i) {
		if (rq->qstate == QCREATED)

		if ((i == 0) || (!sc->enable_hwlro)) {
			rc = oce_mbox_create_rq(rq);

			rc = oce_mbox_create_rq_v2(rq);

		/* reset queue pointers */
		rq->qstate = QCREATED;

	if (sc->enable_hwlro) {
		rc = oce_mbox_nic_set_iface_lro_config(sc, 1);

	if (is_rss_enabled(sc)) {
		rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);

	device_printf(sc->dev, "Start RX failed\n");