2 * Copyright (C) 2013 Emulex
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Emulex Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
31 * Contact Information:
32 * freebsd-drivers@emulex.com
36 * Costa Mesa, CA 92626
46 /*****************************************************
47 * local queue functions
48 *****************************************************/
/*
 * Forward declarations for the per-queue-type helpers below.
 * Pattern per queue type: *_init (allocate sw state), *_create (program fw),
 * *_del (tear down fw object), *_free (release sw state).
 * NOTE(review): oce_cq_create lacks `static`, so it is exported — confirm a
 * header declares it; the listing is incomplete here.
 */
50 static struct oce_wq *oce_wq_init(POCE_SOFTC sc,
51 uint32_t q_len, uint32_t wq_type);
52 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
53 static void oce_wq_free(struct oce_wq *wq);
54 static void oce_wq_del(struct oce_wq *wq);
55 static struct oce_rq *oce_rq_init(POCE_SOFTC sc,
58 uint32_t mtu, uint32_t rss);
59 static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
60 static void oce_rq_free(struct oce_rq *rq);
61 static void oce_rq_del(struct oce_rq *rq);
62 static struct oce_eq *oce_eq_create(POCE_SOFTC sc,
67 static void oce_eq_del(struct oce_eq *eq);
68 static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
69 struct oce_eq *eq, uint32_t q_len);
70 static void oce_mq_free(struct oce_mq *mq);
71 static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
72 *mbx, size_t req_size, enum qtype qtype);
73 struct oce_cq *oce_cq_create(POCE_SOFTC sc,
78 uint32_t is_eventable,
79 uint32_t nodelay, uint32_t ncoalesce);
80 static void oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq);
85 * @brief Create and initialize all the queues on the board
86 * @param sc software handle to the device
87 * @returns 0 if successful, or error
90 oce_queue_init_all(POCE_SOFTC sc)
92 int rc = 0, i, vector;
95 struct oce_aic_obj *aic;
/* alloc TX/RX queues: one WQ per tx ring, one RQ per rx ring. */
97 /* alloc TX/RX queues */
98 for_all_wq_queues(sc, wq, i) {
99 sc->wq[i] = oce_wq_init(sc, sc->tx_ring_size,
100 NIC_WQ_TYPE_STANDARD);
/* RQ 0 is the default (non-RSS) queue; the rest are RSS-eligible. */
106 for_all_rq_queues(sc, rq, i) {
107 sc->rq[i] = oce_rq_init(sc, sc->rx_ring_size, sc->rq_frag_size,
108 OCE_MAX_JUMBO_FRAME_SIZE,
109 (i == 0) ? 0 : is_rss_enabled(sc));
114 /* Create network interface on card */
115 if (oce_create_nw_interface(sc))
/* One EQ per interrupt vector, each seeded with adaptive-interrupt
 * coalescing (aic) defaults before creation. */
118 /* create all of the event queues */
119 for (vector = 0; vector < sc->intr_count; vector++) {
120 /* setup aic defaults for each event queue */
121 aic = &sc->aic_obj[vector];
122 aic->max_eqd = OCE_MAX_EQD;
123 aic->min_eqd = OCE_MIN_EQD;
124 aic->et_eqd = OCE_MIN_EQD;
127 sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
133 /* create Tx, Rx and mcc queues */
134 for_all_wq_queues(sc, wq, i) {
135 rc = oce_wq_create(wq, sc->eq[i]);
139 TASK_INIT(&wq->txtask, 1, oce_tx_task, wq);
/* RQ i>0 shares EQ (i-1); RQ 0 shares EQ 0 with WQ 0. */
142 for_all_rq_queues(sc, rq, i) {
143 rc = oce_rq_create(rq, sc->if_id,
144 sc->eq[(i == 0) ? 0:(i-1)]);
/* Mailbox (MCC) queue hangs off EQ 0; 64 entries. */
150 sc->mq = oce_mq_create(sc, sc->eq[0], 64);
/* Error path: undo everything created so far.
 * NOTE(review): intermediate error checks are elided in this extract. */
157 oce_queue_release_all(sc);
164 * @brief Releases all mailbox queues created
165 * @param sc software handle to the device
168 oce_queue_release_all(POCE_SOFTC sc)
/* Teardown order is the reverse of dependency: RQs and WQs (which own CQs
 * attached to EQs) before the EQs themselves. */
175 for_all_rq_queues(sc, rq, i) {
177 oce_rq_del(sc->rq[i]);
178 oce_rq_free(sc->rq[i]);
182 for_all_wq_queues(sc, wq, i) {
184 oce_wq_del(sc->wq[i]);
185 oce_wq_free(sc->wq[i]);
192 for_all_evnt_queues(sc, eq, i) {
194 oce_eq_del(sc->eq[i]);
201 * @brief Function to create a WQ for NIC Tx
202 * @param sc software handle to the device
203 * @param qlen number of entries in the queue
204 * @param wq_type work queue type
205 * @returns the pointer to the WQ created or NULL on failure
208 oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
/* Hardware constraint on WQ depth.
 * NOTE(review): message says "[256, 2000]" but the check allows up to 2048 —
 * one of the two is wrong; confirm against the firmware spec. */
213 /* q_len must be min 256 and max 2k */
214 if (q_len < 256 || q_len > 2048) {
215 device_printf(sc->dev,
216 "Invalid q length. Must be "
217 "[256, 2000]: 0x%x\n", q_len);
222 wq = malloc(sizeof(struct oce_wq), M_DEVBUF, M_NOWAIT | M_ZERO);
226 /* Set the wq config */
227 wq->cfg.q_len = q_len;
228 wq->cfg.wq_type = (uint8_t) wq_type;
229 wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
/* Budget two mbufs/handles per descriptor (fragmented packets). */
230 wq->cfg.nbufs = 2 * wq->cfg.q_len;
231 wq->cfg.nhdl = 2 * wq->cfg.q_len;
233 wq->parent = (void *)sc;
/* DMA tag for tx packet buffers; one map per slot in the packet array. */
235 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
242 PAGE_SIZE, 0, NULL, NULL, &wq->tag);
248 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
249 rc = bus_dmamap_create(wq->tag, 0, &wq->pckts[i].map);
254 wq->ring = oce_create_ring_buffer(sc, q_len, NIC_WQE_SIZE);
259 LOCK_CREATE(&wq->tx_lock, "TX_lock");
261 #if __FreeBSD_version >= 800000
262 /* Allocate buf ring for multiqueue*/
263 wq->br = buf_ring_alloc(4096, M_DEVBUF,
264 M_WAITOK, &wq->tx_lock.mutex);
/* Common failure exit — releases partial allocations (elided in extract). */
272 device_printf(sc->dev, "Create WQ failed\n");
280 * @brief Frees the work queue
281 * @param wq pointer to work queue to free
284 oce_wq_free(struct oce_wq *wq)
286 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
/* Make sure the deferred tx task is not running before freeing state. */
289 taskqueue_drain(taskqueue_swi, &wq->txtask);
291 if (wq->ring != NULL) {
292 oce_destroy_ring_buffer(sc, wq->ring);
/* Unload + destroy every per-packet DMA map, then the tag itself. */
296 for (i = 0; i < OCE_WQ_PACKET_ARRAY_SIZE; i++) {
297 if (wq->pckts[i].map != NULL) {
298 bus_dmamap_unload(wq->tag, wq->pckts[i].map);
299 bus_dmamap_destroy(wq->tag, wq->pckts[i].map);
300 wq->pckts[i].map = NULL;
305 bus_dma_tag_destroy(wq->tag);
307 buf_ring_free(wq->br, M_DEVBUF);
309 LOCK_DESTROY(&wq->tx_lock);
316 * @brief Create a work queue
317 * @param wq pointer to work queue
318 * @param eq pointer to associated event queue
321 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
323 POCE_SOFTC sc = wq->parent;
/* Each WQ needs a completion queue bound to the given EQ. */
328 cq = oce_cq_create(sc,
331 sizeof(struct oce_nic_tx_cqe), 0, 1, 0, 3);
/* Program the WQ in firmware via mailbox. */
338 rc = oce_mbox_create_wq(wq);
342 wq->qstate = QCREATED;
343 wq->wq_free = wq->cfg.q_len;
/* Register the CQ with its EQ and set the tx completion handler. */
347 eq->cq[eq->cq_valid] = cq;
350 cq->cq_handler = oce_wq_handler;
355 device_printf(sc->dev, "WQ create failed\n");
364 * @brief Delete a work queue
365 * @param wq pointer to work queue
368 oce_wq_del(struct oce_wq *wq)
371 struct mbx_delete_nic_wq *fwcmd;
372 POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
/* Only issue the firmware delete if the queue was actually created. */
374 if (wq->qstate == QCREATED) {
375 bzero(&mbx, sizeof(struct oce_mbx));
376 /* now fill the command */
377 fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
378 fwcmd->params.req.wq_id = wq->wq_id;
379 (void)oce_destroy_q(sc, &mbx,
380 sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
381 wq->qstate = QDELETED;
/* Tear down the associated completion queue as well. */
384 if (wq->cq != NULL) {
385 oce_cq_del(sc, wq->cq);
393 * @brief function to allocate receive queue resources
394 * @param sc software handle to the device
395 * @param q_len length of receive queue
396 * @param frag_size size of an receive queue fragment
397 * @param mtu maximum transmission unit
398 * @param rss is-rss-queue flag
399 * @returns the pointer to the RQ created or NULL on failure
402 oce_rq *oce_rq_init(POCE_SOFTC sc,
405 uint32_t mtu, uint32_t rss)
/* frag_size must be a power of two (hardware encodes it as log2). */
410 if (OCE_LOG2(frag_size) <= 0)
413 if ((q_len == 0) || (q_len > 1024))
416 /* allocate the rq */
417 rq = malloc(sizeof(struct oce_rq), M_DEVBUF, M_NOWAIT | M_ZERO);
422 rq->cfg.q_len = q_len;
423 rq->cfg.frag_size = frag_size;
426 rq->lro_pkts_queued = 0;
427 rq->cfg.is_rss_queue = rss;
432 rq->parent = (void *)sc;
/* DMA tag for rx buffers: single segment, page-sized max. */
434 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
440 1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);
445 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
446 rc = bus_dmamap_create(rq->tag, 0, &rq->pckts[i].map);
451 /* create the ring buffer */
452 rq->ring = oce_create_ring_buffer(sc, q_len,
453 sizeof(struct oce_nic_rqe));
457 LOCK_CREATE(&rq->rx_lock, "RX_lock");
/* Common failure exit (cleanup lines elided in this extract). */
462 device_printf(sc->dev, "Create RQ failed\n");
471 * @brief Free a receive queue
472 * @param rq pointer to receive queue
475 oce_rq_free(struct oce_rq *rq)
477 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
480 if (rq->ring != NULL) {
481 oce_destroy_ring_buffer(sc, rq->ring);
/* Release per-packet DMA maps and any mbufs still attached to slots. */
484 for (i = 0; i < OCE_RQ_PACKET_ARRAY_SIZE; i++) {
485 if (rq->pckts[i].map != NULL) {
486 bus_dmamap_unload(rq->tag, rq->pckts[i].map);
487 bus_dmamap_destroy(rq->tag, rq->pckts[i].map);
488 rq->pckts[i].map = NULL;
490 if (rq->pckts[i].mbuf) {
491 m_free(rq->pckts[i].mbuf);
492 rq->pckts[i].mbuf = NULL;
497 bus_dma_tag_destroy(rq->tag);
499 LOCK_DESTROY(&rq->rx_lock);
507 * @brief Create a receive queue
508 * @param rq receive queue
509 * @param if_id interface identifier index`
510 * @param eq pointer to event queue
513 oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
515 POCE_SOFTC sc = rq->parent;
/* Create the completion queue for rx completions on the given EQ. */
518 cq = oce_cq_create(sc,
521 sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
526 rq->cfg.if_id = if_id;
/* Firmware RQ creation is deferred to oce_start_rx()/if_activate. */
528 /* Dont create RQ here. Create in if_activate */
532 eq->cq[eq->cq_valid] = cq;
535 cq->cq_handler = oce_rq_handler;
545 * @brief Delete a receive queue
546 * @param rq receive queue
549 oce_rq_del(struct oce_rq *rq)
551 POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
553 struct mbx_delete_nic_rq *fwcmd;
/* Firmware delete only if the RQ was created; mirrors oce_wq_del(). */
555 if (rq->qstate == QCREATED) {
556 bzero(&mbx, sizeof(mbx));
558 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
559 fwcmd->params.req.rq_id = rq->rq_id;
560 (void)oce_destroy_q(sc, &mbx,
561 sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
562 rq->qstate = QDELETED;
565 if (rq->cq != NULL) {
566 oce_cq_del(sc, rq->cq);
574 * @brief function to create an event queue
575 * @param sc software handle to the device
576 * @param q_len length of event queue
577 * @param item_size size of an event queue item
578 * @param eq_delay event queue delay
579 * @retval eq success, pointer to event queue
580 * @retval NULL failure
583 oce_eq *oce_eq_create(POCE_SOFTC sc, uint32_t q_len,
592 eq = malloc(sizeof(struct oce_eq), M_DEVBUF, M_NOWAIT | M_ZERO);
596 eq->parent = (void *)sc;
598 eq->ring = oce_create_ring_buffer(sc, q_len, item_size);
602 eq->eq_cfg.q_len = q_len;
603 eq->eq_cfg.item_size = item_size;
604 eq->eq_cfg.cur_eqd = (uint8_t) eq_delay;
/* Program the EQ in firmware, then record it in the interrupt table. */
606 rc = oce_mbox_create_eq(eq);
610 sc->intrs[sc->neqs++].eq = eq;
623 * @brief Function to delete an event queue
624 * @param eq pointer to an event queue
627 oce_eq_del(struct oce_eq *eq)
630 struct mbx_destroy_common_eq *fwcmd;
631 POCE_SOFTC sc = (POCE_SOFTC) eq->parent;
/* 0xffff is the "never created" sentinel for eq_id. */
633 if (eq->eq_id != 0xffff) {
634 bzero(&mbx, sizeof(mbx));
635 fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
636 fwcmd->params.req.id = eq->eq_id;
637 (void)oce_destroy_q(sc, &mbx,
638 sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
641 if (eq->ring != NULL) {
642 oce_destroy_ring_buffer(sc, eq->ring);
654 * @brief Function to create an MQ
655 * @param sc software handle to the device
656 * @param eq the EQ to associate with the MQ for event notification
657 * @param q_len the number of entries to create in the MQ
658 * @returns pointer to the created MQ, failure otherwise
660 static struct oce_mq *
661 oce_mq_create(POCE_SOFTC sc, struct oce_eq *eq, uint32_t q_len)
664 struct mbx_create_common_mq_ex *fwcmd = NULL;
665 struct oce_mq *mq = NULL;
668 oce_mq_ext_ctx_t *ctx;
/* MQ completions land on a dedicated 256-entry CQ on the given EQ. */
673 cq = oce_cq_create(sc, eq, CQ_LEN_256,
674 sizeof(struct oce_mq_cqe), 1, 1, 0, 0);
678 /* allocate the mq */
679 mq = malloc(sizeof(struct oce_mq), M_DEVBUF, M_NOWAIT | M_ZERO);
687 mq->ring = oce_create_ring_buffer(sc, q_len, sizeof(struct oce_mbx));
/* Build the CREATE_MQ_EXT mailbox command by hand (no helper exists). */
691 bzero(&mbx, sizeof(struct oce_mbx));
/* XE201 (Lancer) chips use the v1 context layout; older use v0. */
693 IS_XE201(sc) ? (version = OCE_MBX_VER_V1) : (version = OCE_MBX_VER_V0);
694 fwcmd = (struct mbx_create_common_mq_ex *)&mbx.payload;
695 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
696 MBX_SUBSYSTEM_COMMON,
697 OPCODE_COMMON_CREATE_MQ_EXT,
699 sizeof(struct mbx_create_common_mq_ex),
702 num_pages = oce_page_list(mq->ring, &fwcmd->params.req.pages[0]);
703 page_size = mq->ring->num_items * mq->ring->item_size;
705 ctx = &fwcmd->params.req.context;
708 ctx->v1.num_pages = num_pages;
/* Firmware encodes ring size as log2(entries) + 1. */
709 ctx->v1.ring_size = OCE_LOG2(q_len) + 1;
710 ctx->v1.cq_id = cq->cq_id;
/* v1 routes async events to the same CQ as command completions. */
712 ctx->v1.async_cq_id = cq->cq_id;
713 ctx->v1.async_cq_valid = 1;
714 /* Subscribe to Link State and Group 5 Events(bits 1 & 5 set) */
715 ctx->v1.async_evt_bitmap |= LE_32(0x00000022);
716 ctx->v1.async_evt_bitmap |= LE_32(1 << ASYNC_EVENT_CODE_DEBUG);
717 ctx->v1.async_evt_bitmap |=
718 LE_32(1 << ASYNC_EVENT_CODE_SLIPORT);
721 ctx->v0.num_pages = num_pages;
722 ctx->v0.cq_id = cq->cq_id;
723 ctx->v0.ring_size = OCE_LOG2(q_len) + 1;
725 /* Subscribe to Link State and Group5 Events(bits 1 & 5 set) */
726 ctx->v0.async_evt_bitmap = 0xffffffff;
729 mbx.u0.s.embedded = 1;
730 mbx.payload_length = sizeof(struct mbx_create_common_mq_ex);
731 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
733 rc = oce_mbox_post(sc, &mbx, NULL);
/* Post succeeded; now check the firmware's own status word. */
735 rc = fwcmd->hdr.u0.rsp.status;
737 device_printf(sc->dev,"%s failed - cmd status: %d\n",
741 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
743 eq->cq[eq->cq_valid] = cq;
746 mq->cfg.q_len = (uint8_t) q_len;
748 mq->qstate = QCREATED;
751 mq->cq->cq_handler = oce_mq_handler;
756 device_printf(sc->dev, "MQ create failed\n");
767 * @brief Function to free a mailbox queue
768 * @param mq pointer to a mailbox queue
771 oce_mq_free(struct oce_mq *mq)
773 POCE_SOFTC sc = (POCE_SOFTC) mq->parent;
775 struct mbx_destroy_common_mq *fwcmd;
780 if (mq->ring != NULL) {
781 oce_destroy_ring_buffer(sc, mq->ring);
/* Unlike WQ/RQ, the MQ combines fw delete and sw free in one routine. */
783 if (mq->qstate == QCREATED) {
784 bzero(&mbx, sizeof (struct oce_mbx));
785 fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
786 fwcmd->params.req.id = mq->mq_id;
787 (void) oce_destroy_q(sc, &mbx,
788 sizeof (struct mbx_destroy_common_mq),
791 mq->qstate = QDELETED;
794 if (mq->cq != NULL) {
795 oce_cq_del(sc, mq->cq);
806 * @brief Function to delete a EQ, CQ, MQ, WQ or RQ
807 * @param sc sofware handle to the device
808 * @param mbx mailbox command to send to the fw to delete the queue
809 * (mbx contains the queue information to delete)
810 * @param req_size the size of the mbx payload dependent on the qtype
811 * @param qtype the type of queue i.e. EQ, CQ, MQ, WQ or RQ
812 * @returns 0 on success, failure otherwise
815 oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
818 struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
/* Map queue type to the (subsystem, opcode) pair the firmware expects.
 * EQ/CQ/MQ are COMMON-subsystem ops; WQ/RQ are NIC-subsystem ops. */
825 opcode = OPCODE_COMMON_DESTROY_EQ;
826 subsys = MBX_SUBSYSTEM_COMMON;
829 opcode = OPCODE_COMMON_DESTROY_CQ;
830 subsys = MBX_SUBSYSTEM_COMMON;
833 opcode = OPCODE_COMMON_DESTROY_MQ;
834 subsys = MBX_SUBSYSTEM_COMMON;
837 opcode = NIC_DELETE_WQ;
838 subsys = MBX_SUBSYSTEM_NIC;
841 opcode = NIC_DELETE_RQ;
842 subsys = MBX_SUBSYSTEM_NIC;
848 mbx_common_req_hdr_init(hdr, 0, 0, subsys,
849 opcode, MBX_TIMEOUT_SEC, req_size,
852 mbx->u0.s.embedded = 1;
853 mbx->payload_length = (uint32_t) req_size;
854 DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
856 rc = oce_mbox_post(sc, mbx, NULL);
/* Post succeeded; surface the firmware's per-command status. */
858 rc = hdr->u0.rsp.status;
860 device_printf(sc->dev,"%s failed - cmd status: %d\n",
868 * @brief Function to create a completion queue
869 * @param sc software handle to the device
870 * @param eq optional eq to be associated with to the cq
871 * @param q_len length of completion queue
872 * @param item_size size of completion queue items
873 * @param sol_event command context event
874 * @param is_eventable event table
875 * @param nodelay no delay flag
876 * @param ncoalesce no coalescence flag
877 * @returns pointer to the cq created, NULL on failure
880 oce_cq_create(POCE_SOFTC sc, struct oce_eq *eq,
884 uint32_t is_eventable,
885 uint32_t nodelay, uint32_t ncoalesce)
887 struct oce_cq *cq = NULL;
890 cq = malloc(sizeof(struct oce_cq), M_DEVBUF, M_NOWAIT | M_ZERO);
894 cq->ring = oce_create_ring_buffer(sc, q_len, item_size);
900 cq->cq_cfg.q_len = q_len;
901 cq->cq_cfg.item_size = item_size;
902 cq->cq_cfg.nodelay = (uint8_t) nodelay;
/* Program the CQ in firmware, then register it in the softc CQ table. */
904 rc = oce_mbox_cq_create(cq, ncoalesce, is_eventable);
908 sc->cq[sc->ncqs++] = cq;
913 device_printf(sc->dev, "CQ create failed\n");
921 * @brief Deletes the completion queue
922 * @param sc software handle to the device
923 * @param cq pointer to a completion queue
926 oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
929 struct mbx_destroy_common_cq *fwcmd;
/* ring != NULL doubles as "the CQ was created" here. */
931 if (cq->ring != NULL) {
933 bzero(&mbx, sizeof(struct oce_mbx));
934 /* now fill the command */
935 fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
936 fwcmd->params.req.id = cq->cq_id;
937 (void)oce_destroy_q(sc, &mbx,
938 sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
939 /*NOW destroy the ring */
940 oce_destroy_ring_buffer(sc, cq->ring);
951 * @brief Start a receive queue
952 * @param rq pointer to a receive queue
955 oce_start_rq(struct oce_rq *rq)
/* Post a full ring of rx buffers, then arm the CQ so interrupts flow. */
959 rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
962 oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
969 * @brief Start a work queue
970 * @param wq pointer to a work queue
973 oce_start_wq(struct oce_wq *wq)
/* Arming the CQ (rearm=TRUE, 0 popped) enables tx completion events. */
975 oce_arm_cq(wq->parent, wq->cq->cq_id, 0, TRUE);
982 * @brief Start a mailbox queue
983 * @param mq pointer to a mailbox queue
986 oce_start_mq(struct oce_mq *mq)
/* Arm the MQ's completion queue so mailbox/async events are delivered. */
988 oce_arm_cq(mq->parent, mq->cq->cq_id, 0, TRUE);
995 * @brief Function to arm an EQ so that it can generate events
996 * @param sc software handle to the device
997 * @param qid id of the EQ returned by the fw at the time of creation
998 * @param npopped number of EQEs to arm
999 * @param rearm rearm bit enable/disable
1000 * @param clearint bit to clear the interrupt condition because of which
1001 * EQEs are generated
1004 oce_arm_eq(POCE_SOFTC sc,
1005 int16_t qid, int npopped, uint32_t rearm, uint32_t clearint)
/* Compose the EQ doorbell word and write it in a single 32-bit store. */
1007 eq_db_t eq_db = { 0 };
1009 eq_db.bits.rearm = rearm;
/* event=1 distinguishes an EQ doorbell from a CQ doorbell on PD_EQ_DB. */
1010 eq_db.bits.event = 1;
1011 eq_db.bits.num_popped = npopped;
1012 eq_db.bits.clrint = clearint;
1013 eq_db.bits.qid = qid;
1014 OCE_WRITE_REG32(sc, db, PD_EQ_DB, eq_db.dw0);
1022 * @brief Function to arm a CQ with CQEs
1023 * @param sc software handle to the device
1024 * @param qid id of the CQ returned by the fw at the time of creation
1025 * @param npopped number of CQEs to arm
1026 * @param rearm rearm bit enable/disable
1028 void oce_arm_cq(POCE_SOFTC sc, int16_t qid, int npopped, uint32_t rearm)
/* Compose the CQ doorbell word; event=0 marks it as a CQ doorbell. */
1030 cq_db_t cq_db = { 0 };
1032 cq_db.bits.rearm = rearm;
1033 cq_db.bits.num_popped = npopped;
1034 cq_db.bits.event = 0;
1035 cq_db.bits.qid = qid;
1036 OCE_WRITE_REG32(sc, db, PD_CQ_DB, cq_db.dw0);
1044 * @brief function to cleanup the eqs used during stop
1045 * @param eq pointer to event queue structure
1046 * @returns the number of EQs processed
1049 oce_drain_eq(struct oce_eq *eq)
1052 struct oce_eqe *eqe;
1053 uint16_t num_eqe = 0;
1054 POCE_SOFTC sc = eq->parent;
/* Walk the EQ ring consuming entries without dispatching them
 * (loop-control lines are elided in this extract). */
1057 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1061 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
1062 BUS_DMASYNC_POSTWRITE);
1064 RING_GET(eq->ring, 1);
/* Acknowledge consumed EQEs; rearm=FALSE, clearint=TRUE on shutdown. */
1068 oce_arm_eq(sc, eq->eq_id, num_eqe, FALSE, TRUE);
/* Drain pending tx completions from a WQ's CQ during stop, without
 * processing them; acknowledges the consumed CQEs at the end. */
1075 oce_drain_wq_cq(struct oce_wq *wq)
1077 POCE_SOFTC sc = wq->parent;
1078 struct oce_cq *cq = wq->cq;
1079 struct oce_nic_tx_cqe *cqe;
1082 bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1083 BUS_DMASYNC_POSTWRITE);
1086 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
/* dw[3] == 0 means the CQE is invalid/unwritten — stop draining. */
1087 if (cqe->u0.dw[3] == 0)
1090 bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map,
1091 BUS_DMASYNC_POSTWRITE);
1092 RING_GET(cq->ring, 1);
/* Report consumed CQEs to hardware without rearming (we are stopping). */
1097 oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
1103 * @brief function to drain a MCQ and process its CQEs
1104 * @param dev software handle to the device
1105 * @param cq pointer to the cq to drain
1106 * @returns the number of CQEs processed
1109 oce_drain_mq_cq(void *arg)
/* Intentionally unimplemented stub — MCQ draining not yet supported. */
1111 /* TODO: additional code. */
1118 * @brief function to process a Recieve queue
1119 * @param arg pointer to the RQ to charge
1120 * @return number of cqes processed
1123 oce_drain_rq_cq(struct oce_rq *rq)
1125 struct oce_nic_rx_cqe *cqe;
1126 uint16_t num_cqe = 0;
1132 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
1133 /* dequeue till you reach an invalid cqe */
1134 while (RQ_CQE_VALID(cqe)) {
/* Invalidate each CQE so it is not re-consumed on the ring's next pass. */
1135 RQ_CQE_INVALIDATE(cqe);
1136 RING_GET(cq->ring, 1);
1137 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
1138 struct oce_nic_rx_cqe);
/* Acknowledge without rearming — this path runs during stop. */
1141 oce_arm_cq(sc, cq->cq_id, num_cqe, FALSE);
/* Free rx buffers that were posted to the hardware but never consumed.
 * Walks the packet array from packets_out while rq->pending is non-zero,
 * unloading DMA maps and releasing mbufs. */
1148 oce_free_posted_rxbuf(struct oce_rq *rq)
1150 struct oce_packet_desc *pd;
1152 while (rq->pending) {
1154 pd = &rq->pckts[rq->packets_out];
1155 bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
1156 bus_dmamap_unload(rq->tag, pd->map);
1157 if (pd->mbuf != NULL) {
/* Advance the ring index with wrap-around at the array bound. */
1162 if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
1163 rq->packets_out = 0;
/* Stop all receive queues: delete each created RQ in firmware, mark it
 * QDELETED, and reclaim any rx buffers still posted to the hardware. */
1173 oce_stop_rx(POCE_SOFTC sc)
1176 struct mbx_delete_nic_rq *fwcmd;
1180 for_all_rq_queues(sc, rq, i) {
1181 if (rq->qstate == QCREATED) {
1182 /* Delete rxq in firmware */
1184 bzero(&mbx, sizeof(mbx));
1185 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1186 fwcmd->params.req.rq_id = rq->rq_id;
1188 (void)oce_destroy_q(sc, &mbx,
1189 sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
1191 rq->qstate = QDELETED;
1195 /* Free posted RX buffers that are not used */
1196 oce_free_posted_rxbuf(rq);
/* (Re)start all receive queues: recreate in firmware any RQ not already
 * QCREATED, reset its software indices, and re-enable RSS if configured. */
1205 oce_start_rx(POCE_SOFTC sc)
1210 for_all_rq_queues(sc, rq, i) {
/* Skip queues that are already live.
 * NOTE(review): the statement under this `if` is elided in this extract —
 * presumably a `continue`; confirm against the full source. */
1211 if (rq->qstate == QCREATED)
1213 rc = oce_mbox_create_rq(rq);
1216 /* reset queue pointers */
1217 rq->qstate = QCREATED;
1222 rq->packets_out = 0;
1228 if (is_rss_enabled(sc)) {
1229 rc = oce_config_nic_rss(sc, (uint8_t) sc->if_id, RSS_ENABLE);
1237 device_printf(sc->dev, "Start RX failed\n");