/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Microsoft Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>

#include "mana.h"
#include "hw_channel.h"
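
/*
 * Reserve a slot for an in-flight HWC message: wait on the counting
 * semaphore, then claim the first free bit in the in-flight bitmap.
 */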
static int
mana_hwc_get_msg_index(struct hw_channel_context *hwc, uint16_t *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	uint32_t index;

	sema_wait(&hwc->sema);

	mtx_lock_spin(&r->lock_spin);
	index = find_first_zero_bit(hwc->inflight_msg_res.map,
	    hwc->inflight_msg_res.size);
	bitmap_set(hwc->inflight_msg_res.map, index, 1);
	mtx_unlock_spin(&r->lock_spin);

	*msg_id = index;
	return 0;
}

static void
mana_hwc_put_msg_index(struct hw_channel_context *hwc, uint16_t msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;

	mtx_lock_spin(&r->lock_spin);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	mtx_unlock_spin(&r->lock_spin);

	sema_post(&hwc->sema);
}

static int
mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
    const struct gdma_resp_hdr *resp_msg,
    uint32_t resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return EPROTO;
	if (resp_len > caller_ctx->output_buflen)
		return EPROTO;

	return 0;
}

static void
mana_hwc_handle_resp(struct hw_channel_context *hwc, uint32_t resp_len,
    const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
	    hwc->inflight_msg_res.map)) {
		device_printf(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
		    resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;
	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}
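
/*
 * Post a receive work request so the HWC response buffer is made
 * available to the hardware again.
 */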
static int
mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
    struct hwc_work_request *req)
{
	device_t dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (uint64_t)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		device_printf(dev,
		    "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}
static void
mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
    struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	uint32_t type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;
		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;
		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;
		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (uint16_t)val;
			break;
		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;
		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;
		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;
		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;
		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	default:
		/* Ignore unknown events, which should never happen. */
		break;
	}
}
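
/*
 * Handle a response received on the HWC RQ: locate the work request that
 * carried it, dispatch it to the waiting caller and repost the buffer.
 */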
static void
mana_hwc_rx_event_handler(void *ctx, uint32_t gdma_rxq_id,
    const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	uint64_t rq_base_addr;
	uint64_t rx_req_idx;
	uint8_t *wqe;

	if (hwc_rxq->gdma_wq->id != gdma_rxq_id) {
		mana_warn(NULL, "unmatched rx queue %u != %u\n",
		    hwc_rxq->gdma_wq->id, gdma_rxq_id);
		return;
	}

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	bus_dmamap_sync(rq->mem_info.dma_tag, rq->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	bus_dmamap_sync(hwc_rxq->msg_buf->mem_info.dma_tag,
	    hwc_rxq->msg_buf->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		device_printf(hwc->dev, "HWC RX: wrong msg_id=%u\n",
		    resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/*
	 * Don't use 'resp' beyond this point; the buffer is handed back to
	 * the hardware by mana_hwc_post_rx_wqe() below.
	 */

	bus_dmamap_sync(hwc_rxq->msg_buf->mem_info.dma_tag,
	    hwc_rxq->msg_buf->mem_info.dma_map,
	    BUS_DMASYNC_PREREAD);

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}
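
/* Completion of a request previously posted on the HWC SQ. */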
static void
mana_hwc_tx_event_handler(void *ctx, uint32_t gdma_txq_id,
    const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	if (!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id) {
		mana_warn(NULL, "unmatched tx queue %u != %u\n",
		    hwc_txq->gdma_wq->id, gdma_txq_id);
		return;
	}

	bus_dmamap_sync(hwc_txq->gdma_wq->mem_info.dma_tag,
	    hwc_txq->gdma_wq->mem_info.dma_map,
	    BUS_DMASYNC_POSTWRITE);
}

static int
mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
    enum gdma_queue_type type, uint64_t queue_size,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int
mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
    uint64_t queue_size,
    void *ctx, gdma_cq_callback *cb,
    struct gdma_queue *parent_eq,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static int
mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
    uint64_t queue_size,
    void *ctx, gdma_eq_callback *cb,
    struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}

static void
mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
			    completions[i].wq_num,
			    &comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
			    completions[i].wq_num,
			    &comp_data);
	}

	bus_dmamap_sync(q_self->mem_info.dma_tag, q_self->mem_info.dma_map,
	    BUS_DMASYNC_POSTREAD);

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}

static void
mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	if (hwc_cq->comp_buf)
		free(hwc_cq->comp_buf, M_DEVBUF);
	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);
	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);

	free(hwc_cq, M_DEVBUF);
}
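
/*
 * Create the HWC completion queue along with its parent EQ and the
 * buffer used to poll completions.
 */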
static int
mana_hwc_create_cq(struct hw_channel_context *hwc,
    uint16_t q_depth,
    gdma_eq_callback *callback, void *ctx,
    hwc_rx_event_handler_t *rx_ev_hdlr, void *rx_ev_ctx,
    hwc_tx_event_handler_t *tx_ev_hdlr, void *tx_ev_ctx,
    struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	uint32_t eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = malloc(sizeof(*hwc_cq), M_DEVBUF, M_WAITOK | M_ZERO);

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq,
	    mana_hwc_comp_event, eq, &cq);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = mallocarray(q_depth, sizeof(struct gdma_comp),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}
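
/*
 * Allocate one contiguous DMA buffer and carve it into q_depth
 * fixed-size message slots, one per work request.
 */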
static int
mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_msg_size,
    struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	uint32_t buf_size;
	uint8_t *base_pa;
	void *virt_addr;
	uint16_t i;
	int err;

	dma_buf = malloc(sizeof(*dma_buf) +
	    q_depth * sizeof(struct hwc_work_request),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	dma_buf->num_reqs = q_depth;

	buf_size = ALIGN(q_depth * max_msg_size, PAGE_SIZE);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		device_printf(hwc->dev,
		    "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (uint8_t *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];

		hwc_wr->buf_va = (char *)virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;
		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	free(dma_buf, M_DEVBUF);
	return err;
}

static void
mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
    struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;

	mana_gd_free_memory(&dma_buf->mem_info);
	free(dma_buf, M_DEVBUF);
}

static void
mana_hwc_destroy_wq(struct hw_channel_context *hwc,
    struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);

	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
		    hwc_wq->gdma_wq);

	free(hwc_wq, M_DEVBUF);
}

static int
mana_hwc_create_wq(struct hw_channel_context *hwc,
    enum gdma_queue_type q_type, uint16_t q_depth,
    uint32_t max_msg_size, struct hwc_cq *hwc_cq,
    struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	uint32_t queue_size;
	int err;

	if (q_type != GDMA_SQ && q_type != GDMA_RQ) {
		/* XXX should fail and return error? */
		mana_warn(NULL, "Invalid q_type %u\n", q_type);
	}

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = malloc(sizeof(*hwc_wq), M_DEVBUF, M_WAITOK | M_ZERO);

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
	    &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}
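
/* Post a request message on the HWC SQ and ring its doorbell. */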
static int
mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
    struct hwc_work_request *req,
    uint32_t dest_virt_rq_id, uint32_t dest_virt_rcq_id,
    bool dest_pf)
{
	device_t dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		device_printf(dev, "wrong msg_size: %u, buf_len: %u\n",
		    req->msg_size, req->buf_len);
		return EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (uint64_t)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		device_printf(dev,
		    "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}

static int
mana_hwc_init_inflight_msg(struct hw_channel_context *hwc, uint16_t num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg, "gdma hwc sema");

	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res,
	    "gdma hwc res lock");
	if (err)
		device_printf(hwc->dev,
		    "Failed to init inflight_msg_res: %d\n", err);

	return err;
}
static int
mana_hwc_test_channel(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_req_msg_size, uint32_t max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = malloc(q_depth * sizeof(struct hwc_caller_ctx),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}
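
/*
 * Tell the hardware about the HWC queues through the shared memory
 * channel and wait for the INIT_DONE event.
 */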
static int
mana_hwc_establish_channel(struct gdma_context *gc, uint16_t *q_depth,
    uint32_t *max_req_msg_size,
    uint32_t *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
	    eq->mem_info.dma_handle,
	    cq->mem_info.dma_handle,
	    rq->mem_info.dma_handle,
	    sq->mem_info.dma_handle,
	    eq->eq.msix_index);
	if (err)
		return err;

	if (wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * hz))
		return ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (cq->id >= gc->max_num_cqs) {
		mana_warn(NULL, "invalid cq id %u > %u\n",
		    cq->id, gc->max_num_cqs);
		return EPROTO;
	}

	gc->cq_table = malloc(gc->max_num_cqs * sizeof(struct gdma_queue *),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	gc->cq_table[cq->id] = cq;

	return 0;
}

static int
mana_hwc_init_queues(struct hw_channel_context *hwc, uint16_t q_depth,
    uint32_t max_req_msg_size, uint32_t max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/*
	 * CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
	    mana_hwc_init_event_handler, hwc,
	    mana_hwc_rx_event_handler, hwc,
	    mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
	    hwc->cq, &hwc->rxq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
	    hwc->cq, &hwc->txq);
	if (err) {
		device_printf(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}
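
/* Create and establish the HWC, the channel used for management commands. */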
int
mana_hwc_create_channel(struct gdma_context *gc)
{
	uint32_t max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	uint16_t q_depth_max;
	int err;

	hwc = malloc(sizeof(*hwc), M_DEVBUF, M_WAITOK | M_ZERO);

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/*
	 * mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
	    HW_CHANNEL_MAX_REQUEST_SIZE,
	    HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		device_printf(hwc->dev, "Failed to initialize HWC: %d\n",
		    err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
	    &max_resp_msg_size);
	if (err) {
		device_printf(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
	    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
	    max_req_msg_size, max_resp_msg_size);
	if (err) {
		/* Test failed, but the channel has been established */
		device_printf(hwc->dev, "Failed to test HWC: %d\n", err);
		return EIO;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}

void
mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/*
	 * gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	free(hwc->caller_ctx, M_DEVBUF);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);
	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);
	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);
	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;

	free(hwc, M_DEVBUF);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	free(gc->cq_table, M_DEVBUF);
	gc->cq_table = NULL;
}
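
/*
 * Send a request over the HWC and sleep until the response arrives or
 * the request times out.
 */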
int
mana_hwc_send_request(struct hw_channel_context *hwc, uint32_t req_len,
    const void *req, uint32_t resp_len, void *resp)
{
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	uint16_t msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];

	if (req_len > tx_wr->buf_len) {
		device_printf(hwc->dev,
		    "HWC: req msg size: %d > %d\n", req_len,
		    tx_wr->buf_len);
		err = EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	memcpy(req_msg, req, req_len);

	req_msg->req.hwc_msg_id = msg_id;

	tx_wr->msg_size = req_len;

	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
	if (err) {
		device_printf(hwc->dev,
		    "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (wait_for_completion_timeout(&ctx->comp_event, 30 * hz)) {
		device_printf(hwc->dev, "HWC: Request timed out!\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code) {
		device_printf(hwc->dev,
		    "HWC: Failed hw_channel req: 0x%x\n", ctx->status_code);
		err = EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);
	return err;
}