2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
41 #include "ocs_hw_queues.h"
43 #define HW_QTOP_DEBUG 0
46 * @brief Initialize queues
48 * Given the parsed queue topology spec, the SLI queues are created and initialized.
51 * @param hw pointer to HW object
52 * @param qtop pointer to queue topology
54 * @return returns 0 for success, an error code value for failure.
57 ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
60 uint32_t default_lengths[QTOP_LAST], len;
61 uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0;
62 uint8_t rqset_filter_mask = 0;
63 hw_eq_t *eqs[hw->config.n_rq];
64 hw_cq_t *cqs[hw->config.n_rq];
65 hw_rq_t *rqs[hw->config.n_rq];
66 ocs_hw_qtop_entry_t *qt, *next_qt;
77 default_lengths[QTOP_EQ] = 1024;
78 default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ];
79 default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ];
80 default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ];
81 default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH;
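/*
 * Illustrative note: a topology entry whose length is zero falls back to
 * these defaults, e.g. for an EQ entry:
 *
 *	len = (qt->len) ? qt->len : default_lengths[QTOP_EQ];
 *
 * so the string "eq cq:len=256 wq" would yield a 1024-entry EQ, a
 * 256-entry CQ, and a WQ of the SLI-reported default depth.
 */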
83 ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG);
91 ocs_list_init(&hw->eq_list, hw_eq_t, link);
93 /* If MRQ is requested, check if it is supported by SLI. */
94 if ((hw->config.n_rq > 1) && !hw->sli.config.features.flag.mrqp) {
95 ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n");
96 return OCS_HW_RTN_ERROR;
99 if (hw->config.n_rq > 1)
102 /* Allocate class WQ pools */
103 for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
104 hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
105 if (hw->wq_class_array[i] == NULL) {
106 ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n");
107 return OCS_HW_RTN_NO_MEMORY;
111 /* Allocate per CPU WQ pools */
112 for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
113 hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
114 if (hw->wq_cpu_array[i] == NULL) {
115 ocs_log_err(hw->os, "ocs_varray_alloc for wq_cpu failed\n");
116 return OCS_HW_RTN_NO_MEMORY;
121 ocs_hw_assert(qtop != NULL);
123 for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
124 if (i == qtop->inuse_count - 1)
131 len = (qt->len) ? qt->len : default_lengths[QTOP_EQ];
133 if (qt->set_default) {
134 default_lengths[QTOP_EQ] = len;
138 eq = hw_new_eq(hw, len);
140 hw_queue_teardown(hw);
141 return OCS_HW_RTN_NO_MEMORY;
146 len = (qt->len) ? qt->len : default_lengths[QTOP_CQ];
148 if (qt->set_default) {
149 default_lengths[QTOP_CQ] = len;
153 /* If this CQ is for MRQ, then delay the creation */
154 if (!use_mrq || next_qt->entry != QTOP_RQ) {
155 cq = hw_new_cq(eq, len);
157 hw_queue_teardown(hw);
158 return OCS_HW_RTN_NO_MEMORY;
165 len = (qt->len) ? qt->len : default_lengths[QTOP_WQ];
166 if (qt->set_default) {
167 default_lengths[QTOP_WQ] = len;
171 if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
172 ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp);
173 hw_queue_teardown(hw);
174 return OCS_HW_RTN_NO_MEMORY;
177 wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);
179 hw_queue_teardown(hw);
180 return OCS_HW_RTN_NO_MEMORY;
183 /* Place this WQ on the EQ WQ array */
184 if (ocs_varray_add(eq->wq_array, wq)) {
185 ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n");
186 hw_queue_teardown(hw);
187 return OCS_HW_RTN_ERROR;
190 /* Place this WQ on the HW class array */
191 if (qt->class < ARRAY_SIZE(hw->wq_class_array)) {
192 if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) {
193 ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n");
194 hw_queue_teardown(hw);
195 return OCS_HW_RTN_ERROR;
198 ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class);
199 hw_queue_teardown(hw);
200 return OCS_HW_RTN_ERROR;
204 * Place this WQ on the per-CPU list, assuming that EQs are mapped to the CPU given
205 * by the EQ instance modulo the number of CPUs
207 if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) {
208 ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n");
209 hw_queue_teardown(hw);
210 return OCS_HW_RTN_ERROR;
216 len = (qt->len) ? qt->len : default_lengths[QTOP_RQ];
217 if (qt->set_default) {
218 default_lengths[QTOP_RQ] = len;
222 if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
223 ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp);
224 hw_queue_teardown(hw);
225 return OCS_HW_RTN_NO_MEMORY;
229 mrq.rq_cfg[mrq.num_pairs].len = len;
230 mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp;
231 mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask;
232 mrq.rq_cfg[mrq.num_pairs].eq = eq;
235 rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp);
237 hw_queue_teardown(hw);
238 return OCS_HW_RTN_NO_MEMORY;
240 rq->filter_mask = qt->filter_mask;
246 len = (qt->len) ? qt->len : default_lengths[QTOP_MQ];
247 if (qt->set_default) {
248 default_lengths[QTOP_MQ] = len;
252 mq = hw_new_mq(cq, len);
254 hw_queue_teardown(hw);
255 return OCS_HW_RTN_NO_MEMORY;
266 /* First create normal RQs. */
267 for (i = 0; i < mrq.num_pairs; i++) {
268 for (j = 0; j < mrq.num_pairs; j++) {
269 if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) {
270 /* Matching filter masks: these RQs must be created as an RQ Set */
271 if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) {
272 ocs_log_crit(hw->os, "Can't create more than one RQ Set\n");
273 hw_queue_teardown(hw);
274 return OCS_HW_RTN_ERROR;
275 } else if (!rqset_filter_mask){
276 rqset_filter_mask = mrq.rq_cfg[i].filter_mask;
277 rqset_len = mrq.rq_cfg[i].len;
278 rqset_ulp = mrq.rq_cfg[i].ulp;
280 eqs[rqset_count] = mrq.rq_cfg[i].eq;
285 if (j == mrq.num_pairs) {
287 cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]);
289 hw_queue_teardown(hw);
290 return OCS_HW_RTN_NO_MEMORY;
293 rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp);
295 hw_queue_teardown(hw);
296 return OCS_HW_RTN_NO_MEMORY;
298 rq->filter_mask = mrq.rq_cfg[i].filter_mask;
302 /* Now create RQ Set */
304 if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) {
306 "Max Supported MRQ pairs = %d\n",
307 OCE_HW_MAX_NUM_MRQ_PAIRS);
308 hw_queue_teardown(hw);
309 return OCS_HW_RTN_ERROR;
313 if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) {
314 hw_queue_teardown(hw);
315 return OCS_HW_RTN_ERROR;
319 if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) {
320 hw_queue_teardown(hw);
321 return OCS_HW_RTN_ERROR;
324 for (i = 0; i < rqset_count ; i++) {
325 rqs[i]->filter_mask = rqset_filter_mask;
326 rqs[i]->is_mrq = TRUE;
327 rqs[i]->base_mrq_id = rqs[0]->hdr->id;
330 hw->hw_mrq_count = rqset_count;
334 return OCS_HW_RTN_SUCCESS;
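/*
 * Minimal usage sketch (illustrative only; the real init flow and its
 * error handling live in ocs_hw_init()):
 *
 *	ocs_hw_qtop_t *qtop = ocs_hw_qtop_parse(hw, "eq cq mq 4(eq cq rq wq)");
 *	if (qtop == NULL)
 *		return OCS_HW_RTN_ERROR;
 *	if (ocs_hw_init_queues(hw, qtop) != OCS_HW_RTN_SUCCESS) {
 *		ocs_hw_qtop_free(qtop);
 *		return OCS_HW_RTN_ERROR;
 *	}
 */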
339 * @brief Allocate a new EQ object
341 * A new EQ object is instantiated
343 * @param hw pointer to HW object
344 * @param entry_count number of entries in the EQ
346 * @return pointer to allocated EQ object
349 hw_new_eq(ocs_hw_t *hw, uint32_t entry_count)
351 hw_eq_t *eq = ocs_malloc(hw->os, sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT);
354 eq->type = SLI_QTYPE_EQ;
356 eq->entry_count = entry_count;
357 eq->instance = hw->eq_count++;
358 eq->queue = &hw->eq[eq->instance];
359 ocs_list_init(&eq->cq_list, hw_cq_t, link);
361 eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
362 if (eq->wq_array == NULL) {
363 ocs_free(hw->os, eq, sizeof(*eq));
366 if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) {
367 ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance);
368 ocs_free(hw->os, eq, sizeof(*eq));
371 sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
372 hw->hw_eq[eq->instance] = eq;
373 ocs_list_add_tail(&hw->eq_list, eq);
374 ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id,
383 * @brief Allocate a new CQ object
385 * A new CQ object is instantiated
387 * @param eq pointer to parent EQ object
388 * @param entry_count number of entries in the CQ
390 * @return pointer to allocated CQ object
393 hw_new_cq(hw_eq_t *eq, uint32_t entry_count)
395 ocs_hw_t *hw = eq->hw;
396 hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
400 cq->type = SLI_QTYPE_CQ;
401 cq->instance = eq->hw->cq_count++;
402 cq->entry_count = entry_count;
403 cq->queue = &hw->cq[cq->instance];
405 ocs_list_init(&cq->q_list, hw_q_t, link);
407 if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) {
408 ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
411 ocs_free(hw->os, cq, sizeof(*cq));
414 hw->hw_cq[cq->instance] = cq;
415 ocs_list_add_tail(&eq->cq_list, cq);
416 ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id,
424 * @brief Allocate a new CQ Set of objects.
426 * @param eqs pointer to a set of EQ objects.
427 * @param cqs pointer to a set of CQ objects to be returned.
428 * @param num_cqs number of CQ queues in the set.
429 * @param entry_count number of entries in the CQ.
431 * @return 0 on success and -1 on failure.
434 hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count)
437 ocs_hw_t *hw = eqs[0]->hw;
438 sli4_t *sli4 = &hw->sli;
440 sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT];
442 /* Initialise CQS pointers to NULL */
443 for (i = 0; i < num_cqs; i++) {
447 for (i = 0; i < num_cqs; i++) {
448 cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
454 cq->type = SLI_QTYPE_CQ;
455 cq->instance = hw->cq_count++;
456 cq->entry_count = entry_count;
457 cq->queue = &hw->cq[cq->instance];
459 assocs[i] = eqs[i]->queue;
460 ocs_list_init(&cq->q_list, hw_q_t, link);
463 if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {
464 ocs_log_err(hw->os, "Failed to create CQ Set\n");
468 for (i = 0; i < num_cqs; i++) {
469 hw->hw_cq[cqs[i]->instance] = cqs[i];
470 ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]);
476 for (i = 0; i < num_cqs; i++) {
478 ocs_free(hw->os, cqs[i], sizeof(*cqs[i]));
487 * @brief Allocate a new MQ object
489 * A new MQ object is instantiated
491 * @param cq pointer to parent CQ object
492 * @param entry_count number of entries in the MQ
494 * @return pointer to allocated MQ object
497 hw_new_mq(hw_cq_t *cq, uint32_t entry_count)
499 ocs_hw_t *hw = cq->eq->hw;
500 hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT);
504 mq->type = SLI_QTYPE_MQ;
505 mq->instance = cq->eq->hw->mq_count++;
506 mq->entry_count = entry_count;
507 mq->entry_size = OCS_HW_MQ_DEPTH;
508 mq->queue = &hw->mq[mq->instance];
510 if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ,
514 ocs_log_err(hw->os, "MQ allocation failure\n");
515 ocs_free(hw->os, mq, sizeof(*mq));
518 hw->hw_mq[mq->instance] = mq;
519 ocs_list_add_tail(&cq->q_list, mq);
520 ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id,
528 * @brief Allocate a new WQ object
530 * A new WQ object is instantiated
532 * @param cq pointer to parent CQ object
533 * @param entry_count number of entries in the WQ
534 * @param class WQ class
535 * @param ulp index of chute
537 * @return pointer to allocated WQ object
540 hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp)
542 ocs_hw_t *hw = cq->eq->hw;
543 hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT);
548 wq->type = SLI_QTYPE_WQ;
549 wq->instance = cq->eq->hw->wq_count++;
550 wq->entry_count = entry_count;
551 wq->queue = &hw->wq[wq->instance];
553 wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT;
554 wq->wqec_count = wq->wqec_set_count;
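/* hold one entry back (free_count = entry_count - 1), presumably so a
 * completely full ring can be distinguished from an empty one */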
555 wq->free_count = wq->entry_count - 1;
557 ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link);
559 if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
560 ocs_log_err(hw->os, "WQ allocation failure\n");
561 ocs_free(hw->os, wq, sizeof(*wq));
564 hw->hw_wq[wq->instance] = wq;
565 ocs_list_add_tail(&cq->q_list, wq);
566 ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
567 wq->entry_count, wq->class, wq->ulp);
574 * @brief Allocate a hw_rq_t object
576 * Allocate an RQ object, which encapsulates 2 SLI queues (for rq pair)
578 * @param cq pointer to parent CQ object
579 * @param entry_count number of entries in the RQs
580 * @param ulp ULP index for this RQ
582 * @return pointer to newly allocated hw_rq_t
585 hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp)
587 ocs_hw_t *hw = cq->eq->hw;
588 hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
591 ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);
595 rq->instance = hw->hw_rq_count++;
597 rq->type = SLI_QTYPE_RQ;
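/* clamp the requested depth to the SLI-reported maximum (max_hw_rq) and
 * OCS_HW_RQ_NUM_HDR */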
600 rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));
602 /* Create the header RQ */
603 ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
604 rq->hdr = &hw->rq[hw->rq_count];
605 rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
607 if (sli_fc_rq_alloc(&hw->sli, rq->hdr,
612 ocs_log_err(hw->os, "RQ allocation failure - header\n");
613 ocs_free(hw->os, rq, sizeof(*rq));
616 hw->hw_rq_lookup[hw->rq_count] = rq->instance; /* Update hw_rq_lookup[] */
618 ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr size %4d ulp %d\n",
619 rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp);
621 /* Create the default data RQ */
622 ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
623 rq->data = &hw->rq[hw->rq_count];
624 rq->data_entry_size = hw->config.rq_default_buffer_size;
626 if (sli_fc_rq_alloc(&hw->sli, rq->data,
631 ocs_log_err(hw->os, "RQ allocation failure - first burst\n");
632 ocs_free(hw->os, rq, sizeof(*rq));
635 hw->hw_rq_lookup[hw->rq_count] = rq->instance; /* Update hw_rq_lookup[] */
637 ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance,
638 rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp);
640 hw->hw_rq[rq->instance] = rq;
641 ocs_list_add_tail(&cq->q_list, rq);
643 rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
644 rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
645 if (rq->rq_tracker == NULL) {
646 ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
655 * @brief Allocate a hw_rq_t object SET
657 * Allocate an RQ object SET, where each element in the set
658 * encapsulates 2 SLI queues (for an RQ pair)
660 * @param cqs pointers to be associated with RQs.
661 * @param rqs RQ pointers to be returned on success.
662 * @param num_rq_pairs number of rq pairs in the Set.
663 * @param entry_count number of entries in the RQs
664 * @param ulp ULP index for this RQ
666 * @return 0 on success and -1 on failure.
669 hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp)
671 ocs_hw_t *hw = cqs[0]->eq->hw;
673 sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL };
674 uint32_t max_hw_rq, i, q_count;
676 ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);
678 /* Initialise RQS pointers */
679 for (i = 0; i < num_rq_pairs; i++) {
683 for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
684 rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
689 rq->instance = hw->hw_rq_count++;
691 rq->type = SLI_QTYPE_RQ;
693 rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));
696 rq->hdr = &hw->rq[hw->rq_count];
697 rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
698 hw->hw_rq_lookup[hw->rq_count] = rq->instance;
700 qs[q_count] = rq->hdr;
703 rq->data = &hw->rq[hw->rq_count];
704 rq->data_entry_size = hw->config.rq_default_buffer_size;
705 hw->hw_rq_lookup[hw->rq_count] = rq->instance;
707 qs[q_count + 1] = rq->data;
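/* qs[] is interleaved hdr0, data0, hdr1, data1, ...; this is the layout
 * handed to sli_fc_rq_set_alloc() below */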
709 rq->rq_tracker = NULL;
712 if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
715 rqs[0]->hdr_entry_size,
716 rqs[0]->data_entry_size,
718 ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);
723 for (i = 0; i < num_rq_pairs; i++) {
724 hw->hw_rq[rqs[i]->instance] = rqs[i];
725 ocs_list_add_tail(&cqs[i]->q_list, rqs[i]);
726 rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
727 rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
728 if (rqs[i]->rq_tracker == NULL) {
729 ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
737 for (i = 0; i < num_rq_pairs; i++) {
738 if (rqs[i] != NULL) {
739 if (rqs[i]->rq_tracker != NULL) {
740 ocs_free(hw->os, rqs[i]->rq_tracker,
741 sizeof(ocs_hw_sequence_t*) * rqs[i]->entry_count);
743 ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
752 * @brief Free an EQ object
754 * The EQ object and any child queue objects are freed
756 * @param eq pointer to EQ object
761 hw_del_eq(hw_eq_t *eq)
767 ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) {
770 ocs_varray_free(eq->wq_array);
771 ocs_list_remove(&eq->hw->eq_list, eq);
772 eq->hw->hw_eq[eq->instance] = NULL;
773 ocs_free(eq->hw->os, eq, sizeof(*eq));
778 * @brief Free a CQ object
780 * The CQ object and any child queue objects are freed
782 * @param cq pointer to CQ object
787 hw_del_cq(hw_cq_t *cq)
793 ocs_list_foreach_safe(&cq->q_list, q, q_next) {
796 hw_del_mq((hw_mq_t*) q);
799 hw_del_wq((hw_wq_t*) q);
802 hw_del_rq((hw_rq_t*) q);
808 ocs_list_remove(&cq->eq->cq_list, cq);
809 cq->eq->hw->hw_cq[cq->instance] = NULL;
810 ocs_free(cq->eq->hw->os, cq, sizeof(*cq));
815 * @brief Free a MQ object
817 * The MQ object is freed
819 * @param mq pointer to MQ object
824 hw_del_mq(hw_mq_t *mq)
827 ocs_list_remove(&mq->cq->q_list, mq);
828 mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
829 ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq));
834 * @brief Free a WQ object
836 * The WQ object is freed
838 * @param wq pointer to WQ object
843 hw_del_wq(hw_wq_t *wq)
846 ocs_list_remove(&wq->cq->q_list, wq);
847 wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
848 ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq));
853 * @brief Free an RQ object
855 * The RQ object is freed
857 * @param rq pointer to RQ object
862 hw_del_rq(hw_rq_t *rq)
864 ocs_hw_t *hw = rq->cq->eq->hw;
867 /* Free RQ tracker */
868 if (rq->rq_tracker != NULL) {
869 ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);
870 rq->rq_tracker = NULL;
872 ocs_list_remove(&rq->cq->q_list, rq);
873 hw->hw_rq[rq->instance] = NULL;
874 ocs_free(hw->os, rq, sizeof(*rq));
879 * @brief Display HW queue objects
881 * The HW queue objects are displayed using ocs_log
883 * @param hw pointer to HW object
888 hw_queue_dump(ocs_hw_t *hw)
897 ocs_list_foreach(&hw->eq_list, eq) {
898 ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
899 ocs_list_foreach(&eq->cq_list, cq) {
900 ocs_printf(" cq[%d] id %2d current\n", cq->instance, cq->queue->id);
901 ocs_list_foreach(&cq->q_list, q) {
905 ocs_printf(" mq[%d] id %2d\n", mq->instance, mq->queue->id);
909 ocs_printf(" wq[%d] id %2d\n", wq->instance, wq->queue->id);
913 ocs_printf(" rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id);
924 * @brief Teardown HW queue objects
926 * The HW queue objects are freed
928 * @param hw pointer to HW object
933 hw_queue_teardown(ocs_hw_t *hw)
939 if (ocs_list_valid(&hw->eq_list)) {
940 ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) {
944 for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
945 ocs_varray_free(hw->wq_cpu_array[i]);
946 hw->wq_cpu_array[i] = NULL;
948 for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
949 ocs_varray_free(hw->wq_class_array[i]);
950 hw->wq_class_array[i] = NULL;
955 * @brief Allocate a WQ to an IO object
957 * The next work queue index is used to assign a WQ to an IO.
959 * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is selected.
962 * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
963 * the IO request came in on is selected.
965 * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
966 * CPU the request is made on is selected.
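 *
 * For example (illustrative): an IO with wq_steering == OCS_HW_WQ_STEERING_CLASS
 * and wq_class == 1 draws the next WQ from hw->wq_class_array[1], which was
 * populated during ocs_hw_init_queues().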
968 * @param hw pointer to HW object
969 * @param io pointer to IO object
971 * @return Return pointer to next WQ
974 ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
979 switch(io->wq_steering) {
980 case OCS_HW_WQ_STEERING_CLASS:
981 if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
982 wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
985 case OCS_HW_WQ_STEERING_REQUEST:
987 if (likely(eq != NULL)) {
988 wq = ocs_varray_iter_next(eq->wq_array);
991 case OCS_HW_WQ_STEERING_CPU: {
992 uint32_t cpuidx = ocs_thread_getcpu();
994 if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
995 wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);
1001 if (unlikely(wq == NULL)) {
1009 * @brief Return count of EQs for a queue topology object
1011 * The EQ count in the HW's queue topology (hw->qtop) object is returned
1013 * @param hw pointer to HW object
1015 * @return count of EQs
1018 ocs_hw_qtop_eq_count(ocs_hw_t *hw)
1020 return hw->qtop->entry_counts[QTOP_EQ];
1023 #define TOKEN_LEN 32
1026 * @brief return string given a QTOP entry
1028 * @param entry QTOP entry
1030 * @return returns string or "unknown"
1034 qtopentry2s(ocs_hw_qtop_entry_e entry) {
1036 #define P(x) case x: return #x;
1042 P(QTOP_THREAD_START)
1052 * @brief Declare token types
1067 * @brief Declare token sub-types
1082 * @brief convert queue subtype to QTOP entry
1084 * @param q queue subtype
1086 * @return QTOP entry or 0
1088 static ocs_hw_qtop_entry_e
1089 subtype2qtop(tok_subtype_e q)
1092 case TOK_SUB_EQ: return QTOP_EQ;
1093 case TOK_SUB_CQ: return QTOP_CQ;
1094 case TOK_SUB_RQ: return QTOP_RQ;
1095 case TOK_SUB_MQ: return QTOP_MQ;
1096 case TOK_SUB_WQ: return QTOP_WQ;
1104 * @brief Declare token object
1108 tok_subtype_e subtype;
1109 char string[TOKEN_LEN];
1113 * @brief Declare token array object
1116 tok_t *tokens; /* Pointer to array of tokens */
1117 uint32_t alloc_count; /* Number of tokens in the array */
1118 uint32_t inuse_count; /* Number of tokens posted to array */
1119 uint32_t iter_idx; /* Iterator index */
1123 * @brief Declare token match structure
1128 tok_subtype_e subtype;
1132 * @brief test if character is ID start character
1134 * @param c character to test
1136 * @return TRUE if character is an ID start character
1141 return isalpha(c) || (c == '_') || (c == '$');
1145 * @brief test if character is an ID character
1147 * @param c character to test
1149 * @return TRUE if character is an ID character
1154 return idstart(c) || ocs_isdigit(c);
1158 * @brief Declare single character matches
1160 static tokmatch_t cmatches[] = {
1168 * @brief Declare identifier match strings
1170 static tokmatch_t smatches[] = {
1171 {"eq", TOK_QUEUE, TOK_SUB_EQ},
1172 {"cq", TOK_QUEUE, TOK_SUB_CQ},
1173 {"rq", TOK_QUEUE, TOK_SUB_RQ},
1174 {"mq", TOK_QUEUE, TOK_SUB_MQ},
1175 {"wq", TOK_QUEUE, TOK_SUB_WQ},
1176 {"len", TOK_ATTR_NAME, TOK_SUB_LEN},
1177 {"class", TOK_ATTR_NAME, TOK_SUB_CLASS},
1178 {"ulp", TOK_ATTR_NAME, TOK_SUB_ULP},
1179 {"filter", TOK_ATTR_NAME, TOK_SUB_FILTER},
1183 * @brief Scan string and return next token
1185 * The string is scanned and the next token is returned
1187 * @param s input string to scan
1188 * @param tok pointer to place scanned token
1190 * @return pointer to input string following scanned token, or NULL
1193 tokenize(const char *s, tok_t *tok)
1197 memset(tok, 0, sizeof(*tok));
1199 /* Skip over whitespace */
1200 while (*s && ocs_isspace(*s)) {
1204 /* Return if nothing left in this string */
1209 /* Look for single character matches */
1210 for (i = 0; i < ARRAY_SIZE(cmatches); i++) {
1211 if (cmatches[i].s[0] == *s) {
1212 tok->type = cmatches[i].type;
1213 tok->subtype = cmatches[i].subtype;
1214 tok->string[0] = *s++;
1219 /* Scan for a hex number or decimal */
1220 if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) {
1221 char *p = tok->string;
1223 tok->type = TOK_NUMBER;
1227 while ((*s == '.') || ocs_isxdigit(*s)) {
1228 if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
1232 tok->type = TOK_NUMBER_LIST;
1238 } else if (ocs_isdigit(*s)) {
1239 char *p = tok->string;
1241 tok->type = TOK_NUMBER;
1242 while ((*s == ',') || ocs_isdigit(*s)) {
1243 if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
1247 tok->type = TOK_NUMBER_LIST;
1255 /* Scan for an ID */
1257 char *p = tok->string;
1259 for (*p++ = *s++; idchar(*s); s++) {
1260 if ((p - tok->string) < TOKEN_LEN) {
1265 /* See if this is a $ number value */
1266 if (tok->string[0] == '$') {
1267 tok->type = TOK_NUMBER_VALUE;
1269 /* Look for a string match */
1270 for (i = 0; i < ARRAY_SIZE(smatches); i++) {
1271 if (strcmp(smatches[i].s, tok->string) == 0) {
1272 tok->type = smatches[i].type;
1273 tok->subtype = smatches[i].subtype;
1283 * @brief convert token type to string
1285 * @param type token type
1287 * @return string, or "unknown"
1290 token_type2s(tok_type_e type)
1293 #define P(x) case x: return #x;
1309 * @brief convert token sub-type to string
1311 * @param subtype token sub-type
1313 * @return string, or "unknown"
1316 token_subtype2s(tok_subtype_e subtype)
1319 #define P(x) case x: return #x;
1335 * @brief Generate syntax error message
1337 * When a syntax error is found, the input tokens are dumped up to and including
1338 * the token that failed, as indicated by the current iterator index.
1340 * @param hw pointer to HW object
1341 * @param tokarray pointer to token array object
1346 tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
1351 ocs_log_test(hw->os, "Syntax error:\n");
1353 for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
1354 ocs_log_test(hw->os, "%s [%2d] %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : " ", i,
1355 token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
1360 * @brief parse a number
1362 * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
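 *
 * Example values (per the cases below): "$ncpu" yields the CPU count,
 * "$ncpu1" the CPU count minus one, "$nwq" the configured WQ count,
 * and "$rpt0" the innermost repeat-loop counter; plain number strings
 * are converted with ocs_strtoul().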
1364 * @param hw pointer to HW object
1365 * @param qtop pointer to QTOP object
1366 * @param tok pointer to token to parse
1368 * @return numeric value
1371 tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
1374 uint32_t num_cpus = ocs_get_num_cpus();
1377 case TOK_NUMBER_VALUE:
1378 if (ocs_strcmp(tok->string, "$ncpu") == 0) {
1380 } else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
1381 rval = num_cpus - 1;
1382 } else if (ocs_strcmp(tok->string, "$nwq") == 0) {
1384 rval = hw->config.n_wq;
1386 } else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
1387 rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
1388 } else if (ocs_strcmp(tok->string, "$nulp") == 0) {
1389 rval = hw->ulp_max - hw->ulp_start + 1;
1390 } else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
1391 rval = qtop->rptcount[qtop->rptcount_idx-1];
1392 } else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
1393 rval = qtop->rptcount[qtop->rptcount_idx-2];
1394 } else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
1395 rval = qtop->rptcount[qtop->rptcount_idx-3];
1396 } else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
1397 rval = qtop->rptcount[qtop->rptcount_idx-4];
1399 rval = ocs_strtoul(tok->string, 0, 0);
1403 rval = ocs_strtoul(tok->string, 0, 0);
1413 * @brief parse an array of tokens
1415 * The tokens are semantically parsed to generate QTOP entries.
1417 * @param hw pointer to HW object
1418 * @param tokarray array of tokens
1419 * @param qtop output QTOP object
1421 * @return returns 0 for success, a negative error code value for failure.
1424 parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
1426 ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;
1429 for (; (tokarray->iter_idx < tokarray->inuse_count) &&
1430 ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
1431 if (qtop->inuse_count >= qtop->alloc_count) {
1435 qt = qtop->entries + qtop->inuse_count;
1437 switch (tok[0].type)
1440 qt->entry = subtype2qtop(tok[0].subtype);
1441 qt->set_default = FALSE;
1444 qtop->inuse_count++;
1446 tokarray->iter_idx++; /* Advance current token index */
1448 /* Parse for queue attributes, possibly multiple instances */
1449 while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
1450 tok = &tokarray->tokens[tokarray->iter_idx];
1451 if( (tok[0].type == TOK_COLON) &&
1452 (tok[1].type == TOK_ATTR_NAME) &&
1453 (tok[2].type == TOK_EQUALS) &&
1454 ((tok[3].type == TOK_NUMBER) ||
1455 (tok[3].type == TOK_NUMBER_VALUE) ||
1456 (tok[3].type == TOK_NUMBER_LIST))) {
1458 switch (tok[1].subtype) {
1460 qt->len = tok_getnumber(hw, qtop, &tok[3]);
1464 qt->class = tok_getnumber(hw, qtop, &tok[3]);
1468 qt->ulp = tok_getnumber(hw, qtop, &tok[3]);
1471 case TOK_SUB_FILTER:
1472 if (tok[3].type == TOK_NUMBER_LIST) {
1474 char *p = tok[3].string;
1476 while ((p != NULL) && *p) {
1479 v = ocs_strtoul(p, 0, 0);
1484 p = ocs_strchr(p, ',');
1489 qt->filter_mask = mask;
1491 qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));
1497 /* Advance current token index */
1498 tokarray->iter_idx += 4;
1503 qtop->entry_counts[qt->entry]++;
1507 if ( ((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
1508 (tok[1].type == TOK_COLON) &&
1509 (tok[2].type == TOK_QUEUE) &&
1510 (tok[3].type == TOK_EQUALS) &&
1511 ((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
1512 qt->entry = subtype2qtop(tok[2].subtype);
1513 qt->set_default = TRUE;
1514 switch(tok[0].subtype) {
1516 qt->len = tok_getnumber(hw, qtop, &tok[4]);
1519 qt->class = tok_getnumber(hw, qtop, &tok[4]);
1522 qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
1527 qtop->inuse_count++;
1528 tokarray->iter_idx += 5;
1530 tok_syntax(hw, tokarray);
1536 case TOK_NUMBER_VALUE: {
1537 uint32_t rpt_count = 1;
1540 rpt_count = tok_getnumber(hw, qtop, tok);
1542 if (tok[1].type == TOK_LPAREN) {
1543 uint32_t iter_idx_save;
1545 tokarray->iter_idx += 2;
1547 /* save token array iteration index */
1548 iter_idx_save = tokarray->iter_idx;
1550 for (i = 0; i < rpt_count; i++) {
1551 uint32_t rptcount_idx = qtop->rptcount_idx;
1553 if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
1554 qtop->rptcount[qtop->rptcount_idx++] = i;
1557 /* restore token array iteration index */
1558 tokarray->iter_idx = iter_idx_save;
1560 /* parse, append to qtop */
1561 parse_topology(hw, tokarray, qtop);
1563 qtop->rptcount_idx = rptcount_idx;
1570 tokarray->iter_idx++;
1574 tok_syntax(hw, tokarray);
1582 * @brief Parse queue topology string
1584 * The queue topology object is allocated, and filled with the results of parsing the
1585 * passed-in queue topology string
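 *
 * Illustrative strings (assumed from the tokenize()/parse_topology() grammar):
 *	"eq cq mq eq cq wq eq cq rq" - an MQ, WQ and RQ, each on its own EQ/CQ
 *	"4(eq cq rq wq)"             - repeat an EQ/CQ/RQ/WQ chain four times
 *	"len:wq=256 eq cq wq"        - set the default WQ length, then create a WQ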
1587 * @param hw pointer to HW object
1588 * @param qtop_string input queue topology string
1590 * @return pointer to allocated QTOP object, or NULL if there was an error
1593 ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
1595 ocs_hw_qtop_t *qtop;
1596 tokarray_t tokarray;
1600 ocs_hw_qtop_entry_t *qt;
1603 ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);
1605 /* Allocate a token array */
1606 tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
1607 if (tokarray.tokens == NULL) {
1610 tokarray.alloc_count = MAX_TOKENS;
1611 tokarray.inuse_count = 0;
1612 tokarray.iter_idx = 0;
1614 /* Parse the tokens */
1615 for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
1616 ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count]))) != NULL; ) {
1617 tokarray.inuse_count++;
1620 /* Allocate a queue topology structure */
1621 qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
1623 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1624 ocs_log_err(hw->os, "malloc qtop failed\n");
1629 /* Allocate queue topology entries */
1630 qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES*sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
1631 if (qtop->entries == NULL) {
1632 ocs_log_err(hw->os, "malloc qtop entries failed\n");
1633 ocs_free(hw->os, qtop, sizeof(*qtop));
1634 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1637 qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
1638 qtop->inuse_count = 0;
1640 /* Parse the tokens */
1641 parse_topology(hw, &tokarray, qtop);
1643 for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
1644 ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n", qtopentry2s(qt->entry), qt->set_default, qt->len,
1645 qt->class, qt->ulp);
1649 /* Free the tokens array */
1650 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1656 * @brief free queue topology object
1658 * @param qtop pointer to QTOP object
1663 ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
1666 if (qtop->entries != NULL) {
1667 ocs_free(qtop->os, qtop->entries, qtop->alloc_count*sizeof(*qtop->entries));
1669 ocs_free(qtop->os, qtop, sizeof(*qtop));
1673 /* Uncomment this to turn on RQ debug */
1674 // #define ENABLE_DEBUG_RQBUF
1676 static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
1677 static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
1678 static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1679 static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1682 * @brief Process receive queue completions for RQ Pair mode.
1685 * RQ completions are processed. In RQ pair mode, a single header and single payload
1686 * buffer are received, and passed to the function that has registered for unsolicited callbacks.
1689 * @param hw Hardware context.
1690 * @param cq Pointer to HW completion queue.
1691 * @param cqe Completion queue entry.
1693 * @return Returns 0 for success, or a negative error code value for failure.
1697 ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1705 ocs_hw_sequence_t *seq;
1707 rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1708 if (0 != rq_status) {
1709 switch (rq_status) {
1710 case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1711 case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1712 /* just get RQ buffer then return to chip */
1713 rqindex = ocs_hw_rqpair_find(hw, rq_id);
1715 ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1721 seq = ocs_hw_rqpair_get(hw, rqindex, index);
1723 /* return to chip */
1724 if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1725 ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",
1730 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1731 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1732 /* since RQ buffers were not consumed, cannot return them to chip */
1734 ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1741 rqindex = ocs_hw_rqpair_find(hw, rq_id);
1743 ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1747 OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
1748 rq->payload_use_count++;})
1750 seq = ocs_hw_rqpair_get(hw, rqindex, index);
1751 ocs_hw_assert(seq != NULL);
1755 seq->out_of_xris = 0;
1759 sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1760 seq->header->dma.len = h_len;
1761 seq->payload->dma.len = p_len;
1762 seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1763 seq->hw_priv = cq->eq;
1765 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1766 if (hw->config.bounce) {
1767 fc_header_t *hdr = seq->header->dma.virt;
1768 uint32_t s_id = fc_be24toh(hdr->s_id);
1769 uint32_t d_id = fc_be24toh(hdr->d_id);
1770 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1771 if (hw->callback.bounce != NULL) {
1772 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1775 hw->callback.unsolicited(hw->args.unsolicited, seq);
1782 * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
1785 * RQ completions are processed. In RQ pair mode, a single header and single payload
1786 * buffer are received, and passed to the function that has registered for unsolicited callbacks.
1789 * @param hw Hardware context.
1790 * @param cq Pointer to HW completion queue.
1791 * @param cqe Completion queue entry.
1793 * @return Returns 0 for success, or a negative error code value for failure.
1797 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1799 /* Seems silly to call a SLI function to decode - use the structure directly for performance */
1800 sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t*)cqe;
1807 ocs_hw_sequence_t *seq;
1808 uint8_t axr_lock_taken = 0;
1809 #if defined(OCS_DISC_SPIN_DELAY)
1814 rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1815 if (0 != rq_status) {
1816 switch (rq_status) {
1817 case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1818 case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1819 /* just get RQ buffer then return to chip */
1820 rqindex = ocs_hw_rqpair_find(hw, rq_id);
1822 ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1828 seq = ocs_hw_rqpair_get(hw, rqindex, index);
1830 /* return to chip */
1831 if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1832 ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",
1837 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1838 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1839 /* since RQ buffers were not consumed, cannot return them to chip */
1840 ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1848 rqindex = ocs_hw_rqpair_find(hw, rq_id);
1850 ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1854 OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
1855 rq->payload_use_count++;})
1857 seq = ocs_hw_rqpair_get(hw, rqindex, index);
1858 ocs_hw_assert(seq != NULL);
1861 seq->auto_xrdy = opt_wr->agxr;
1862 seq->out_of_xris = opt_wr->oox;
1863 seq->xri = opt_wr->xri;
1866 sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1867 seq->header->dma.len = h_len;
1868 seq->payload->dma.len = p_len;
1869 seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1870 seq->hw_priv = cq->eq;
1872 if (seq->auto_xrdy) {
1873 fc_header_t *fc_hdr = seq->header->dma.virt;
1875 seq->hio = ocs_hw_io_lookup(hw, seq->xri);
1876 ocs_lock(&seq->hio->axr_lock);
1879 /* save the FCFI, src_id, dest_id and ox_id because we need them for the sequence object when the data comes. */
1880 seq->hio->axr_buf->fcfi = seq->fcfi;
1881 seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
1882 seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
1883 seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
1884 seq->hio->axr_buf->cmd_cqe = 1;
1887 * Since auto xfer rdy is used for this IO, then clear the sequence
1888 * initiative bit in the header so that the upper layers wait for the
1889 * data. This should flow exactly like the first burst case.
1891 fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);
1893 /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
1894 if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
1895 seq->hio->axr_buf->call_axr_cmd = 1;
1896 seq->hio->axr_buf->cmd_seq = seq;
1897 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;
1901 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1902 if (hw->config.bounce) {
1903 fc_header_t *hdr = seq->header->dma.virt;
1904 uint32_t s_id = fc_be24toh(hdr->s_id);
1905 uint32_t d_id = fc_be24toh(hdr->d_id);
1906 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1907 if (hw->callback.bounce != NULL) {
1908 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1911 hw->callback.unsolicited(hw->args.unsolicited, seq);
1914 if (seq->auto_xrdy) {
1915 /* If the data CQE arrived before the command CQE (out of order, AXR case) */
1916 if(seq->hio->axr_buf->data_cqe == 1) {
1918 #if defined(OCS_DISC_SPIN_DELAY)
1919 if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
1920 delay = ocs_strtoul(prop_buf, 0, 0);
1924 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1925 if (hw->config.bounce) {
1926 fc_header_t *hdr = seq->header->dma.virt;
1927 uint32_t s_id = fc_be24toh(hdr->s_id);
1928 uint32_t d_id = fc_be24toh(hdr->d_id);
1929 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1930 if (hw->callback.bounce != NULL) {
1931 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);
1934 hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);
1939 exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
1940 if(axr_lock_taken) {
1941 ocs_unlock(&seq->hio->axr_lock);
1947 * @brief Process CQ completions for Auto xfer rdy data phases.
1950 * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
1951 * being assigned to an IO. When the completion is received, all of the data
1952 * is in the single buffer.
1954 * @param hw Hardware context.
1955 * @param cq Pointer to HW completion queue.
1956 * @param cqe Completion queue entry.
1958 * @return Returns 0 for success, or a negative error code value for failure.
1962 ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1964 /* Seems silly to call a SLI function to decode - use the structure directly for performance */
1965 sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t*)cqe;
1966 ocs_hw_sequence_t *seq;
1968 ocs_hw_auto_xfer_rdy_buffer_t *buf;
1969 #if defined(OCS_DISC_SPIN_DELAY)
1973 /* Look up the IO */
1974 io = ocs_hw_io_lookup(hw, opt_wr->xri);
1975 ocs_lock(&io->axr_lock);
1981 seq->out_of_xris = 0;
1982 seq->xri = opt_wr->xri;
1984 seq->header = &buf->header;
1985 seq->payload = &buf->payload;
1987 seq->header->dma.len = sizeof(fc_header_t);
1988 seq->payload->dma.len = opt_wr->total_data_placed;
1989 seq->fcfi = buf->fcfi;
1990 seq->hw_priv = cq->eq;
1993 if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
1994 seq->status = OCS_HW_UNSOL_SUCCESS;
1995 } else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
1996 seq->status = OCS_HW_UNSOL_ABTS_RCVD;
1998 seq->status = OCS_HW_UNSOL_ERROR;
2001 /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
2002 if(io->type == OCS_HW_IO_TARGET_RSP) {
2003 io->axr_buf->call_axr_data = 1;
2004 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2008 /* if data cqe came before cmd cqe, return here, cmd cqe will handle */
2009 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2011 #if defined(OCS_DISC_SPIN_DELAY)
2012 if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
2013 delay = ocs_strtoul(prop_buf, 0, 0);
2018 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
2019 if (hw->config.bounce) {
2020 fc_header_t *hdr = seq->header->dma.virt;
2021 uint32_t s_id = fc_be24toh(hdr->s_id);
2022 uint32_t d_id = fc_be24toh(hdr->d_id);
2023 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
2024 if (hw->callback.bounce != NULL) {
2025 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
2028 hw->callback.unsolicited(hw->args.unsolicited, seq);
2031 exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
2032 ocs_unlock(&io->axr_lock);
2037 * @brief Return pointer to RQ buffer entry.
2040 * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
2042 * @param hw Hardware context.
2043 * @param rqindex Index of the RQ that is being processed.
2044 * @param bufindex Index into the RQ that is being processed.
2046 * @return Pointer to the sequence structure, or NULL otherwise.
2048 static ocs_hw_sequence_t *
2049 ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
2051 sli4_queue_t *rq_hdr = &hw->rq[rqindex];
2052 sli4_queue_t *rq_payload = &hw->rq[rqindex+1];
2053 ocs_hw_sequence_t *seq = NULL;
2054 hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
2056 #if defined(ENABLE_DEBUG_RQBUF)
2057 uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);
2060 if (bufindex >= rq_hdr->length) {
2061 ocs_log_err(hw->os, "RQ index %d bufindex %d exceed ring length %d for id %d\n",
2062 rqindex, bufindex, rq_hdr->length, rq_hdr->id);
2066 sli_queue_lock(rq_hdr);
2067 sli_queue_lock(rq_payload);
2069 #if defined(ENABLE_DEBUG_RQBUF)
2070 /* Put a debug value into the rq, to track which entries are still valid */
2071 _sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
2072 _sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);
2075 seq = rq->rq_tracker[bufindex];
2076 rq->rq_tracker[bufindex] = NULL;
2079 ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
2080 rqindex, bufindex, rq_hdr->index);
2083 sli_queue_unlock(rq_payload);
2084 sli_queue_unlock(rq_hdr);
2089 * @brief Posts an RQ buffer to a queue and updates the verification structures
2091 * @param hw hardware context
2092 * @param seq Pointer to sequence object.
2094 * @return Returns 0 on success, or a non-zero value otherwise.
2097 ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2099 sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
2100 sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
2101 uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
2102 hw_rq_t *rq = hw->hw_rq[hw_rq_index];
2103 uint32_t phys_hdr[2];
2104 uint32_t phys_payload[2];
2106 int32_t qindex_payload;
2108 /* Update the RQ verification lookup tables */
2109 phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
2110 phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
2111 phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
2112 phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);
2114 sli_queue_lock(rq_hdr);
2115 sli_queue_lock(rq_payload);
2118 * Note: The header must be posted last for buffer pair mode because
2119 * posting on the header queue posts the payload queue as well.
2120 * We do not ring the payload queue independently in RQ pair mode.
2122 qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
2123 qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
2124 if (qindex_hdr < 0 ||
2125 qindex_payload < 0) {
2126 ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
2127 sli_queue_unlock(rq_payload);
2128 sli_queue_unlock(rq_hdr);
2129 return OCS_HW_RTN_ERROR;
2132 /* ensure the indexes are the same */
2133 ocs_hw_assert(qindex_hdr == qindex_payload);
2135 /* Update the lookup table */
2136 if (rq->rq_tracker[qindex_hdr] == NULL) {
2137 rq->rq_tracker[qindex_hdr] = seq;
2139 ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
2140 hw_rq_index, qindex_hdr);
2143 sli_queue_unlock(rq_payload);
2144 sli_queue_unlock(rq_hdr);
2145 return OCS_HW_RTN_SUCCESS;
2149 * @brief Return RQ buffers (while in RQ pair mode).
2152 * The header and payload buffers are returned to the Receive Queue.
2154 * @param hw Hardware context.
2155 * @param seq Header/payload sequence buffers.
2157 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
2161 ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2163 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2165 /* Check for auto xfer rdy dummy buffers and call the proper release function. */
2166 if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) {
2167 return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq);
2171 * Post the data buffer first, because in RQ pair mode ringing the
2172 * doorbell of the header ring posts the data buffer as well.
2174 if (ocs_hw_rqpair_put(hw, seq)) {
2175 ocs_log_err(hw->os, "error writing buffers\n");
2176 return OCS_HW_RTN_ERROR;
2183 * @brief Find the RQ index of RQ_ID.
2185 * @param hw Hardware context.
2186 * @param rq_id RQ ID to find.
2188 * @return Returns the RQ index, or -1 if not found
2190 static inline int32_t
2191 ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id)
2193 return ocs_hw_queue_hash_find(hw->rq_hash, rq_id);
2197 * @ingroup devInitShutdown
2198 * @brief Allocate auto xfer rdy buffers.
2201 * Allocates the auto xfer rdy buffers and places them on the free list.
2203 * @param hw Hardware context allocated by the caller.
2204 * @param num_buffers Number of buffers to allocate.
2206 * @return Returns 0 on success, or a non-zero value on failure.
2209 ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers)
2211 ocs_hw_auto_xfer_rdy_buffer_t *buf;
2214 hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE);
2215 if (hw->auto_xfer_rdy_buf_pool == NULL) {
2216 ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n");
2217 return OCS_HW_RTN_NO_MEMORY;
2220 for (i = 0; i < num_buffers; i++) {
2221 /* allocate the wrapper object */
2222 buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2223 ocs_hw_assert(buf != NULL);
2225 /* allocate the auto xfer ready buffer */
2226 if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) {
2227 ocs_log_err(hw->os, "DMA allocation failed\n");
2228 ocs_free(hw->os, buf, sizeof(*buf));
2229 return OCS_HW_RTN_NO_MEMORY;
2232 /* build a fake data header in big endian */
2233 buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2234 buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2235 buf->hdr.type = FC_TYPE_FCP;
2236 buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2237 FC_FCTL_FIRST_SEQUENCE |
2238 FC_FCTL_LAST_SEQUENCE |
2239 FC_FCTL_END_SEQUENCE |
2240 FC_FCTL_SEQUENCE_INITIATIVE);
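/* Note: SEQUENCE_INITIATIVE stays set in this fake header, whereas the
 * command path clears it in real headers so the upper layers wait for
 * the auto xfer rdy data (see ocs_hw_rqpair_process_auto_xfr_rdy_cmd()). */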
2242 /* build the fake header DMA object */
2243 buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2244 buf->header.dma.virt = &buf->hdr;
2245 buf->header.dma.alloc = buf;
2246 buf->header.dma.size = sizeof(buf->hdr);
2247 buf->header.dma.len = sizeof(buf->hdr);
2249 buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2251 return OCS_HW_RTN_SUCCESS;
2255 * @ingroup devInitShutdown
2256 * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX.
2259 * When new buffers are freed, check existing XRIs waiting for buffers.
2261 * @param hw Hardware context allocated by the caller.
2264 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
2269 ocs_lock(&hw->io_lock);
2271 while (!ocs_list_empty(&hw->io_port_dnrx)) {
2272 io = ocs_list_remove_head(&hw->io_port_dnrx);
2273 rc = ocs_hw_reque_xri(hw, io);
2279 ocs_unlock(&hw->io_lock);
2283 * @brief Called when the POST_SGL_PAGES command completes.
2286 * Free the mailbox command buffer.
2288 * @param hw Hardware context.
2289 * @param status Status field from the mbox completion.
2290 * @param mqe Mailbox response structure.
2291 * @param arg Pointer to a callback function that signals the caller that the command is done.
2293 * @return Returns 0.
2296 ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
2299 ocs_log_debug(hw->os, "Status 0x%x\n", status);
2302 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2307 * @brief Prepares an XRI to move to the chip.
2310 * Puts the data SGL into the SGL list for the IO object and possibly registers
2311 * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
2312 * mailbox commands, we don't need to wait for completion before proceeding.
2314 * @param hw Hardware context allocated by the caller.
2315 * @param io Pointer to the IO object.
2317 * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
2320 ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
2322 /* We only need to preregister the SGL if it has not yet been done. */
2323 if (!sli_get_sgl_preregister(&hw->sli)) {
2325 ocs_dma_t *psgls = &io->def_sgl;
2326 ocs_dma_t **sgls = &psgls;
2328 /* non-local buffer required for mailbox queue */
2329 post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2330 if (post_sgl == NULL) {
2331 ocs_log_err(hw->os, "no buffer for command\n");
2332 return OCS_HW_RTN_NO_MEMORY;
2334 if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
2335 io->indicator, 1, sgls, NULL, NULL)) {
2336 if (ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
2337 ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
2338 ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
2339 ocs_log_err(hw->os, "SGL post failed\n");
2340 return OCS_HW_RTN_ERROR;
2345 ocs_lock(&hw->io_lock);
2346 if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
2347 ocs_unlock(&hw->io_lock);
2348 return OCS_HW_RTN_ERROR;
2350 ocs_unlock(&hw->io_lock);
2351 return OCS_HW_RTN_SUCCESS;
2355 * @brief Prepares an XRI to move back to the host.
2358 * Releases any attached buffer back to the pool.
2360 * @param hw Hardware context allocated by the caller.
2361 * @param io Pointer to the IO object.
2364 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
2366 if (io->axr_buf != NULL) {
2367 ocs_lock(&hw->io_lock);
2368 /* check list and remove if there */
2369 if (ocs_list_on_list(&io->dnrx_link)) {
2370 ocs_list_remove(&hw->io_port_dnrx, io);
2371 io->auto_xfer_rdy_dnrx = 0;
2373 /* release the count for waiting for a buffer */
2374 ocs_hw_io_free(hw, io);
2377 ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
2379 ocs_unlock(&hw->io_lock);
2381 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2388 * @brief Posts an auto xfer rdy buffer to an IO.
2391 * Puts the data SGL into the SGL list for the IO object
2393 * @b Note: io_lock must be held.
2395 * @param hw Hardware context allocated by the caller.
2396 * @param io Pointer to the IO object.
2398 * @return Returns the value of DNRX bit in the TRSP and ABORT WQEs.
2401 ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf)
2403 ocs_hw_auto_xfer_rdy_buffer_t *buf;
2407 buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool);
2411 data = io->def_sgl.virt;
2412 data[0].sge_type = SLI4_SGE_TYPE_SKIP;
2416 * Note: if we are doing DIF assists, then the SGE[1] must contain the
2417 * DI_SEED SGE. The host is responsible for programming:
2418 * SGE Type (Word 2, bits 30:27)
2419 * Replacement App Tag (Word 2 bits 15:0)
2420 * App Tag (Word 3 bits 15:0)
2421 * New Ref Tag (Word 3 bit 23)
2422 * Metadata Enable (Word 3 bit 20)
2423 * Auto-Increment RefTag (Word 3 bit 19)
2424 * Block Size (Word 3 bits 18:16)
2425 * The following fields are managed by the SLI Port:
2426 * Ref Tag Compare (Word 0)
2427 * Replacement Ref Tag (Word 1) - In not the LBA
2428 * NA (Word 2 bit 25)
2429 * Opcode RX (Word 3 bits 27:24)
2430 * Checksum Enable (Word 3 bit 22)
2431 * RefTag Enable (Word 3 bit 21)
2433 * The first two SGLs are cleared by ocs_hw_io_init_sges(), so assume everything is cleared.
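 *
 * Resulting SGL layout (per the code below): SGE[0] SKIP, SGE[1] DISEED
 * (or SKIP when auto_xfer_rdy_p_type is off), SGE[2] DATA pointing at the
 * posted payload buffer, SGE[3] SKIP.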
2435 if (hw->config.auto_xfer_rdy_p_type) {
2436 sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t*)&data[1];
2438 diseed->sge_type = SLI4_SGE_TYPE_DISEED;
2439 diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value;
2440 diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value;
2441 diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid;
2442 diseed->auto_incr_ref_tag = TRUE; /* Always the LBA */
2443 diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
2445 data[1].sge_type = SLI4_SGE_TYPE_SKIP;
2449 data[2].sge_type = SLI4_SGE_TYPE_DATA;
2450 data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys);
2451 data[2].buffer_address_low = ocs_addr32_lo(io->axr_buf->payload.dma.phys);
2452 data[2].buffer_length = io->axr_buf->payload.dma.size;
2453 data[2].last = TRUE;
2454 data[3].sge_type = SLI4_SGE_TYPE_SKIP;
2460 * @brief Return auto xfer ready buffers (while in RQ pair mode).
2463 * The header and payload buffers are returned to the auto xfer rdy pool.
2465 * @param hw Hardware context.
2466 * @param seq Header/payload sequence buffers.
2468 * @return Returns OCS_HW_RTN_SUCCESS for success, an error code value for failure.
2472 ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2474 ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc;
2479 buf->call_axr_cmd = 0;
2480 buf->call_axr_data = 0;
2482 /* build a fake data header in big endian */
2483 buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2484 buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2485 buf->hdr.type = FC_TYPE_FCP;
2486 buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2487 FC_FCTL_FIRST_SEQUENCE |
2488 FC_FCTL_LAST_SEQUENCE |
2489 FC_FCTL_END_SEQUENCE |
2490 FC_FCTL_SEQUENCE_INITIATIVE);
2492 /* build the fake header DMA object */
2493 buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2494 buf->header.dma.virt = &buf->hdr;
2495 buf->header.dma.alloc = buf;
2496 buf->header.dma.size = sizeof(buf->hdr);
2497 buf->header.dma.len = sizeof(buf->hdr);
2498 buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2500 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2502 return OCS_HW_RTN_SUCCESS;
2506 * @ingroup devInitShutdown
2507 * @brief Free auto xfer rdy buffers.
2510 * Frees the auto xfer rdy buffers.
2512 * @param hw Hardware context allocated by the caller.
2514 * @return Returns 0 on success, or a non-zero value on failure.
2517 ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
2519 ocs_hw_auto_xfer_rdy_buffer_t *buf;
2522 if (hw->auto_xfer_rdy_buf_pool != NULL) {
2523 ocs_lock(&hw->io_lock);
2524 for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
2525 buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2527 ocs_dma_free(hw->os, &buf->payload.dma);
2530 ocs_unlock(&hw->io_lock);
2532 ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
2533 hw->auto_xfer_rdy_buf_pool = NULL;
2538 * @ingroup devInitShutdown
2539 * @brief Configure the rq_pair function from ocs_hw_init().
2542 * Allocates the buffers for auto xfer rdy and posts the initial XRIs for this feature.
2544 * @param hw Hardware context allocated by the caller.
2546 * @return Returns 0 on success, or a non-zero value on failure.
2549 ocs_hw_rqpair_init(ocs_hw_t *hw)
2552 uint32_t xris_posted;
2554 ocs_log_debug(hw->os, "RQ Pair mode\n");
2557 * If we get this far, the auto XFR_RDY feature was enabled successfully, otherwise ocs_hw_init() would
2558 * return with an error. So allocate the buffers based on the initial XRI pool required to support this feature.
2561 if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
2562 hw->config.auto_xfer_rdy_size > 0) {
2563 if (hw->auto_xfer_rdy_buf_pool == NULL) {
2565 * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
2566 * one to post back for the case where the response phase is started in the context of
2567 * the data completion.
2569 rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
2570 if (rc != OCS_HW_RTN_SUCCESS) {
2574 ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);
2577 /* Post the auto XFR_RDY XRIs */
2578 xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
2579 if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
2580 ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
2581 return OCS_HW_RTN_ERROR;
2589 * @ingroup devInitShutdown
2590 * @brief Tear down the rq_pair function from ocs_hw_teardown().
2593 * Frees the auto xfer rdy buffers.
2595 * @param hw Hardware context allocated by the caller.
2598 ocs_hw_rqpair_teardown(ocs_hw_t *hw)
2600 /* We need to free any auto xfer ready buffers */
2601 ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);