/*
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ocs_hw_queues.h"

#define HW_QTOP_DEBUG	0
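
/*
 * Queue topology strings are parsed by ocs_hw_qtop_parse() below. As an
 * illustrative (hypothetical) example, a spec such as:
 *
 *	"eq cq rq cq mq 4(eq cq wq:class=1)"
 *
 * declares one EQ carrying an RQ CQ and an MQ CQ, followed by four EQ/CQ/WQ
 * sets: the N(...) form repeats its contents N times, and ":attr=value"
 * suffixes set per-queue attributes such as len, class, ulp and filter.
 */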
/**
 * @brief Initialize queues
 *
 * Given the parsed queue topology spec, the SLI queues are created and
 * initialized
 *
 * @param hw pointer to HW object
 * @param qtop pointer to queue topology
 *
 * @return returns 0 for success, an error code value for failure.
 */
ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
	uint32_t default_lengths[QTOP_LAST], len;
	uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0;
	uint8_t rqset_filter_mask = 0;
	hw_eq_t *eqs[hw->config.n_rq];
	hw_cq_t *cqs[hw->config.n_rq];
	hw_rq_t *rqs[hw->config.n_rq];
	ocs_hw_qtop_entry_t *qt, *next_qt;
	default_lengths[QTOP_EQ] = 1024;
	default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ];
	default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ];
	default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ];
	default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH;

	ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG);

	ocs_list_init(&hw->eq_list, hw_eq_t, link);
	/* If MRQ is requested, check if it is supported by SLI. */
	if ((hw->config.n_rq > 1) && !hw->sli.config.features.flag.mrqp) {
		ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n");
		return OCS_HW_RTN_ERROR;

	if (hw->config.n_rq > 1)

	/* Allocate class WQ pools */
	for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
		hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
		if (hw->wq_class_array[i] == NULL) {
			ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n");
			return OCS_HW_RTN_NO_MEMORY;

	/* Allocate per CPU WQ pools */
	for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
		hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
		if (hw->wq_cpu_array[i] == NULL) {
			ocs_log_err(hw->os, "ocs_varray_alloc for wq_cpu failed\n");
			return OCS_HW_RTN_NO_MEMORY;
	ocs_hw_assert(qtop != NULL);

	for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
		if (i == qtop->inuse_count - 1)

			len = (qt->len) ? qt->len : default_lengths[QTOP_EQ];
			if (qt->set_default) {
				default_lengths[QTOP_EQ] = len;

			eq = hw_new_eq(hw, len);
				hw_queue_teardown(hw);
				return OCS_HW_RTN_NO_MEMORY;

			len = (qt->len) ? qt->len : default_lengths[QTOP_CQ];
			if (qt->set_default) {
				default_lengths[QTOP_CQ] = len;

			if (!eq || !next_qt) {

			/* If this CQ is for MRQ, then delay the creation */
			if (!use_mrq || next_qt->entry != QTOP_RQ) {
				cq = hw_new_cq(eq, len);

			len = (qt->len) ? qt->len : default_lengths[QTOP_WQ];
			if (qt->set_default) {
				default_lengths[QTOP_WQ] = len;

			if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
				ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp);
				hw_queue_teardown(hw);
				return OCS_HW_RTN_NO_MEMORY;

			wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);

			/* Place this WQ on the EQ WQ array */
			if (ocs_varray_add(eq->wq_array, wq)) {
				ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n");
				hw_queue_teardown(hw);
				return OCS_HW_RTN_ERROR;

			/* Place this WQ on the HW class array */
			if (qt->class < ARRAY_SIZE(hw->wq_class_array)) {
				if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) {
					ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n");
					hw_queue_teardown(hw);
					return OCS_HW_RTN_ERROR;

				ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class);
				hw_queue_teardown(hw);
				return OCS_HW_RTN_ERROR;
			/*
			 * Place this WQ on the per CPU list, assuming that EQs are mapped
			 * to the CPU given by the EQ instance modulo the number of CPUs
			 */
			if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) {
				ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n");
				hw_queue_teardown(hw);
				return OCS_HW_RTN_ERROR;
			len = (qt->len) ? qt->len : default_lengths[QTOP_RQ];
			if (qt->set_default) {
				default_lengths[QTOP_RQ] = len;

			if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
				ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp);
				hw_queue_teardown(hw);
				return OCS_HW_RTN_NO_MEMORY;

				mrq.rq_cfg[mrq.num_pairs].len = len;
				mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp;
				mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask;
				mrq.rq_cfg[mrq.num_pairs].eq = eq;

				rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp);
					hw_queue_teardown(hw);
					return OCS_HW_RTN_NO_MEMORY;
				rq->filter_mask = qt->filter_mask;

			len = (qt->len) ? qt->len : default_lengths[QTOP_MQ];
			if (qt->set_default) {
				default_lengths[QTOP_MQ] = len;

			mq = hw_new_mq(cq, len);
	/* First create normal RQs. */
	for (i = 0; i < mrq.num_pairs; i++) {
		for (j = 0; j < mrq.num_pairs; j++) {
			if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) {
				/* This should be created using set */
				if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) {
					ocs_log_crit(hw->os, "Can't create more than one RQ Set\n");
					hw_queue_teardown(hw);
					return OCS_HW_RTN_ERROR;
				} else if (!rqset_filter_mask) {
					rqset_filter_mask = mrq.rq_cfg[i].filter_mask;
					rqset_len = mrq.rq_cfg[i].len;
					rqset_ulp = mrq.rq_cfg[i].ulp;
					eqs[rqset_count] = mrq.rq_cfg[i].eq;

		if (j == mrq.num_pairs) {
			cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]);
				hw_queue_teardown(hw);
				return OCS_HW_RTN_NO_MEMORY;
			rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp);
				hw_queue_teardown(hw);
				return OCS_HW_RTN_NO_MEMORY;
			rq->filter_mask = mrq.rq_cfg[i].filter_mask;
	/* Now create RQ Set */

	if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) {
			"Max Supported MRQ pairs = %d\n",
			OCE_HW_MAX_NUM_MRQ_PAIRS);
		hw_queue_teardown(hw);
		return OCS_HW_RTN_ERROR;

	if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) {
		hw_queue_teardown(hw);
		return OCS_HW_RTN_ERROR;

	if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) {
		hw_queue_teardown(hw);
		return OCS_HW_RTN_ERROR;

	for (i = 0; i < rqset_count; i++) {
		rqs[i]->filter_mask = rqset_filter_mask;
		rqs[i]->is_mrq = TRUE;
		rqs[i]->base_mrq_id = rqs[0]->hdr->id;

	hw->hw_mrq_count = rqset_count;

	return OCS_HW_RTN_SUCCESS;

	hw_queue_teardown(hw);
	return OCS_HW_RTN_NO_MEMORY;
/**
 * @brief Allocate a new EQ object
 *
 * A new EQ object is instantiated
 *
 * @param hw pointer to HW object
 * @param entry_count number of entries in the EQ
 *
 * @return pointer to allocated EQ object
 */
hw_new_eq(ocs_hw_t *hw, uint32_t entry_count)
	hw_eq_t *eq = ocs_malloc(hw->os, sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT);

	eq->type = SLI_QTYPE_EQ;
	eq->entry_count = entry_count;
	eq->instance = hw->eq_count++;
	eq->queue = &hw->eq[eq->instance];
	ocs_list_init(&eq->cq_list, hw_cq_t, link);

	eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
	if (eq->wq_array == NULL) {
		ocs_free(hw->os, eq, sizeof(*eq));

	if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) {
		ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance);
		ocs_free(hw->os, eq, sizeof(*eq));

	sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
	hw->hw_eq[eq->instance] = eq;
	ocs_list_add_tail(&hw->eq_list, eq);
	ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id,
/**
 * @brief Allocate a new CQ object
 *
 * A new CQ object is instantiated
 *
 * @param eq pointer to parent EQ object
 * @param entry_count number of entries in the CQ
 *
 * @return pointer to allocated CQ object
 */
hw_new_cq(hw_eq_t *eq, uint32_t entry_count)
	ocs_hw_t *hw = eq->hw;
	hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);

	cq->type = SLI_QTYPE_CQ;
	cq->instance = eq->hw->cq_count++;
	cq->entry_count = entry_count;
	cq->queue = &hw->cq[cq->instance];

	ocs_list_init(&cq->q_list, hw_q_t, link);

	if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) {
		ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
		ocs_free(hw->os, cq, sizeof(*cq));

	hw->hw_cq[cq->instance] = cq;
	ocs_list_add_tail(&eq->cq_list, cq);
	ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id,
/**
 * @brief Allocate a new CQ Set of objects.
 *
 * @param eqs pointer to a set of EQ objects.
 * @param cqs pointer to a set of CQ objects to be returned.
 * @param num_cqs number of CQ queues in the set.
 * @param entry_count number of entries in the CQ.
 *
 * @return 0 on success and -1 on failure.
 */
hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count)
	ocs_hw_t *hw = eqs[0]->hw;
	sli4_t *sli4 = &hw->sli;
	sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT];

	/* Initialise CQS pointers to NULL */
	for (i = 0; i < num_cqs; i++) {

	for (i = 0; i < num_cqs; i++) {
		cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);

		cq->type = SLI_QTYPE_CQ;
		cq->instance = hw->cq_count++;
		cq->entry_count = entry_count;
		cq->queue = &hw->cq[cq->instance];

		assocs[i] = eqs[i]->queue;
		ocs_list_init(&cq->q_list, hw_q_t, link);

	if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {
		ocs_log_err(NULL, "Failed to create CQ Set.\n");

	for (i = 0; i < num_cqs; i++) {
		hw->hw_cq[cqs[i]->instance] = cqs[i];
		ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]);

	for (i = 0; i < num_cqs; i++) {
		ocs_free(hw->os, cqs[i], sizeof(*cqs[i]));
/**
 * @brief Allocate a new MQ object
 *
 * A new MQ object is instantiated
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the MQ
 *
 * @return pointer to allocated MQ object
 */
hw_new_mq(hw_cq_t *cq, uint32_t entry_count)
	ocs_hw_t *hw = cq->eq->hw;
	hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT);

	mq->type = SLI_QTYPE_MQ;
	mq->instance = cq->eq->hw->mq_count++;
	mq->entry_count = entry_count;
	mq->entry_size = OCS_HW_MQ_DEPTH;
	mq->queue = &hw->mq[mq->instance];

	if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ,
		ocs_log_err(hw->os, "MQ allocation failure\n");
		ocs_free(hw->os, mq, sizeof(*mq));

	hw->hw_mq[mq->instance] = mq;
	ocs_list_add_tail(&cq->q_list, mq);
	ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id,
/**
 * @brief Allocate a new WQ object
 *
 * A new WQ object is instantiated
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the WQ
 * @param class WQ class
 * @param ulp index of chute
 *
 * @return pointer to allocated WQ object
 */
hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp)
	ocs_hw_t *hw = cq->eq->hw;
	hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT);

	wq->type = SLI_QTYPE_WQ;
	wq->instance = cq->eq->hw->wq_count++;
	wq->entry_count = entry_count;
	wq->queue = &hw->wq[wq->instance];
	wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT;
	wq->wqec_count = wq->wqec_set_count;
	wq->free_count = wq->entry_count - 1;
	ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link);

	if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
		ocs_log_err(hw->os, "WQ allocation failure\n");
		ocs_free(hw->os, wq, sizeof(*wq));

	hw->hw_wq[wq->instance] = wq;
	ocs_list_add_tail(&cq->q_list, wq);
	ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
		      wq->entry_count, wq->class, wq->ulp);
/**
 * @brief Allocate a hw_rq_t object
 *
 * Allocate an RQ object, which encapsulates 2 SLI queues (for rq pair)
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the RQs
 * @param ulp ULP index for this RQ
 *
 * @return pointer to newly allocated hw_rq_t
 */
hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp)
	ocs_hw_t *hw = cq->eq->hw;
	hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);

	ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);

	rq->instance = hw->hw_rq_count++;
	rq->type = SLI_QTYPE_RQ;
	rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));

	/* Create the header RQ */
	ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
	rq->hdr = &hw->rq[hw->rq_count];
	rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;

	if (sli_fc_rq_alloc(&hw->sli, rq->hdr,
		ocs_log_err(hw->os, "RQ allocation failure - header\n");
		ocs_free(hw->os, rq, sizeof(*rq));

	hw->hw_rq_lookup[hw->rq_count] = rq->instance;	/* Update hw_rq_lookup[] */

	ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr size %4d ulp %d\n",
		      rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp);

	/* Create the default data RQ */
	ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
	rq->data = &hw->rq[hw->rq_count];
	rq->data_entry_size = hw->config.rq_default_buffer_size;

	if (sli_fc_rq_alloc(&hw->sli, rq->data,
		ocs_log_err(hw->os, "RQ allocation failure - first burst\n");
		ocs_free(hw->os, rq, sizeof(*rq));

	hw->hw_rq_lookup[hw->rq_count] = rq->instance;	/* Update hw_rq_lookup[] */

	ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance,
		      rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp);

	hw->hw_rq[rq->instance] = rq;
	ocs_list_add_tail(&cq->q_list, rq);

	rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
				    rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
	if (rq->rq_tracker == NULL) {
		ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
/**
 * @brief Allocate a hw_rq_t object SET
 *
 * Allocate an RQ object SET, where each element in the set
 * encapsulates 2 SLI queues (for rq pair)
 *
 * @param cqs CQ pointers to be associated with RQs.
 * @param rqs RQ pointers to be returned on success.
 * @param num_rq_pairs number of rq pairs in the Set.
 * @param entry_count number of entries in the RQs
 * @param ulp ULP index for this RQ
 *
 * @return 0 on success and -1 on failure.
 */
hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp)
	ocs_hw_t *hw = cqs[0]->eq->hw;
	sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL };
	uint32_t max_hw_rq, i, q_count;

	ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);

	/* Initialise RQS pointers */
	for (i = 0; i < num_rq_pairs; i++) {

	for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
		rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);

		rq->instance = hw->hw_rq_count++;
		rq->type = SLI_QTYPE_RQ;
		rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));

		rq->hdr = &hw->rq[hw->rq_count];
		rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		qs[q_count] = rq->hdr;

		rq->data = &hw->rq[hw->rq_count];
		rq->data_entry_size = hw->config.rq_default_buffer_size;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		qs[q_count + 1] = rq->data;

		rq->rq_tracker = NULL;

	if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
				rqs[0]->hdr_entry_size,
				rqs[0]->data_entry_size,
		ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);

	for (i = 0; i < num_rq_pairs; i++) {
		hw->hw_rq[rqs[i]->instance] = rqs[i];
		ocs_list_add_tail(&cqs[i]->q_list, rqs[i]);
		rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
						rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
		if (rqs[i]->rq_tracker == NULL) {
			ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");

	for (i = 0; i < num_rq_pairs; i++) {
		if (rqs[i] != NULL) {
			if (rqs[i]->rq_tracker != NULL) {
				ocs_free(hw->os, rqs[i]->rq_tracker,
					 sizeof(ocs_hw_sequence_t*) *
					 rqs[i]->entry_count);
			ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
/**
 * @brief Free an EQ object
 *
 * The EQ object and any child queue objects are freed
 *
 * @param eq pointer to EQ object
 */
hw_del_eq(hw_eq_t *eq)
	ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) {

	ocs_varray_free(eq->wq_array);
	ocs_list_remove(&eq->hw->eq_list, eq);
	eq->hw->hw_eq[eq->instance] = NULL;
	ocs_free(eq->hw->os, eq, sizeof(*eq));
/**
 * @brief Free a CQ object
 *
 * The CQ object and any child queue objects are freed
 *
 * @param cq pointer to CQ object
 */
hw_del_cq(hw_cq_t *cq)
	ocs_list_foreach_safe(&cq->q_list, q, q_next) {
			hw_del_mq((hw_mq_t *)q);
			hw_del_wq((hw_wq_t *)q);
			hw_del_rq((hw_rq_t *)q);

	ocs_list_remove(&cq->eq->cq_list, cq);
	cq->eq->hw->hw_cq[cq->instance] = NULL;
	ocs_free(cq->eq->hw->os, cq, sizeof(*cq));
/**
 * @brief Free an MQ object
 *
 * The MQ object is freed
 *
 * @param mq pointer to MQ object
 */
hw_del_mq(hw_mq_t *mq)
	ocs_list_remove(&mq->cq->q_list, mq);
	mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
	ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq));
/**
 * @brief Free a WQ object
 *
 * The WQ object is freed
 *
 * @param wq pointer to WQ object
 */
hw_del_wq(hw_wq_t *wq)
	ocs_list_remove(&wq->cq->q_list, wq);
	wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
	ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq));
/**
 * @brief Free an RQ object
 *
 * The RQ object is freed
 *
 * @param rq pointer to RQ object
 */
hw_del_rq(hw_rq_t *rq)
	ocs_hw_t *hw = rq->cq->eq->hw;

	/* Free RQ tracker */
	if (rq->rq_tracker != NULL) {
		ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);
		rq->rq_tracker = NULL;

	ocs_list_remove(&rq->cq->q_list, rq);
	hw->hw_rq[rq->instance] = NULL;
	ocs_free(hw->os, rq, sizeof(*rq));
/**
 * @brief Display HW queue objects
 *
 * The HW queue objects are displayed using ocs_log
 *
 * @param hw pointer to HW object
 */
hw_queue_dump(ocs_hw_t *hw)
	ocs_list_foreach(&hw->eq_list, eq) {
		ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
		ocs_list_foreach(&eq->cq_list, cq) {
			ocs_printf(" cq[%d] id %2d current\n", cq->instance, cq->queue->id);
			ocs_list_foreach(&cq->q_list, q) {
					ocs_printf(" mq[%d] id %2d\n", mq->instance, mq->queue->id);
					ocs_printf(" wq[%d] id %2d\n", wq->instance, wq->queue->id);
					ocs_printf(" rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id);
/**
 * @brief Teardown HW queue objects
 *
 * The HW queue objects are freed
 *
 * @param hw pointer to HW object
 */
hw_queue_teardown(ocs_hw_t *hw)
	if (ocs_list_valid(&hw->eq_list)) {
		ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) {

	for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
		ocs_varray_free(hw->wq_cpu_array[i]);
		hw->wq_cpu_array[i] = NULL;

	for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
		ocs_varray_free(hw->wq_class_array[i]);
		hw->wq_class_array[i] = NULL;
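
/*
 * Illustrative note (hypothetical CPU count): on a 4-CPU system with
 * OCS_HW_WQ_STEERING_CPU, an IO issued from CPU 2 draws its WQ from
 * hw->wq_cpu_array[2], which ocs_hw_init_queues() populated with the WQs of
 * every EQ whose instance % ocs_get_num_cpus() == 2; ocs_varray_iter_next()
 * then cycles through that pool on successive IOs.
 */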
/**
 * @brief Allocate a WQ to an IO object
 *
 * The next work queue index is used to assign a WQ to an IO.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is
 * selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
 * the IO request came in on is selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
 * CPU the request is made on is selected.
 *
 * @param hw pointer to HW object
 * @param io pointer to IO object
 *
 * @return Return pointer to next WQ
 */
ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
	switch (io->wq_steering) {
	case OCS_HW_WQ_STEERING_CLASS:
		if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
			wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
	case OCS_HW_WQ_STEERING_REQUEST:
		if (likely(eq != NULL)) {
			wq = ocs_varray_iter_next(eq->wq_array);
	case OCS_HW_WQ_STEERING_CPU: {
		uint32_t cpuidx = ocs_thread_getcpu();

		if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
			wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);

	if (unlikely(wq == NULL)) {
/**
 * @brief Return count of EQs for a queue topology object
 *
 * The EQ count in the HW's queue topology (hw->qtop) object is returned.
 *
 * @param hw pointer to HW object
 *
 * @return count of EQs
 */
ocs_hw_qtop_eq_count(ocs_hw_t *hw)
	return hw->qtop->entry_counts[QTOP_EQ];
#define TOKEN_LEN	32

/**
 * @brief return string given a QTOP entry
 *
 * @param entry QTOP entry
 *
 * @return returns string or "unknown"
 */
qtopentry2s(ocs_hw_qtop_entry_e entry) {
#define P(x)	case x: return #x;
	P(QTOP_THREAD_START)
/**
 * @brief Declare token types
 */

/**
 * @brief Declare token sub-types
 */

/**
 * @brief convert queue subtype to QTOP entry
 *
 * @param q queue subtype
 *
 * @return QTOP entry or 0
 */
static ocs_hw_qtop_entry_e
subtype2qtop(tok_subtype_e q)
	case TOK_SUB_EQ:	return QTOP_EQ;
	case TOK_SUB_CQ:	return QTOP_CQ;
	case TOK_SUB_RQ:	return QTOP_RQ;
	case TOK_SUB_MQ:	return QTOP_MQ;
	case TOK_SUB_WQ:	return QTOP_WQ;

/**
 * @brief Declare token object
 */
	tok_subtype_e subtype;
	char string[TOKEN_LEN];

/**
 * @brief Declare token array object
 */
	tok_t *tokens;			/* Pointer to array of tokens */
	uint32_t alloc_count;		/* Number of tokens in the array */
	uint32_t inuse_count;		/* Number of tokens posted to array */
	uint32_t iter_idx;		/* Iterator index */

/**
 * @brief Declare token match structure
 */
	tok_subtype_e subtype;
/**
 * @brief test if character is ID start character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID start character
 */
	return isalpha(c) || (c == '_') || (c == '$');

/**
 * @brief test if character is an ID character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID character
 */
	return idstart(c) || ocs_isdigit(c);
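
/*
 * As a worked (illustrative) example, the input "wq:len=256" scans into the
 * token stream
 *
 *	TOK_QUEUE/TOK_SUB_WQ "wq", TOK_COLON ":",
 *	TOK_ATTR_NAME/TOK_SUB_LEN "len", TOK_EQUALS "=", TOK_NUMBER "256"
 *
 * which parse_topology() below consumes as a single WQ entry of length 256.
 */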
/**
 * @brief Declare single character matches
 */
static tokmatch_t cmatches[] = {

/**
 * @brief Declare identifier match strings
 */
static tokmatch_t smatches[] = {
	{"eq",		TOK_QUEUE,	TOK_SUB_EQ},
	{"cq",		TOK_QUEUE,	TOK_SUB_CQ},
	{"rq",		TOK_QUEUE,	TOK_SUB_RQ},
	{"mq",		TOK_QUEUE,	TOK_SUB_MQ},
	{"wq",		TOK_QUEUE,	TOK_SUB_WQ},
	{"len",		TOK_ATTR_NAME,	TOK_SUB_LEN},
	{"class",	TOK_ATTR_NAME,	TOK_SUB_CLASS},
	{"ulp",		TOK_ATTR_NAME,	TOK_SUB_ULP},
	{"filter",	TOK_ATTR_NAME,	TOK_SUB_FILTER},
/**
 * @brief Scan string and return next token
 *
 * The string is scanned and the next token is returned
 *
 * @param s input string to scan
 * @param tok pointer to place scanned token
 *
 * @return pointer to input string following scanned token, or NULL
 */
tokenize(const char *s, tok_t *tok)
	memset(tok, 0, sizeof(*tok));

	/* Skip over whitespace */
	while (*s && ocs_isspace(*s)) {

	/* Return if nothing left in this string */

	/* Look for single character matches */
	for (i = 0; i < ARRAY_SIZE(cmatches); i++) {
		if (cmatches[i].s[0] == *s) {
			tok->type = cmatches[i].type;
			tok->subtype = cmatches[i].subtype;
			tok->string[0] = *s++;

	/* Scan for a hex number or decimal */
	if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) {
		char *p = tok->string;

		tok->type = TOK_NUMBER;
		while ((*s == '.') || ocs_isxdigit(*s)) {
			if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
				tok->type = TOK_NUMBER_LIST;
	} else if (ocs_isdigit(*s)) {
		char *p = tok->string;

		tok->type = TOK_NUMBER;
		while ((*s == ',') || ocs_isdigit(*s)) {
			if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
				tok->type = TOK_NUMBER_LIST;

	/* Scan for an ID */
		char *p = tok->string;

		for (*p++ = *s++; idchar(*s); s++) {
			if ((p - tok->string) < TOKEN_LEN) {

		/* See if this is a $ number value */
		if (tok->string[0] == '$') {
			tok->type = TOK_NUMBER_VALUE;
			/* Look for a string match */
			for (i = 0; i < ARRAY_SIZE(smatches); i++) {
				if (strcmp(smatches[i].s, tok->string) == 0) {
					tok->type = smatches[i].type;
					tok->subtype = smatches[i].subtype;
/**
 * @brief convert token type to string
 *
 * @param type token type
 *
 * @return string, or "unknown"
 */
token_type2s(tok_type_e type)
#define P(x)	case x: return #x;

/**
 * @brief convert token sub-type to string
 *
 * @param subtype token sub-type
 *
 * @return string, or "unknown"
 */
token_subtype2s(tok_subtype_e subtype)
#define P(x)	case x: return #x;
/**
 * @brief Generate syntax error message
 *
 * When a syntax error is found, the input tokens are dumped up to and
 * including the token that failed, as indicated by the current iterator
 * index.
 *
 * @param hw pointer to HW object
 * @param tokarray pointer to token array object
 */
tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
	ocs_log_test(hw->os, "Syntax error:\n");

	for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
		ocs_log_test(hw->os, "%s [%2d] %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : " ", i,
			     token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
/**
 * @brief parse a number
 *
 * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
 *
 * @param hw pointer to HW object
 * @param qtop pointer to QTOP object
 * @param tok pointer to token to parse
 *
 * @return numeric value
 */
tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
	uint32_t num_cpus = ocs_get_num_cpus();

	case TOK_NUMBER_VALUE:
		if (ocs_strcmp(tok->string, "$ncpu") == 0) {
		} else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
			rval = num_cpus - 1;
		} else if (ocs_strcmp(tok->string, "$nwq") == 0) {
			rval = hw->config.n_wq;
		} else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
			rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
		} else if (ocs_strcmp(tok->string, "$nulp") == 0) {
			rval = hw->ulp_max - hw->ulp_start + 1;
		} else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx - 1];
		} else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx - 2];
		} else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx - 3];
		} else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
			rval = qtop->rptcount[qtop->rptcount_idx - 4];
			rval = ocs_strtoul(tok->string, 0, 0);

		rval = ocs_strtoul(tok->string, 0, 0);
/**
 * @brief parse an array of tokens
 *
 * The tokens are semantically parsed, to generate QTOP entries.
 *
 * @param hw pointer to HW object
 * @param tokarray array of tokens
 * @param qtop output QTOP object
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
	ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;

	for (; (tokarray->iter_idx < tokarray->inuse_count) &&
	     ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
		if (qtop->inuse_count >= qtop->alloc_count) {

		qt = qtop->entries + qtop->inuse_count;

		switch (tok[0].type)
			qt->entry = subtype2qtop(tok[0].subtype);
			qt->set_default = FALSE;
			qtop->inuse_count++;

			tokarray->iter_idx++;		/* Advance current token index */

			/* Parse for queue attributes, possibly multiple instances */
			while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
				tok = &tokarray->tokens[tokarray->iter_idx];
				if ((tok[0].type == TOK_COLON) &&
				    (tok[1].type == TOK_ATTR_NAME) &&
				    (tok[2].type == TOK_EQUALS) &&
				    ((tok[3].type == TOK_NUMBER) ||
				     (tok[3].type == TOK_NUMBER_VALUE) ||
				     (tok[3].type == TOK_NUMBER_LIST))) {

					switch (tok[1].subtype) {
						qt->len = tok_getnumber(hw, qtop, &tok[3]);

						qt->class = tok_getnumber(hw, qtop, &tok[3]);

						qt->ulp = tok_getnumber(hw, qtop, &tok[3]);

					case TOK_SUB_FILTER:
						if (tok[3].type == TOK_NUMBER_LIST) {
							char *p = tok[3].string;

							while ((p != NULL) && *p) {
								v = ocs_strtoul(p, 0, 0);
								p = ocs_strchr(p, ',');

							qt->filter_mask = mask;
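							/* e.g. a (hypothetical) list "filter=0,2"
							 * yields filter_mask 0x5 (1<<0 | 1<<2)
							 */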
							qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));

					/* Advance current token index */
					tokarray->iter_idx += 4;

			qtop->entry_counts[qt->entry]++;

			if (((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
			    (tok[1].type == TOK_COLON) &&
			    (tok[2].type == TOK_QUEUE) &&
			    (tok[3].type == TOK_EQUALS) &&
			    ((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
				qt->entry = subtype2qtop(tok[2].subtype);
				qt->set_default = TRUE;
				switch (tok[0].subtype) {
					qt->len = tok_getnumber(hw, qtop, &tok[4]);
					qt->class = tok_getnumber(hw, qtop, &tok[4]);
					qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
				qtop->inuse_count++;
				tokarray->iter_idx += 5;
				tok_syntax(hw, tokarray);

		case TOK_NUMBER_VALUE: {
			uint32_t rpt_count = 1;

			rpt_count = tok_getnumber(hw, qtop, tok);

			if (tok[1].type == TOK_LPAREN) {
				uint32_t iter_idx_save;

				tokarray->iter_idx += 2;

				/* save token array iteration index */
				iter_idx_save = tokarray->iter_idx;

				for (i = 0; i < rpt_count; i++) {
					uint32_t rptcount_idx = qtop->rptcount_idx;

					if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
						qtop->rptcount[qtop->rptcount_idx++] = i;

					/* restore token array iteration index */
					tokarray->iter_idx = iter_idx_save;

					/* parse, append to qtop */
					parse_topology(hw, tokarray, qtop);

					qtop->rptcount_idx = rptcount_idx;

			tokarray->iter_idx++;

			tok_syntax(hw, tokarray);
/**
 * @brief Parse queue topology string
 *
 * The queue topology object is allocated, and filled with the results of parsing the
 * passed in queue topology string
 *
 * @param hw pointer to HW object
 * @param qtop_string input queue topology string
 *
 * @return pointer to allocated QTOP object, or NULL if there was an error
 */
ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
	ocs_hw_qtop_t *qtop;
	tokarray_t tokarray;
	ocs_hw_qtop_entry_t *qt;

	ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);

	/* Allocate a token array */
	tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
	if (tokarray.tokens == NULL) {

	tokarray.alloc_count = MAX_TOKENS;
	tokarray.inuse_count = 0;
	tokarray.iter_idx = 0;

	/* Parse the tokens */
	for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
	     ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count])) != NULL); ) {
		tokarray.inuse_count++;

	/* Allocate a queue topology structure */
	qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
		ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
		ocs_log_err(hw->os, "malloc qtop failed\n");

	/* Allocate queue topology entries */
	qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES * sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
	if (qtop->entries == NULL) {
		ocs_log_err(hw->os, "malloc qtop entries failed\n");
		ocs_free(hw->os, qtop, sizeof(*qtop));
		ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));

	qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
	qtop->inuse_count = 0;

	/* Parse the tokens */
	parse_topology(hw, &tokarray, qtop);

	for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
		ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n",
			      qtopentry2s(qt->entry), qt->set_default, qt->len,
			      qt->class, qt->ulp);

	/* Free the tokens array */
	ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
/**
 * @brief free queue topology object
 *
 * @param qtop pointer to QTOP object
 */
ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
	if (qtop->entries != NULL) {
		ocs_free(qtop->os, qtop->entries, qtop->alloc_count * sizeof(*qtop->entries));

	ocs_free(qtop->os, qtop, sizeof(*qtop));
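
/*
 * Typical usage, as a sketch (error handling elided; the topology string is
 * illustrative, not necessarily the driver default):
 *
 *	ocs_hw_qtop_t *qtop = ocs_hw_qtop_parse(hw, "eq cq rq cq mq");
 *	if (qtop != NULL)
 *		ocs_hw_init_queues(hw, qtop);
 *
 * The HW object keeps a reference in hw->qtop (used by
 * ocs_hw_qtop_eq_count() above), so ocs_hw_qtop_free() is called at
 * teardown time rather than immediately after queue initialization.
 */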
/* Uncomment this to turn on RQ debug */
// #define ENABLE_DEBUG_RQBUF

static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
/**
 * @brief Process receive queue completions for RQ Pair mode.
 *
 * RQ completions are processed. In RQ pair mode, a single header and single payload
 * buffer are received, and passed to the function that has registered for unsolicited
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
	ocs_hw_sequence_t *seq;

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
	if (0 != rq_status) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get RQ buffer then return to chip */
			rqindex = ocs_hw_rqpair_find(hw, rq_id);
				ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",

			seq = ocs_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (ocs_hw_rqpair_sequence_free(hw, seq)) {
				ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",

		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/* since RQ buffers were not consumed, cannot return them to chip */
			ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);

	rqindex = ocs_hw_rqpair_find(hw, rq_id);
		ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);

	OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
		   rq->payload_use_count++;})

	seq = ocs_hw_rqpair_get(hw, rqindex, index);
	ocs_hw_assert(seq != NULL);

	seq->out_of_xris = 0;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id = ocs_be16toh(hdr->ox_id);

		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);

		hw->callback.unsolicited(hw->args.unsolicited, seq);
/**
 * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
 *
 * RQ completions are processed. In RQ pair mode, a single header and single payload
 * buffer are received, and passed to the function that has registered for unsolicited
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
	/* Seems silly to call a SLI function to decode - use the structure directly for performance */
	sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t *)cqe;
	ocs_hw_sequence_t *seq;
	uint8_t axr_lock_taken = 0;
#if defined(OCS_DISC_SPIN_DELAY)

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
	if (0 != rq_status) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get RQ buffer then return to chip */
			rqindex = ocs_hw_rqpair_find(hw, rq_id);
				ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",

			seq = ocs_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (ocs_hw_rqpair_sequence_free(hw, seq)) {
				ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",

		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/* since RQ buffers were not consumed, cannot return them to chip */
			ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);

	rqindex = ocs_hw_rqpair_find(hw, rq_id);
		ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);

	OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
		   rq->payload_use_count++;})

	seq = ocs_hw_rqpair_get(hw, rqindex, index);
	ocs_hw_assert(seq != NULL);

	seq->auto_xrdy = opt_wr->agxr;
	seq->out_of_xris = opt_wr->oox;
	seq->xri = opt_wr->xri;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;
	if (seq->auto_xrdy) {
		fc_header_t *fc_hdr = seq->header->dma.virt;

		seq->hio = ocs_hw_io_lookup(hw, seq->xri);
		ocs_lock(&seq->hio->axr_lock);

		/* save the FCFI, src_id, dest_id and ox_id because we need it for the sequence object when the data comes. */
		seq->hio->axr_buf->fcfi = seq->fcfi;
		seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
		seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
		seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
		seq->hio->axr_buf->cmd_cqe = 1;

		/*
		 * Since auto xfer rdy is used for this IO, then clear the sequence
		 * initiative bit in the header so that the upper layers wait for the
		 * data. This should flow exactly like the first burst case.
		 */
		fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);

		/* If AXR CMD CQE came before previous TRSP CQE of same XRI */
		if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
			seq->hio->axr_buf->call_axr_cmd = 1;
			seq->hio->axr_buf->cmd_seq = seq;
			goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id = ocs_be16toh(hdr->ox_id);

		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);

		hw->callback.unsolicited(hw->args.unsolicited, seq);

	if (seq->auto_xrdy) {
		/* If the data cqe arrived out of order, before the cmd cqe (AXR case) */
		if (seq->hio->axr_buf->data_cqe == 1) {
#if defined(OCS_DISC_SPIN_DELAY)
			if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
				delay = ocs_strtoul(prop_buf, 0, 0);

			/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
			if (hw->config.bounce) {
				fc_header_t *hdr = seq->header->dma.virt;
				uint32_t s_id = fc_be24toh(hdr->s_id);
				uint32_t d_id = fc_be24toh(hdr->d_id);
				uint32_t ox_id = ocs_be16toh(hdr->ox_id);

				if (hw->callback.bounce != NULL) {
					(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);

				hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);

exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
	if (axr_lock_taken) {
		ocs_unlock(&seq->hio->axr_lock);
/**
 * @brief Process CQ completions for Auto xfer rdy data phases.
 *
 * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
 * being assigned to an IO. When the completion is received, all of the data
 * is in the single buffer.
 *
 * @param hw Hardware context.
 * @param cq Pointer to HW completion queue.
 * @param cqe Completion queue entry.
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */
ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
	/* Seems silly to call a SLI function to decode - use the structure directly for performance */
	sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t *)cqe;
	ocs_hw_sequence_t *seq;
	ocs_hw_auto_xfer_rdy_buffer_t *buf;
#if defined(OCS_DISC_SPIN_DELAY)

	/* Look up the IO */
	io = ocs_hw_io_lookup(hw, opt_wr->xri);
	ocs_lock(&io->axr_lock);

	seq->out_of_xris = 0;
	seq->xri = opt_wr->xri;
	seq->header = &buf->header;
	seq->payload = &buf->payload;
	seq->header->dma.len = sizeof(fc_header_t);
	seq->payload->dma.len = opt_wr->total_data_placed;
	seq->fcfi = buf->fcfi;
	seq->hw_priv = cq->eq;

	if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
		seq->status = OCS_HW_UNSOL_SUCCESS;
	} else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
		seq->status = OCS_HW_UNSOL_ABTS_RCVD;
		seq->status = OCS_HW_UNSOL_ERROR;

	/* If AXR CMD CQE came before previous TRSP CQE of same XRI */
	if (io->type == OCS_HW_IO_TARGET_RSP) {
		io->axr_buf->call_axr_data = 1;
		goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;

	/* if data cqe came before cmd cqe, return here, cmd cqe will handle */
	goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;

#if defined(OCS_DISC_SPIN_DELAY)
	if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
		delay = ocs_strtoul(prop_buf, 0, 0);

	/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
	if (hw->config.bounce) {
		fc_header_t *hdr = seq->header->dma.virt;
		uint32_t s_id = fc_be24toh(hdr->s_id);
		uint32_t d_id = fc_be24toh(hdr->d_id);
		uint32_t ox_id = ocs_be16toh(hdr->ox_id);

		if (hw->callback.bounce != NULL) {
			(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);

		hw->callback.unsolicited(hw->args.unsolicited, seq);

exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
	ocs_unlock(&io->axr_lock);
/**
 * @brief Return pointer to RQ buffer entry.
 *
 * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
 *
 * @param hw Hardware context.
 * @param rqindex Index of the RQ that is being processed.
 * @param bufindex Index into the RQ that is being processed.
 *
 * @return Pointer to the sequence structure, or NULL otherwise.
 */
static ocs_hw_sequence_t *
ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
	sli4_queue_t *rq_hdr = &hw->rq[rqindex];
	sli4_queue_t *rq_payload = &hw->rq[rqindex + 1];
	ocs_hw_sequence_t *seq = NULL;
	hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];

#if defined(ENABLE_DEBUG_RQBUF)
	uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);

	if (bufindex >= rq_hdr->length) {
		ocs_log_err(hw->os, "RQ index %d bufindex %d exceed ring length %d for id %d\n",
			    rqindex, bufindex, rq_hdr->length, rq_hdr->id);

	sli_queue_lock(rq_hdr);
	sli_queue_lock(rq_payload);

#if defined(ENABLE_DEBUG_RQBUF)
	/* Put a debug value into the rq, to track which entries are still valid */
	_sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
	_sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);

	seq = rq->rq_tracker[bufindex];
	rq->rq_tracker[bufindex] = NULL;

		ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
			    rqindex, bufindex, rq_hdr->index);

	sli_queue_unlock(rq_payload);
	sli_queue_unlock(rq_hdr);
/**
 * @brief Posts an RQ buffer to a queue and updates the verification structures
 *
 * @param hw hardware context
 * @param seq Pointer to sequence object.
 *
 * @return Returns 0 on success, or a non-zero value otherwise.
 */
ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
	sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
	sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
	uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
	hw_rq_t *rq = hw->hw_rq[hw_rq_index];
	uint32_t phys_hdr[2];
	uint32_t phys_payload[2];
	int32_t qindex_payload;

	/* Update the RQ verification lookup tables */
	phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
	phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
	phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
	phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);

	sli_queue_lock(rq_hdr);
	sli_queue_lock(rq_payload);

	/*
	 * Note: The header must be posted last for buffer pair mode because
	 * posting on the header queue posts the payload queue as well.
	 * We do not ring the payload queue independently in RQ pair mode.
	 */
	qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
	qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
	if (qindex_hdr < 0 ||
	    qindex_payload < 0) {
		ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
		sli_queue_unlock(rq_payload);
		sli_queue_unlock(rq_hdr);
		return OCS_HW_RTN_ERROR;

	/* ensure the indexes are the same */
	ocs_hw_assert(qindex_hdr == qindex_payload);

	/* Update the lookup table */
	if (rq->rq_tracker[qindex_hdr] == NULL) {
		rq->rq_tracker[qindex_hdr] = seq;
		ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
			     hw_rq_index, qindex_hdr);

	sli_queue_unlock(rq_payload);
	sli_queue_unlock(rq_hdr);

	return OCS_HW_RTN_SUCCESS;
/**
 * @brief Return RQ buffers (while in RQ pair mode).
 *
 * The header and payload buffers are returned to the Receive Queue.
 *
 * @param hw Hardware context.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
 */
ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	/* Check for auto xfer rdy dummy buffers and call the proper release function. */
	if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) {
		return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq);

	/*
	 * Post the data buffer first. Because in RQ pair mode, ringing the
	 * doorbell of the header ring will post the data buffer as well.
	 */
	if (ocs_hw_rqpair_put(hw, seq)) {
		ocs_log_err(hw->os, "error writing buffers\n");
		return OCS_HW_RTN_ERROR;
/**
 * @brief Find the RQ index of RQ_ID.
 *
 * @param hw Hardware context.
 * @param rq_id RQ ID to find.
 *
 * @return Returns the RQ index, or -1 if not found
 */
static inline int32_t
ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id)
	return ocs_hw_queue_hash_find(hw->rq_hash, rq_id);
/**
 * @ingroup devInitShutdown
 * @brief Allocate auto xfer rdy buffers.
 *
 * Allocates the auto xfer rdy buffers and places them on the free list.
 *
 * @param hw Hardware context allocated by the caller.
 * @param num_buffers Number of buffers to allocate.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers)
	ocs_hw_auto_xfer_rdy_buffer_t *buf;

	hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE);
	if (hw->auto_xfer_rdy_buf_pool == NULL) {
		ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n");
		return OCS_HW_RTN_NO_MEMORY;

	for (i = 0; i < num_buffers; i++) {
		/* allocate the wrapper object */
		buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
		ocs_hw_assert(buf != NULL);

		/* allocate the auto xfer ready buffer */
		if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) {
			ocs_log_err(hw->os, "DMA allocation failed\n");
			ocs_free(hw->os, buf, sizeof(*buf));
			return OCS_HW_RTN_NO_MEMORY;

		/* build a fake data header in big endian */
		buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
		buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
		buf->hdr.type = FC_TYPE_FCP;
		buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
					    FC_FCTL_FIRST_SEQUENCE |
					    FC_FCTL_LAST_SEQUENCE |
					    FC_FCTL_END_SEQUENCE |
					    FC_FCTL_SEQUENCE_INITIATIVE);

		/* build the fake header DMA object */
		buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
		buf->header.dma.virt = &buf->hdr;
		buf->header.dma.alloc = buf;
		buf->header.dma.size = sizeof(buf->hdr);
		buf->header.dma.len = sizeof(buf->hdr);
		buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;

	return OCS_HW_RTN_SUCCESS;
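
/*
 * Note: header.rqindex is set to OCS_HW_RQ_INDEX_DUMMY_HDR above so that
 * ocs_hw_rqpair_sequence_free() can recognize these fake auto xfer rdy
 * sequences and route them to
 * ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset() instead of reposting
 * them to a real RQ.
 */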
/**
 * @ingroup devInitShutdown
 * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX.
 *
 * When new buffers are freed, check existing XRIs waiting for buffers.
 *
 * @param hw Hardware context allocated by the caller.
 */
ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
	ocs_lock(&hw->io_lock);

	while (!ocs_list_empty(&hw->io_port_dnrx)) {
		io = ocs_list_remove_head(&hw->io_port_dnrx);
		rc = ocs_hw_reque_xri(hw, io);

	ocs_unlock(&hw->io_lock);
/**
 * @brief Called when the POST_SGL_PAGE command completes.
 *
 * Free the mailbox command buffer.
 *
 * @param hw Hardware context.
 * @param status Status field from the mbox completion.
 * @param mqe Mailbox response structure.
 * @param arg Pointer to a callback function that signals the caller that the command is done.
 *
 * @return Returns 0.
 */
ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
		ocs_log_debug(hw->os, "Status 0x%x\n", status);

	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
/**
 * @brief Prepares an XRI to move to the chip.
 *
 * Puts the data SGL into the SGL list for the IO object and possibly registers
 * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
 * mailbox commands, we don't need to wait for completion before proceeding.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 *
 * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
 */
ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
	/* We only need to preregister the SGL if it has not yet been done. */
	if (!sli_get_sgl_preregister(&hw->sli)) {
		ocs_dma_t *psgls = &io->def_sgl;
		ocs_dma_t **sgls = &psgls;

		/* non-local buffer required for mailbox queue */
		post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
		if (post_sgl == NULL) {
			ocs_log_err(hw->os, "no buffer for command\n");
			return OCS_HW_RTN_NO_MEMORY;

		if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
						io->indicator, 1, sgls, NULL, NULL)) {
			if (ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
					   ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
				ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
				ocs_log_err(hw->os, "SGL post failed\n");
				return OCS_HW_RTN_ERROR;

	ocs_lock(&hw->io_lock);
	if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
		ocs_unlock(&hw->io_lock);
		return OCS_HW_RTN_ERROR;

	ocs_unlock(&hw->io_lock);

	return OCS_HW_RTN_SUCCESS;
/**
 * @brief Prepares an XRI to move back to the host.
 *
 * Releases any attached buffer back to the pool.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 */
ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
	if (io->axr_buf != NULL) {
		ocs_lock(&hw->io_lock);

		/* check list and remove if there */
		if (ocs_list_on_list(&io->dnrx_link)) {
			ocs_list_remove(&hw->io_port_dnrx, io);
			io->auto_xfer_rdy_dnrx = 0;

			/* release the count for waiting for a buffer */
			ocs_hw_io_free(hw, io);

		ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);

		ocs_unlock(&hw->io_lock);

	ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
/**
 * @brief Posts an auto xfer rdy buffer to an IO.
 *
 * Puts the data SGL into the SGL list for the IO object
 *
 * @b Note: io_lock must be held.
 *
 * @param hw Hardware context allocated by the caller.
 * @param io Pointer to the IO object.
 *
 * @return Returns the value of DNRX bit in the TRSP and ABORT WQEs.
 */
ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf)
	ocs_hw_auto_xfer_rdy_buffer_t *buf;

		buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool);

	data = io->def_sgl.virt;
	data[0].sge_type = SLI4_SGE_TYPE_SKIP;

	/*
	 * Note: if we are doing DIF assists, then the SGE[1] must contain the
	 * DI_SEED SGE. The host is responsible for programming:
	 *   SGE Type (Word 2, bits 30:27)
	 *   Replacement App Tag (Word 2 bits 15:0)
	 *   App Tag (Word 3 bits 15:0)
	 *   New Ref Tag (Word 3 bit 23)
	 *   Metadata Enable (Word 3 bit 20)
	 *   Auto-Increment RefTag (Word 3 bit 19)
	 *   Block Size (Word 3 bits 18:16)
	 * The following fields are managed by the SLI Port:
	 *   Ref Tag Compare (Word 0)
	 *   Replacement Ref Tag (Word 1) - if not the LBA
	 *   NA (Word 2 bit 25)
	 *   Opcode RX (Word 3 bits 27:24)
	 *   Checksum Enable (Word 3 bit 22)
	 *   RefTag Enable (Word 3 bit 21)
	 *
	 * The first two SGLs are cleared by ocs_hw_io_init_sges(), so assume everything is cleared.
	 */
	if (hw->config.auto_xfer_rdy_p_type) {
		sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t *)&data[1];

		diseed->sge_type = SLI4_SGE_TYPE_DISEED;
		diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value;
		diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value;
		diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid;
		diseed->auto_incr_ref_tag = TRUE;	/* Always the LBA */
		diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
		data[1].sge_type = SLI4_SGE_TYPE_SKIP;

	data[2].sge_type = SLI4_SGE_TYPE_DATA;
	data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys);
	data[2].buffer_address_low = ocs_addr32_lo(io->axr_buf->payload.dma.phys);
	data[2].buffer_length = io->axr_buf->payload.dma.size;
	data[2].last = TRUE;
	data[3].sge_type = SLI4_SGE_TYPE_SKIP;
/**
 * @brief Return auto xfer ready buffers (while in RQ pair mode).
 *
 * The header and payload buffers are returned to the auto xfer rdy pool.
 *
 * @param hw Hardware context.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns OCS_HW_RTN_SUCCESS for success, an error code value for failure.
 */
ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
	ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc;

	buf->call_axr_cmd = 0;
	buf->call_axr_data = 0;

	/* build a fake data header in big endian */
	buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
	buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
	buf->hdr.type = FC_TYPE_FCP;
	buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
				    FC_FCTL_FIRST_SEQUENCE |
				    FC_FCTL_LAST_SEQUENCE |
				    FC_FCTL_END_SEQUENCE |
				    FC_FCTL_SEQUENCE_INITIATIVE);

	/* build the fake header DMA object */
	buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
	buf->header.dma.virt = &buf->hdr;
	buf->header.dma.alloc = buf;
	buf->header.dma.size = sizeof(buf->hdr);
	buf->header.dma.len = sizeof(buf->hdr);
	buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;

	ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);

	return OCS_HW_RTN_SUCCESS;
/**
 * @ingroup devInitShutdown
 * @brief Free auto xfer rdy buffers.
 *
 * Frees the auto xfer rdy buffers.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
	ocs_hw_auto_xfer_rdy_buffer_t *buf;

	if (hw->auto_xfer_rdy_buf_pool != NULL) {
		ocs_lock(&hw->io_lock);

		for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
			buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
				ocs_dma_free(hw->os, &buf->payload.dma);

		ocs_unlock(&hw->io_lock);

		ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
		hw->auto_xfer_rdy_buf_pool = NULL;
/**
 * @ingroup devInitShutdown
 * @brief Configure the rq_pair function from ocs_hw_init().
 *
 * Allocates the buffers to auto xfer rdy and posts initial XRIs for this feature.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_rqpair_init(ocs_hw_t *hw)
	uint32_t xris_posted;

	ocs_log_debug(hw->os, "RQ Pair mode\n");

	/*
	 * If we get this far, the auto XFR_RDY feature was enabled successfully, otherwise ocs_hw_init() would
	 * return with an error. So allocate the buffers based on the initial XRI pool required to support this
	 * feature.
	 */
	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
	    hw->config.auto_xfer_rdy_size > 0) {
		if (hw->auto_xfer_rdy_buf_pool == NULL) {
			/*
			 * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
			 * one to post back for the case where the response phase is started in the context of
			 * the data completion.
			 */
			rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
			if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);

		/* Post the auto XFR_RDY XRIs */
		xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
		if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
			ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
			return OCS_HW_RTN_ERROR;
/**
 * @ingroup devInitShutdown
 * @brief Tear down the rq_pair function from ocs_hw_teardown().
 *
 * Frees the buffers to auto xfer rdy.
 *
 * @param hw Hardware context allocated by the caller.
 */
ocs_hw_rqpair_teardown(ocs_hw_t *hw)
	/* We need to free any auto xfer ready buffers */
	ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);