1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33
34 /**
35  * @file
36  * Routines to create, manage, and tear down the SLI queues (EQ, CQ, WQ, RQ, MQ) described by a parsed queue topology.
37  */
38
39 #include "ocs_os.h"
40 #include "ocs_hw.h"
41 #include "ocs_hw_queues.h"
42
43 #define HW_QTOP_DEBUG           0
44
45 /**
46  * @brief Initialize queues
47  *
48  * Given the parsed queue topology spec, the SLI queues are created and
49  * initialized
50  *
51  * @param hw pointer to HW object
52  * @param qtop pointer to queue topology
53  *
54  * @return returns 0 for success, an error code value for failure.
55  */
56 ocs_hw_rtn_e
57 ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
58 {
59         uint32_t i, j;
60         uint32_t default_lengths[QTOP_LAST], len;
61         uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0;
62         uint8_t rqset_filter_mask = 0;
63         hw_eq_t *eqs[hw->config.n_rq];
64         hw_cq_t *cqs[hw->config.n_rq];
65         hw_rq_t *rqs[hw->config.n_rq];
66         ocs_hw_qtop_entry_t *qt, *next_qt;
67         ocs_hw_mrq_t mrq;
68         bool use_mrq = FALSE;
69
70         hw_eq_t *eq = NULL;
71         hw_cq_t *cq = NULL;
72         hw_wq_t *wq = NULL;
73         hw_rq_t *rq = NULL;
74         hw_mq_t *mq = NULL;
75
76         ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG);
77
78         mrq.num_pairs = 0;
79         default_lengths[QTOP_EQ] = 1024;
80         default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ];
81         default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ];
82         default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ];
83         default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH;
84
85         hw->eq_count = 0;
86         hw->cq_count = 0;
87         hw->mq_count = 0;
88         hw->wq_count = 0;
89         hw->rq_count = 0;
90         hw->hw_rq_count = 0;
91         ocs_list_init(&hw->eq_list, hw_eq_t, link);
92
93         /* If MRQ is requested, check whether it is supported by the SLI port. */
94         if ((hw->config.n_rq > 1 ) && !hw->sli.config.features.flag.mrqp) {
95                 ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n");
96                 return OCS_HW_RTN_ERROR;
97         }
98
99         if (hw->config.n_rq > 1)
100                 use_mrq = TRUE;
101
102         /* Allocate class WQ pools */
103         for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
104                 hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
105                 if (hw->wq_class_array[i] == NULL) {
106                         ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n");
107                         return OCS_HW_RTN_NO_MEMORY;
108                 }
109         }
110
111         /* Allocate per CPU WQ pools */
112         for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
113                 hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
114                 if (hw->wq_cpu_array[i] == NULL) {
115                         ocs_log_err(hw->os, "ocs_varray_alloc for wq_cpu failed\n");
116                         return OCS_HW_RTN_NO_MEMORY;
117                 }
118         }
119
120
121         ocs_hw_assert(qtop != NULL);
122
123         for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
124                 if (i == qtop->inuse_count - 1)
125                         next_qt = NULL;
126                 else
127                         next_qt = qt + 1;
128
129                 switch(qt->entry) {
130                 case QTOP_EQ:
131                         len = (qt->len) ? qt->len : default_lengths[QTOP_EQ];
132
133                         if (qt->set_default) {
134                                 default_lengths[QTOP_EQ] = len;
135                                 break;
136                         }
137
138                         eq = hw_new_eq(hw, len);
139                         if (eq == NULL) {
140                                 hw_queue_teardown(hw);
141                                 return OCS_HW_RTN_NO_MEMORY;
142                         }
143                         break;
144
145                 case QTOP_CQ:
146                         len = (qt->len) ? qt->len : default_lengths[QTOP_CQ];
147
148                         if (qt->set_default) {
149                                 default_lengths[QTOP_CQ] = len;
150                                 break;
151                         }
152
153                         /* If this CQ is for an MRQ, delay its creation until the RQs are set up */
154                         if (!use_mrq || (next_qt == NULL) || (next_qt->entry != QTOP_RQ)) {
155                                 cq = hw_new_cq(eq, len);
156                                 if (cq == NULL) {
157                                         hw_queue_teardown(hw);
158                                         return OCS_HW_RTN_NO_MEMORY;
159                                 }
160                         }
161                         break;
162
163                 case QTOP_WQ: {
164
165                         len = (qt->len) ? qt->len : default_lengths[QTOP_WQ];
166                         if (qt->set_default) {
167                                 default_lengths[QTOP_WQ] = len;
168                                 break;
169                         }
170
171                         if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
172                                 ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp);
173                                 hw_queue_teardown(hw);
174                                 return OCS_HW_RTN_NO_MEMORY;
175                         }
176
177                         wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);
178                         if (wq == NULL) {
179                                 hw_queue_teardown(hw);
180                                 return OCS_HW_RTN_NO_MEMORY;
181                         }
182
183                         /* Place this WQ on the EQ WQ array */
184                         if (ocs_varray_add(eq->wq_array, wq)) {
185                                 ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n");
186                                 hw_queue_teardown(hw);
187                                 return OCS_HW_RTN_ERROR;
188                         }
189
190                         /* Place this WQ on the HW class array */
191                         if (qt->class < ARRAY_SIZE(hw->wq_class_array)) {
192                                 if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) {
193                                         ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n");
194                                         hw_queue_teardown(hw);
195                                         return OCS_HW_RTN_ERROR;
196                                 }
197                         } else {
198                                 ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class);
199                                 hw_queue_teardown(hw);
200                                 return OCS_HW_RTN_ERROR;
201                         }
202
203                         /*
204                          * Place this WQ on the per-CPU list, assuming that EQs are mapped to the CPU
205                          * given by the EQ instance modulo the number of CPUs
206                          */
207                         if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) {
208                                 ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n");
209                                 hw_queue_teardown(hw);
210                                 return OCS_HW_RTN_ERROR;
211                         }
212
213                         break;
214                 }
215                 case QTOP_RQ: {
216                         len = (qt->len) ? qt->len : default_lengths[QTOP_RQ];
217                         if (qt->set_default) {
218                                 default_lengths[QTOP_RQ] = len;
219                                 break;
220                         }
221
222                         if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
223                                 ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp);
224                                 hw_queue_teardown(hw);
225                                 return OCS_HW_RTN_NO_MEMORY;
226                         }
227
228                         if (use_mrq) {
229                                 mrq.rq_cfg[mrq.num_pairs].len = len;
230                                 mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp;
231                                 mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask;
232                                 mrq.rq_cfg[mrq.num_pairs].eq = eq;
233                                 mrq.num_pairs++;
234                         } else {
235                                 rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp);
236                                 if (rq == NULL) {
237                                         hw_queue_teardown(hw);
238                                         return OCS_HW_RTN_NO_MEMORY;
239                                 }
240                                 rq->filter_mask = qt->filter_mask;
241                         }
242                         break;
243                 }
244
245                 case QTOP_MQ:
246                         len = (qt->len) ? qt->len : default_lengths[QTOP_MQ];
247                         if (qt->set_default) {
248                                 default_lengths[QTOP_MQ] = len;
249                                 break;
250                         }
251
252                         mq = hw_new_mq(cq, len);
253                         if (mq == NULL) {
254                                 hw_queue_teardown(hw);
255                                 return OCS_HW_RTN_NO_MEMORY;
256                         }
257                         break;
258
259                 default:
260                         ocs_hw_assert(0);
261                         break;
262                 }
263         }
264
265         if (mrq.num_pairs) {
266                 /* First create normal RQs. */
267                 for (i = 0; i < mrq.num_pairs; i++) {
268                         for (j = 0; j < mrq.num_pairs; j++) {
269                                 if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) {
270                                         /* This RQ should be created as part of a set */
271                                         if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) {
272                                                 ocs_log_crit(hw->os, "Can't create more than one RQ set\n");
273                                                 hw_queue_teardown(hw);
274                                                 return OCS_HW_RTN_ERROR;
275                                         } else if (!rqset_filter_mask) {
276                                                 rqset_filter_mask = mrq.rq_cfg[i].filter_mask;
277                                                 rqset_len = mrq.rq_cfg[i].len;
278                                                 rqset_ulp = mrq.rq_cfg[i].ulp;
279                                         }
280                                         eqs[rqset_count] = mrq.rq_cfg[i].eq;
281                                         rqset_count++;
282                                         break;
283                                 }
284                         }
285                         if (j == mrq.num_pairs) {
286                                 /* Normal RQ */
287                                 cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]);
288                                 if (cq == NULL) {
289                                         hw_queue_teardown(hw);
290                                         return OCS_HW_RTN_NO_MEMORY;
291                                 }
292
293                                 rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp);
294                                 if (rq == NULL) {
295                                         hw_queue_teardown(hw);
296                                         return OCS_HW_RTN_NO_MEMORY;
297                                 }
298                                 rq->filter_mask = mrq.rq_cfg[i].filter_mask;
299                         }
300                 }
301
302                 /* Now create RQ Set */
303                 if (rqset_count) {
304                         if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) {
305                                 ocs_log_crit(hw->os,
306                                              "Max Supported MRQ pairs = %d\n",
307                                              OCE_HW_MAX_NUM_MRQ_PAIRS);
308                                 hw_queue_teardown(hw);
309                                 return OCS_HW_RTN_ERROR;
310                         }
311
312                         /* Create CQ set */
313                         if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) {
314                                 hw_queue_teardown(hw);
315                                 return OCS_HW_RTN_ERROR;
316                         }
317
318                         /* Create RQ set */
319                         if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) {
320                                 hw_queue_teardown(hw);
321                                 return OCS_HW_RTN_ERROR;
322                         }
323
324                         for (i = 0; i < rqset_count ; i++) {
325                                 rqs[i]->filter_mask = rqset_filter_mask;
326                                 rqs[i]->is_mrq = TRUE;
327                                 rqs[i]->base_mrq_id = rqs[0]->hdr->id;
328                         }
329
330                         hw->hw_mrq_count = rqset_count;
331                 }
332         }
333
334         return OCS_HW_RTN_SUCCESS;
335
336 }
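
/*
 * Illustrative usage sketch (not part of the driver): given an initialized
 * ocs_hw_t whose topology specification has already been parsed into
 * hw->qtop, queue bring-up reduces to a single call; on failure the function
 * has already torn down any queues it created.
 *
 *     ocs_hw_rtn_e rc = ocs_hw_init_queues(hw, hw->qtop);
 *     if (rc != OCS_HW_RTN_SUCCESS) {
 *             ocs_log_err(hw->os, "queue initialization failed: %d\n", rc);
 *     }
 */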
337
338 /**
339  * @brief Allocate a new EQ object
340  *
341  * A new EQ object is instantiated
342  *
343  * @param hw pointer to HW object
344  * @param entry_count number of entries in the EQ
345  *
346  * @return pointer to allocated EQ object
347  */
348 hw_eq_t*
349 hw_new_eq(ocs_hw_t *hw, uint32_t entry_count)
350 {
351         hw_eq_t *eq = ocs_malloc(hw->os, sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT);
352
353         if (eq != NULL) {
354                 eq->type = SLI_QTYPE_EQ;
355                 eq->hw = hw;
356                 eq->entry_count = entry_count;
357                 eq->instance = hw->eq_count++;
358                 eq->queue = &hw->eq[eq->instance];
359                 ocs_list_init(&eq->cq_list, hw_cq_t, link);
360
361                 eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
362                 if (eq->wq_array == NULL) {
363                         ocs_free(hw->os, eq, sizeof(*eq));
364                         eq = NULL;
365                 } else {
366                         if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) {
367                                 ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance);
368                                 ocs_free(hw->os, eq, sizeof(*eq));
369                                 eq = NULL;
370                         } else {
371                                 sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
372                                 hw->hw_eq[eq->instance] = eq;
373                                 ocs_list_add_tail(&hw->eq_list, eq);
374                                 ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id,
375                                         eq->entry_count);
376                         }
377                 }
378         }
379         return eq;
380 }
381
382 /**
383  * @brief Allocate a new CQ object
384  *
385  * A new CQ object is instantiated
386  *
387  * @param eq pointer to parent EQ object
388  * @param entry_count number of entries in the CQ
389  *
390  * @return pointer to allocated CQ object
391  */
392 hw_cq_t*
393 hw_new_cq(hw_eq_t *eq, uint32_t entry_count)
394 {
395         ocs_hw_t *hw = eq->hw;
396         hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
397
398         if (cq != NULL) {
399                 cq->eq = eq;
400                 cq->type = SLI_QTYPE_CQ;
401                 cq->instance = eq->hw->cq_count++;
402                 cq->entry_count = entry_count;
403                 cq->queue = &hw->cq[cq->instance];
404
405                 ocs_list_init(&cq->q_list, hw_q_t, link);
406
407                 if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) {
408                         ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
409                                 cq->instance,
410                                 cq->entry_count);
411                         ocs_free(hw->os, cq, sizeof(*cq));
412                         cq = NULL;
413                 } else {
414                         hw->hw_cq[cq->instance] = cq;
415                         ocs_list_add_tail(&eq->cq_list, cq);
416                         ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id,
417                                 cq->entry_count);
418                 }
419         }
420         return cq;
421 }
422
423 /**
424  * @brief Allocate a new CQ Set of objects.
425  *
426  * @param eqs pointer to a set of EQ objects.
427  * @param cqs pointer to a set of CQ objects to be returned.
428  * @param num_cqs number of CQ queues in the set.
429  * @param entry_count number of entries in the CQ.
430  *
431  * @return 0 on success and -1 on failure.
432  */
433 uint32_t
434 hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count)
435 {
436         uint32_t i;
437         ocs_hw_t *hw = eqs[0]->hw;
438         sli4_t *sli4 = &hw->sli;
439         hw_cq_t *cq = NULL;
440         sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT];
441
442         /* Initialise CQS pointers to NULL */
443         for (i = 0; i < num_cqs; i++) {
444                 cqs[i] = NULL;
445         }
446
447         for (i = 0; i < num_cqs; i++) {
448                 cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
449                 if (cq == NULL)
450                         goto error;
451
452                 cqs[i]          = cq;
453                 cq->eq          = eqs[i];
454                 cq->type        = SLI_QTYPE_CQ;
455                 cq->instance    = hw->cq_count++;
456                 cq->entry_count = entry_count;
457                 cq->queue       = &hw->cq[cq->instance];
458                 qs[i]           = cq->queue;
459                 assocs[i]       = eqs[i]->queue;
460                 ocs_list_init(&cq->q_list, hw_q_t, link);
461         }
462
463         if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {
464                 ocs_log_err(hw->os, "Failed to create CQ set\n");
465                 goto error;
466         }
467
468         for (i = 0; i < num_cqs; i++) {
469                 hw->hw_cq[cqs[i]->instance] = cqs[i];
470                 ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]);
471         }
472
473         return 0;
474
475 error:
476         for (i = 0; i < num_cqs; i++) {
477                 if (cqs[i]) {
478                         ocs_free(hw->os, cqs[i], sizeof(*cqs[i]));
479                         cqs[i] = NULL;
480                 }
481         }
482         return -1;
483 }
484
485
486 /**
487  * @brief Allocate a new MQ object
488  *
489  * A new MQ object is instantiated
490  *
491  * @param cq pointer to parent CQ object
492  * @param entry_count number of entries in the MQ
493  *
494  * @return pointer to allocated MQ object
495  */
496 hw_mq_t*
497 hw_new_mq(hw_cq_t *cq, uint32_t entry_count)
498 {
499         ocs_hw_t *hw = cq->eq->hw;
500         hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT);
501
502         if (mq != NULL) {
503                 mq->cq = cq;
504                 mq->type = SLI_QTYPE_MQ;
505                 mq->instance = cq->eq->hw->mq_count++;
506                 mq->entry_count = entry_count;
507                 mq->entry_size = OCS_HW_MQ_DEPTH;
508                 mq->queue = &hw->mq[mq->instance];
509
510                 if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ,
511                                     mq->queue,
512                                     mq->entry_size,
513                                     cq->queue, 0)) {
514                         ocs_log_err(hw->os, "MQ allocation failure\n");
515                         ocs_free(hw->os, mq, sizeof(*mq));
516                         mq = NULL;
517                 } else {
518                         hw->hw_mq[mq->instance] = mq;
519                         ocs_list_add_tail(&cq->q_list, mq);
520                         ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id,
521                                 mq->entry_count);
522                 }
523         }
524         return mq;
525 }
526
527 /**
528  * @brief Allocate a new WQ object
529  *
530  * A new WQ object is instantiated
531  *
532  * @param cq pointer to parent CQ object
533  * @param entry_count number of entries in the WQ
534  * @param class WQ class
535  * @param ulp index of chute
536  *
537  * @return pointer to allocated WQ object
538  */
539 hw_wq_t*
540 hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp)
541 {
542         ocs_hw_t *hw = cq->eq->hw;
543         hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT);
544
545         if (wq != NULL) {
546                 wq->hw = cq->eq->hw;
547                 wq->cq = cq;
548                 wq->type = SLI_QTYPE_WQ;
549                 wq->instance = cq->eq->hw->wq_count++;
550                 wq->entry_count = entry_count;
551                 wq->queue = &hw->wq[wq->instance];
552                 wq->ulp = ulp;
553                 wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT;
554                 wq->wqec_count = wq->wqec_set_count;
555                 wq->free_count = wq->entry_count - 1;
556                 wq->class = class;
557                 ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link);
558
559                 if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
560                         ocs_log_err(hw->os, "WQ allocation failure\n");
561                         ocs_free(hw->os, wq, sizeof(*wq));
562                         wq = NULL;
563                 } else {
564                         hw->hw_wq[wq->instance] = wq;
565                         ocs_list_add_tail(&cq->q_list, wq);
566                         ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
567                                 wq->entry_count, wq->class, wq->ulp);
568                 }
569         }
570         return wq;
571 }
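
/*
 * Minimal sketch (the lengths and class/ULP values are assumptions) of
 * building one EQ -> CQ -> WQ chain by hand, mirroring what
 * ocs_hw_init_queues() does while walking the parsed topology:
 *
 *     hw_eq_t *eq = hw_new_eq(hw, 1024);
 *     hw_cq_t *cq = (eq != NULL) ? hw_new_cq(eq, 1024) : NULL;
 *     hw_wq_t *wq = (cq != NULL) ? hw_new_wq(cq, 1024, 0, hw->ulp_start) : NULL;
 *     if (wq == NULL) {
 *             hw_queue_teardown(hw);
 *     }
 */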
572
573 /**
574  * @brief Allocate a hw_rq_t object
575  *
576  * Allocate an RQ object, which encapsulates 2 SLI queues (for rq pair)
577  *
578  * @param cq pointer to parent CQ object
579  * @param entry_count number of entries in the RQs
580  * @param ulp ULP index for this RQ
581  *
582  * @return pointer to newly allocated hw_rq_t
583  */
584 hw_rq_t*
585 hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp)
586 {
587         ocs_hw_t *hw = cq->eq->hw;
588         hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
589         uint32_t max_hw_rq;
590
591         ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);
592
593
594         if (rq != NULL) {
595                 rq->instance = hw->hw_rq_count++;
596                 rq->cq = cq;
597                 rq->type = SLI_QTYPE_RQ;
598                 rq->ulp = ulp;
599
600                 rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));
601
602                 /* Create the header RQ */
603                 ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
604                 rq->hdr = &hw->rq[hw->rq_count];
605                 rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
606
607                 if (sli_fc_rq_alloc(&hw->sli, rq->hdr,
608                                     rq->entry_count,
609                                     rq->hdr_entry_size,
610                                     cq->queue,
611                                     ulp, TRUE)) {
612                         ocs_log_err(hw->os, "RQ allocation failure - header\n");
613                         ocs_free(hw->os, rq, sizeof(*rq));
614                         return NULL;
615                 }
616                 hw->hw_rq_lookup[hw->rq_count] = rq->instance;  /* Update hw_rq_lookup[] */
617                 hw->rq_count++;
618                 ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr  size %4d ulp %d\n",
619                         rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp);
620
621                 /* Create the default data RQ */
622                 ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
623                 rq->data = &hw->rq[hw->rq_count];
624                 rq->data_entry_size = hw->config.rq_default_buffer_size;
625
626                 if (sli_fc_rq_alloc(&hw->sli, rq->data,
627                                     rq->entry_count,
628                                     rq->data_entry_size,
629                                     cq->queue,
630                                     ulp, FALSE)) {
631                         ocs_log_err(hw->os, "RQ allocation failure - first burst\n");
632                         ocs_free(hw->os, rq, sizeof(*rq));
633                         return NULL;
634                 }
635                 hw->hw_rq_lookup[hw->rq_count] = rq->instance;  /* Update hw_rq_lookup[] */
636                 hw->rq_count++;
637                 ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance,
638                         rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp);
639
640                 hw->hw_rq[rq->instance] = rq;
641                 ocs_list_add_tail(&cq->q_list, rq);
642
643                 rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
644                                             rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
645                 if (rq->rq_tracker == NULL) {
646                         ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
647                         return NULL;
648                 }
649         }
650         return rq;
651 }
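
/*
 * Sketch (assumes a CQ created as in the example above): each hw_rq_t wraps
 * a header/data SLI queue pair, reachable through rq->hdr and rq->data once
 * allocation succeeds.
 *
 *     hw_rq_t *rq = hw_new_rq(cq, 1024, hw->ulp_start);
 *     if (rq != NULL) {
 *             ocs_log_debug(hw->os, "hdr id %d data id %d\n",
 *                           rq->hdr->id, rq->data->id);
 *     }
 */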
652
653
654 /**
655  * @brief Allocate a hw_rq_t object SET
656  *
657  * Allocate an RQ object SET, where each element in the set
658  * encapsulates 2 SLI queues (an RQ pair)
659  *
660  * @param cqs pointers to be associated with RQs.
661  * @param rqs RQ pointers to be returned on success.
662  * @param num_rq_pairs number of rq pairs in the Set.
663  * @param entry_count number of entries in the RQs
664  * @param ulp ULP index for this RQ
665  *
666  * @return 0 on success and -1 on failure.
667  */
668 uint32_t
669 hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp)
670 {
671         ocs_hw_t *hw = cqs[0]->eq->hw;
672         hw_rq_t *rq = NULL;
673         sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL };
674         uint32_t max_hw_rq, i, q_count;
675
676         ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);
677
678         /* Initialise RQS pointers */
679         for (i = 0; i < num_rq_pairs; i++) {
680                 rqs[i] = NULL;
681         }
682
683         for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
684                 rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
685                 if (rq == NULL)
686                         goto error;
687
688                 rqs[i] = rq;
689                 rq->instance = hw->hw_rq_count++;
690                 rq->cq = cqs[i];
691                 rq->type = SLI_QTYPE_RQ;
692                 rq->ulp = ulp;
693                 rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));
694
695                 /* Header RQ */
696                 rq->hdr = &hw->rq[hw->rq_count];
697                 rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
698                 hw->hw_rq_lookup[hw->rq_count] = rq->instance;
699                 hw->rq_count++;
700                 qs[q_count] = rq->hdr;
701
702                 /* Data RQ */
703                 rq->data = &hw->rq[hw->rq_count];
704                 rq->data_entry_size = hw->config.rq_default_buffer_size;
705                 hw->hw_rq_lookup[hw->rq_count] = rq->instance;
706                 hw->rq_count++;
707                 qs[q_count + 1] = rq->data;
708
709                 rq->rq_tracker = NULL;
710         }
711
712         if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
713                             cqs[0]->queue->id,
714                             rqs[0]->entry_count,
715                             rqs[0]->hdr_entry_size,
716                             rqs[0]->data_entry_size,
717                             ulp)) {
718                 ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);
719                 goto error;
720         }
721
722
723         for (i = 0; i < num_rq_pairs; i++) {
724                 hw->hw_rq[rqs[i]->instance] = rqs[i];
725                 ocs_list_add_tail(&cqs[i]->q_list, rqs[i]);
726                 rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
727                                             rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
728                 if (rqs[i]->rq_tracker == NULL) {
729                         ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
730                         goto error;
731                 }
732         }
733
734         return 0;
735
736 error:
737         for (i = 0; i < num_rq_pairs; i++) {
738                 if (rqs[i] != NULL) {
739                         if (rqs[i]->rq_tracker != NULL) {
740                                 ocs_free(hw->os, rqs[i]->rq_tracker,
741                                          sizeof(ocs_hw_sequence_t*) * rqs[i]->entry_count);
742                         }
743                         ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
744                 }
745         }
746
747         return -1;
748 }
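
/*
 * Sketch of creating an MRQ set (assumes eqs[] holds previously created EQs
 * and count <= OCE_HW_MAX_NUM_MRQ_PAIRS; all names here are illustrative).
 * Both helpers fill their output arrays on success and return nonzero on
 * failure:
 *
 *     if (hw_new_cq_set(eqs, cqs, count, 1024) ||
 *         hw_new_rq_set(cqs, rqs, count, 1024, hw->ulp_start)) {
 *             hw_queue_teardown(hw);
 *     }
 */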
749
750
751 /**
752  * @brief Free an EQ object
753  *
754  * The EQ object and any child queue objects are freed
755  *
756  * @param eq pointer to EQ object
757  *
758  * @return none
759  */
760 void
761 hw_del_eq(hw_eq_t *eq)
762 {
763         if (eq != NULL) {
764                 hw_cq_t *cq;
765                 hw_cq_t *cq_next;
766
767                 ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) {
768                         hw_del_cq(cq);
769                 }
770                 ocs_varray_free(eq->wq_array);
771                 ocs_list_remove(&eq->hw->eq_list, eq);
772                 eq->hw->hw_eq[eq->instance] = NULL;
773                 ocs_free(eq->hw->os, eq, sizeof(*eq));
774         }
775 }
776
777 /**
778  * @brief Free a CQ object
779  *
780  * The CQ object and any child queue objects are freed
781  *
782  * @param cq pointer to CQ object
783  *
784  * @return none
785  */
786 void
787 hw_del_cq(hw_cq_t *cq)
788 {
789         if (cq != NULL) {
790                 hw_q_t *q;
791                 hw_q_t *q_next;
792
793                 ocs_list_foreach_safe(&cq->q_list, q, q_next) {
794                         switch(q->type) {
795                         case SLI_QTYPE_MQ:
796                                 hw_del_mq((hw_mq_t*) q);
797                                 break;
798                         case SLI_QTYPE_WQ:
799                                 hw_del_wq((hw_wq_t*) q);
800                                 break;
801                         case SLI_QTYPE_RQ:
802                                 hw_del_rq((hw_rq_t*) q);
803                                 break;
804                         default:
805                                 break;
806                         }
807                 }
808                 ocs_list_remove(&cq->eq->cq_list, cq);
809                 cq->eq->hw->hw_cq[cq->instance] = NULL;
810                 ocs_free(cq->eq->hw->os, cq, sizeof(*cq));
811         }
812 }
813
814 /**
815  * @brief Free a MQ object
816  *
817  * The MQ object is freed
818  *
819  * @param mq pointer to MQ object
820  *
821  * @return none
822  */
823 void
824 hw_del_mq(hw_mq_t *mq)
825 {
826         if (mq != NULL) {
827                 ocs_list_remove(&mq->cq->q_list, mq);
828                 mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
829                 ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq));
830         }
831 }
832
833 /**
834  * @brief Free a WQ object
835  *
836  * The WQ object is freed
837  *
838  * @param wq pointer to WQ object
839  *
840  * @return none
841  */
842 void
843 hw_del_wq(hw_wq_t *wq)
844 {
845         if (wq != NULL) {
846                 ocs_list_remove(&wq->cq->q_list, wq);
847                 wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
848                 ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq));
849         }
850 }
851
852 /**
853  * @brief Free an RQ object
854  *
855  * The RQ object is freed
856  *
857  * @param rq pointer to RQ object
858  *
859  * @return none
860  */
861 void
862 hw_del_rq(hw_rq_t *rq)
863 {
864         if (rq != NULL) {
865                 ocs_hw_t *hw = rq->cq->eq->hw;
866
867                 /* Free RQ tracker */
868                 if (rq->rq_tracker != NULL) {
869                         ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);
870                         rq->rq_tracker = NULL;
871                 }
872                 ocs_list_remove(&rq->cq->q_list, rq);
873                 hw->hw_rq[rq->instance] = NULL;
874                 ocs_free(hw->os, rq, sizeof(*rq));
875         }
876 }
877
878 /**
879  * @brief Display HW queue objects
880  *
881  * The HW queue objects are displayed using ocs_log
882  *
883  * @param hw pointer to HW object
884  *
885  * @return none
886  */
887 void
888 hw_queue_dump(ocs_hw_t *hw)
889 {
890         hw_eq_t *eq;
891         hw_cq_t *cq;
892         hw_q_t *q;
893         hw_mq_t *mq;
894         hw_wq_t *wq;
895         hw_rq_t *rq;
896
897         ocs_list_foreach(&hw->eq_list, eq) {
898                 ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
899                 ocs_list_foreach(&eq->cq_list, cq) {
900                         ocs_printf("  cq[%d] id %2d current\n", cq->instance, cq->queue->id);
901                         ocs_list_foreach(&cq->q_list, q) {
902                                 switch(q->type) {
903                                 case SLI_QTYPE_MQ:
904                                         mq = (hw_mq_t *) q;
905                                         ocs_printf("    mq[%d] id %2d\n", mq->instance, mq->queue->id);
906                                         break;
907                                 case SLI_QTYPE_WQ:
908                                         wq = (hw_wq_t *) q;
909                                         ocs_printf("    wq[%d] id %2d\n", wq->instance, wq->queue->id);
910                                         break;
911                                 case SLI_QTYPE_RQ:
912                                         rq = (hw_rq_t *) q;
913                                         ocs_printf("    rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id);
914                                         break;
915                                 default:
916                                         break;
917                                 }
918                         }
919                 }
920         }
921 }
922
923 /**
924  * @brief Teardown HW queue objects
925  *
926  * The HW queue objects are freed
927  *
928  * @param hw pointer to HW object
929  *
930  * @return none
931  */
932 void
933 hw_queue_teardown(ocs_hw_t *hw)
934 {
935         uint32_t i;
936         hw_eq_t *eq;
937         hw_eq_t *eq_next;
938
939         if (ocs_list_valid(&hw->eq_list)) {
940                 ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) {
941                         hw_del_eq(eq);
942                 }
943         }
944         for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
945                 ocs_varray_free(hw->wq_cpu_array[i]);
946                 hw->wq_cpu_array[i] = NULL;
947         }
948         for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
949                 ocs_varray_free(hw->wq_class_array[i]);
950                 hw->wq_class_array[i] = NULL;
951         }
952 }
953
954 /**
955  * @brief Allocate a WQ to an IO object
956  *
957  * The next work queue index is used to assign a WQ to an IO.
958  *
959  * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is
960  * selected.
961  *
962  * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
963  * the IO request came in on is selected.
964  *
965  * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
966  * CPU the request is made on is selected.
967  *
968  * @param hw pointer to HW object
969  * @param io pointer to IO object
970  *
971  * @return Return pointer to next WQ
972  */
973 hw_wq_t *
974 ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
975 {
976         hw_eq_t *eq;
977         hw_wq_t *wq = NULL;
978
979         switch(io->wq_steering) {
980         case OCS_HW_WQ_STEERING_CLASS:
981                 if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
982                         wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
983                 }
984                 break;
985         case OCS_HW_WQ_STEERING_REQUEST:
986                 eq = io->eq;
987                 if (likely(eq != NULL)) {
988                         wq = ocs_varray_iter_next(eq->wq_array);
989                 }
990                 break;
991         case OCS_HW_WQ_STEERING_CPU: {
992                 uint32_t cpuidx = ocs_thread_getcpu();
993
994                 if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
995                         wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);
996                 }
997                 break;
998         }
999         }
1000
1001         if (unlikely(wq == NULL)) {
1002                 wq = hw->hw_wq[0];
1003         }
1004
1005         return wq;
1006 }
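
/*
 * Sketch (the IO field values are illustrative): an IO that requests
 * class-based steering gets a WQ from the matching class pool, falling back
 * to hw->hw_wq[0] when no candidate is found.
 *
 *     io->wq_steering = OCS_HW_WQ_STEERING_CLASS;
 *     io->wq_class = 1;
 *     hw_wq_t *wq = ocs_hw_queue_next_wq(hw, io);
 */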
1007
1008 /**
1009  * @brief Return count of EQs for a queue topology object
1010  *
1011  * The EQ count in the HW's queue topology (hw->qtop) object is returned
1012  *
1013  * @param hw pointer to HW object
1014  *
1015  * @return count of EQs
1016  */
1017 uint32_t
1018 ocs_hw_qtop_eq_count(ocs_hw_t *hw)
1019 {
1020         return hw->qtop->entry_counts[QTOP_EQ];
1021 }
1022
1023 #define TOKEN_LEN               32
1024
1025 /**
1026  * @brief return string given a QTOP entry
1027  *
1028  * @param entry QTOP entry
1029  *
1030  * @return returns string or "unknown"
1031  */
1032 #if HW_QTOP_DEBUG
1033 static char *
1034 qtopentry2s(ocs_hw_qtop_entry_e entry) {
1035         switch(entry) {
1036         #define P(x)    case x: return #x;
1037         P(QTOP_EQ)
1038         P(QTOP_CQ)
1039         P(QTOP_WQ)
1040         P(QTOP_RQ)
1041         P(QTOP_MQ)
1042         P(QTOP_THREAD_START)
1043         P(QTOP_THREAD_END)
1044         P(QTOP_LAST)
1045         #undef P
1046         }
1047         return "unknown";
1048 }
1049 #endif
1050
1051 /**
1052  * @brief Declare token types
1053  */
1054 typedef enum {
1055         TOK_LPAREN = 1,
1056         TOK_RPAREN,
1057         TOK_COLON,
1058         TOK_EQUALS,
1059         TOK_QUEUE,
1060         TOK_ATTR_NAME,
1061         TOK_NUMBER,
1062         TOK_NUMBER_VALUE,
1063         TOK_NUMBER_LIST,
1064 } tok_type_e;
1065
1066 /**
1067  * @brief Declare token sub-types
1068  */
1069 typedef enum {
1070         TOK_SUB_EQ = 100,
1071         TOK_SUB_CQ,
1072         TOK_SUB_RQ,
1073         TOK_SUB_MQ,
1074         TOK_SUB_WQ,
1075         TOK_SUB_LEN,
1076         TOK_SUB_CLASS,
1077         TOK_SUB_ULP,
1078         TOK_SUB_FILTER,
1079 } tok_subtype_e;
1080
1081 /**
1082  * @brief convert queue subtype to QTOP entry
1083  *
1084  * @param q queue subtype
1085  *
1086  * @return QTOP entry or 0
1087  */
1088 static ocs_hw_qtop_entry_e
1089 subtype2qtop(tok_subtype_e q)
1090 {
1091         switch(q) {
1092         case TOK_SUB_EQ:        return QTOP_EQ;
1093         case TOK_SUB_CQ:        return QTOP_CQ;
1094         case TOK_SUB_RQ:        return QTOP_RQ;
1095         case TOK_SUB_MQ:        return QTOP_MQ;
1096         case TOK_SUB_WQ:        return QTOP_WQ;
1097         default:
1098                 break;
1099         }
1100         return 0;
1101 }
1102
1103 /**
1104  * @brief Declare token object
1105  */
1106 typedef struct {
1107         tok_type_e type;
1108         tok_subtype_e subtype;
1109         char string[TOKEN_LEN];
1110 } tok_t;
1111
1112 /**
1113  * @brief Declare token array object
1114  */
1115 typedef struct {
1116         tok_t *tokens;                  /* Pointer to array of tokens */
1117         uint32_t alloc_count;           /* Number of token slots allocated in the array */
1118         uint32_t inuse_count;           /* Number of tokens posted to array */
1119         uint32_t iter_idx;              /* Iterator index */
1120 } tokarray_t;
1121
1122 /**
1123  * @brief Declare token match structure
1124  */
1125 typedef struct {
1126         char *s;
1127         tok_type_e type;
1128         tok_subtype_e subtype;
1129 } tokmatch_t;
1130
1131 /**
1132  * @brief test if character is ID start character
1133  *
1134  * @param c character to test
1135  *
1136  * @return TRUE if character is an ID start character
1137  */
1138 static int32_t
1139 idstart(int c)
1140 {
1141         return  isalpha(c) || (c == '_') || (c == '$');
1142 }
1143
1144 /**
1145  * @brief test if character is an ID character
1146  *
1147  * @param c character to test
1148  *
1149  * @return TRUE if character is an ID character
1150  */
1151 static int32_t
1152 idchar(int c)
1153 {
1154         return idstart(c) || ocs_isdigit(c);
1155 }
1156
1157 /**
1158  * @brief Declare single character matches
1159  */
1160 static tokmatch_t cmatches[] = {
1161         {"(", TOK_LPAREN},
1162         {")", TOK_RPAREN},
1163         {":", TOK_COLON},
1164         {"=", TOK_EQUALS},
1165 };
1166
1167 /**
1168  * @brief Declare identifier match strings
1169  */
1170 static tokmatch_t smatches[] = {
1171         {"eq", TOK_QUEUE, TOK_SUB_EQ},
1172         {"cq", TOK_QUEUE, TOK_SUB_CQ},
1173         {"rq", TOK_QUEUE, TOK_SUB_RQ},
1174         {"mq", TOK_QUEUE, TOK_SUB_MQ},
1175         {"wq", TOK_QUEUE, TOK_SUB_WQ},
1176         {"len", TOK_ATTR_NAME, TOK_SUB_LEN},
1177         {"class", TOK_ATTR_NAME, TOK_SUB_CLASS},
1178         {"ulp", TOK_ATTR_NAME, TOK_SUB_ULP},
1179         {"filter", TOK_ATTR_NAME, TOK_SUB_FILTER},
1180 };
1181
1182 /**
1183  * @brief Scan string and return next token
1184  *
1185  * The string is scanned and the next token is returned
1186  *
1187  * @param s input string to scan
1188  * @param tok pointer to place scanned token
1189  *
1190  * @return pointer to input string following scanned token, or NULL
1191  */
1192 static const char *
1193 tokenize(const char *s, tok_t *tok)
1194 {
1195         uint32_t i;
1196
1197         memset(tok, 0, sizeof(*tok));
1198
1199         /* Skip over whitespace */
1200         while (*s && ocs_isspace(*s)) {
1201                 s++;
1202         }
1203
1204         /* Return if nothing left in this string */
1205         if (*s == 0) {
1206                 return NULL;
1207         }
1208
1209         /* Look for single character matches */
1210         for (i = 0; i < ARRAY_SIZE(cmatches); i++) {
1211                 if (cmatches[i].s[0] == *s) {
1212                         tok->type = cmatches[i].type;
1213                         tok->subtype = cmatches[i].subtype;
1214                         tok->string[0] = *s++;
1215                         return s;
1216                 }
1217         }
1218
1219         /* Scan for a hex number or decimal */
1220         if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) {
1221                 char *p = tok->string;
1222
1223                 tok->type = TOK_NUMBER;
1224
1225                 *p++ = *s++;
1226                 *p++ = *s++;
1227                 while ((*s == ',') || ocs_isxdigit(*s)) {
1228                         if ((p - tok->string) < (int32_t)sizeof(tok->string) - 1) {
1229                                 *p++ = *s;
1230                         }
1231                         if (*s == ',') {
1232                                 tok->type = TOK_NUMBER_LIST;
1233                         }
1234                         s++;
1235                 }
1236                 *p = 0;
1237                 return s;
1238         } else if (ocs_isdigit(*s)) {
1239                 char *p = tok->string;
1240
1241                 tok->type = TOK_NUMBER;
1242                 while ((*s == ',') || ocs_isdigit(*s)) {
1243                         if ((p - tok->string) < (int32_t)sizeof(tok->string) - 1) {
1244                                 *p++ = *s;
1245                         }
1246                         if (*s == ',') {
1247                                 tok->type = TOK_NUMBER_LIST;
1248                         }
1249                         s++;
1250                 }
1251                 *p = 0;
1252                 return s;
1253         }
1254
1255         /* Scan for an ID */
1256         if (idstart(*s)) {
1257                 char *p = tok->string;
1258
1259                 for (*p++ = *s++; idchar(*s); s++) {
1260                         if ((p - tok->string) < (TOKEN_LEN - 1)) {
1261                                 *p++ = *s;
1262                         }
1263                 }
1264
1265                 /* See if this is a $ number value */
1266                 if (tok->string[0] == '$') {
1267                         tok->type = TOK_NUMBER_VALUE;
1268                 } else {
1269                         /* Look for a string match */
1270                         for (i = 0; i < ARRAY_SIZE(smatches); i++) {
1271                                 if (strcmp(smatches[i].s, tok->string) == 0) {
1272                                         tok->type = smatches[i].type;
1273                                         tok->subtype = smatches[i].subtype;
1274                                         return s;
1275                                 }
1276                         }
1277                 }
1278         }
1279         return s;
1280 }
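
/*
 * Sketch of driving the tokenizer over a topology string (the spec string is
 * an illustrative assumption): tokenize() returns the advanced scan pointer,
 * or NULL once the input is exhausted.
 *
 *     const char *s = "eq cq wq:len=256 cq rq:filter=0";
 *     tok_t tok;
 *
 *     while ((s = tokenize(s, &tok)) != NULL) {
 *             (each iteration yields tok.type / tok.subtype / tok.string)
 *     }
 */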
1281
1282 /**
1283  * @brief convert token type to string
1284  *
1285  * @param type token type
1286  *
1287  * @return string, or "unknown"
1288  */
1289 static const char *
1290 token_type2s(tok_type_e type)
1291 {
1292         switch(type) {
1293         #define P(x)    case x: return #x;
1294         P(TOK_LPAREN)
1295         P(TOK_RPAREN)
1296         P(TOK_COLON)
1297         P(TOK_EQUALS)
1298         P(TOK_QUEUE)
1299         P(TOK_ATTR_NAME)
1300         P(TOK_NUMBER)
1301         P(TOK_NUMBER_VALUE)
1302         P(TOK_NUMBER_LIST)
1303         #undef P
1304         }
1305         return "unknown";
1306 }
1307
1308 /**
1309  * @brief convert token sub-type to string
1310  *
1311  * @param subtype token sub-type
1312  *
1313  * @return string, or "unknown"
1314  */
1315 static const char *
1316 token_subtype2s(tok_subtype_e subtype)
1317 {
1318         switch(subtype) {
1319         #define P(x)    case x: return #x;
1320         P(TOK_SUB_EQ)
1321         P(TOK_SUB_CQ)
1322         P(TOK_SUB_RQ)
1323         P(TOK_SUB_MQ)
1324         P(TOK_SUB_WQ)
1325         P(TOK_SUB_LEN)
1326         P(TOK_SUB_CLASS)
1327         P(TOK_SUB_ULP)
1328         P(TOK_SUB_FILTER)
1329         #undef P
1330         }
1331         return "";
1332 }
1333
1334 /**
1335  * @brief Generate syntax error message
1336  *
1337  * When a syntax error is found, the input tokens are dumped up to and including
1338  * the token that failed, as indicated by the current iterator index.
1339  *
1340  * @param hw pointer to HW object
1341  * @param tokarray pointer to token array object
1342  *
1343  * @return none
1344  */
1345 static void
1346 tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
1347 {
1348         uint32_t i;
1349         tok_t *tok;
1350
1351         ocs_log_test(hw->os, "Syntax error:\n");
1352
1353         for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
1354                 ocs_log_test(hw->os, "%s [%2d]    %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : "   ", i,
1355                         token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
1356         }
1357 }
1358
1359 /**
1360  * @brief parse a number
1361  *
1362  * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
1363  *
1364  * @param hw pointer to HW object
1365  * @param qtop pointer to QTOP object
1366  * @param tok pointer to token to parse
1367  *
1368  * @return numeric value
1369  */
1370 static uint32_t
1371 tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
1372 {
1373         uint32_t rval = 0;
1374         uint32_t num_cpus = ocs_get_num_cpus();
1375
1376         switch(tok->type) {
1377         case TOK_NUMBER_VALUE:
1378                 if (ocs_strcmp(tok->string, "$ncpu") == 0) {
1379                         rval = num_cpus;
1380                 } else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
1381                         rval = num_cpus - 1;
1382                 } else if (ocs_strcmp(tok->string, "$nwq") == 0) {
1383                         if (hw != NULL) {
1384                                 rval = hw->config.n_wq;
1385                         }
1386                 } else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
1387                         rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
1388                 } else if (ocs_strcmp(tok->string, "$nulp") == 0) {
1389                         rval = (hw != NULL) ? (hw->ulp_max - hw->ulp_start + 1) : 0;
1390                 } else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
1391                         rval = qtop->rptcount[qtop->rptcount_idx-1];
1392                 } else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
1393                         rval = qtop->rptcount[qtop->rptcount_idx-2];
1394                 } else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
1395                         rval = qtop->rptcount[qtop->rptcount_idx-3];
1396                 } else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
1397                         rval = qtop->rptcount[qtop->rptcount_idx-4];
1398                 } else {
1399                         rval = ocs_strtoul(tok->string, 0, 0);
1400                 }
1401                 break;
1402         case TOK_NUMBER:
1403                 rval = ocs_strtoul(tok->string, 0, 0);
1404                 break;
1405         default:
1406                 break;
1407         }
1408         return rval;
1409 }
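
/*
 * Worked examples of the values this returns (assuming an 8-CPU system):
 * "1024" and "0x400" parse literally via ocs_strtoul(), "$ncpu" yields 8,
 * "$ncpu1" yields 7, and "$rpt0" yields the most recently recorded repeat
 * count from qtop->rptcount[].
 */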
1410
1411
1412 /**
1413  * @brief parse an array of tokens
1414  *
1415  * The tokens are semantically parsed to generate QTOP entries.
1416  *
1417  * @param hw pointer to HW object
1418  * @param tokarray array of tokens
1419  * @param qtop output QTOP object
1420  *
1421  * @return returns 0 for success, a negative error code value for failure.
1422  */
1423 static int32_t
1424 parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
1425 {
1426         ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;
1427         tok_t *tok;
1428
1429         for (; (tokarray->iter_idx < tokarray->inuse_count) &&
1430              ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
1431                 if (qtop->inuse_count >= qtop->alloc_count) {
1432                         return -1;
1433                 }
1434
1435                 qt = qtop->entries + qtop->inuse_count;
1436
1437                 switch (tok[0].type)
1438                 {
1439                 case TOK_QUEUE:
1440                         qt->entry = subtype2qtop(tok[0].subtype);
1441                         qt->set_default = FALSE;
1442                         qt->len = 0;
1443                         qt->class = 0;
1444                         qtop->inuse_count++;
1445
1446                         tokarray->iter_idx++;           /* Advance current token index */
1447
1448                         /* Parse for queue attributes, possibly multiple instances */
1449                         while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
1450                                 tok = &tokarray->tokens[tokarray->iter_idx];
1451                                 if (    (tok[0].type == TOK_COLON) &&
1452                                         (tok[1].type == TOK_ATTR_NAME) &&
1453                                         (tok[2].type == TOK_EQUALS) &&
1454                                         ((tok[3].type == TOK_NUMBER) ||
1455                                          (tok[3].type == TOK_NUMBER_VALUE) ||
1456                                          (tok[3].type == TOK_NUMBER_LIST))) {
1457
1458                                         switch (tok[1].subtype) {
1459                                         case TOK_SUB_LEN:
1460                                                 qt->len = tok_getnumber(hw, qtop, &tok[3]);
1461                                                 break;
1462
1463                                         case TOK_SUB_CLASS:
1464                                                 qt->class = tok_getnumber(hw, qtop, &tok[3]);
1465                                                 break;
1466
1467                                         case TOK_SUB_ULP:
1468                                                 qt->ulp = tok_getnumber(hw, qtop, &tok[3]);
1469                                                 break;
1470
1471                                         case TOK_SUB_FILTER:
1472                                                 if (tok[3].type == TOK_NUMBER_LIST) {
1473                                                         uint32_t mask = 0;
1474                                                         char *p = tok[3].string;
1475
1476                                                         while ((p != NULL) && *p) {
1477                                                                 uint32_t v;
1478
1479                                                                 v = ocs_strtoul(p, 0, 0);
1480                                                                 if (v < 32) {
1481                                                                         mask |= (1U << v);
1482                                                                 }
1483
1484                                                                 p = ocs_strchr(p, ',');
1485                                                                 if (p != NULL) {
1486                                                                         p++;
1487                                                                 }
1488                                                         }
1489                                                         qt->filter_mask = mask;
1490                                                 } else {
1491                                                         qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));
1492                                                 }
1493                                                 break;
1494                                         default:
1495                                                 break;
1496                                         }
1497                                         /* Advance current token index */
1498                                         tokarray->iter_idx += 4;
1499                                 } else {
1500                                         break;
1501                                 }
1502                         }
1503                         qtop->entry_counts[qt->entry]++;
1504                         break;
1505
1506                 case TOK_ATTR_NAME:
1507                         if (    ((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
1508                                 (tok[1].type == TOK_COLON) &&
1509                                 (tok[2].type == TOK_QUEUE) &&
1510                                 (tok[3].type == TOK_EQUALS) &&
1511                                 ((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
1512                                 qt->entry = subtype2qtop(tok[2].subtype);
1513                                 qt->set_default = TRUE;
1514                                 switch (tok[0].subtype) {
1515                                 case TOK_SUB_LEN:
1516                                         qt->len = tok_getnumber(hw, qtop, &tok[4]);
1517                                         break;
1518                                 case TOK_SUB_CLASS:
1519                                         qt->class = tok_getnumber(hw, qtop, &tok[4]);
1520                                         break;
1521                                 case TOK_SUB_ULP:
1522                                         qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
1523                                         break;
1524                                 default:
1525                                         break;
1526                                 }
1527                                 qtop->inuse_count++;
1528                                 tokarray->iter_idx += 5;
1529                         } else {
1530                                 tok_syntax(hw, tokarray);
1531                                 return -1;
1532                         }
1533                         break;
1534
1535                 case TOK_NUMBER:
1536                 case TOK_NUMBER_VALUE: {
1537                         uint32_t rpt_count = 1;
1538                         uint32_t i;
1539
1540                         rpt_count = tok_getnumber(hw, qtop, tok);
1541
1542                         if (tok[1].type == TOK_LPAREN) {
1543                                 uint32_t iter_idx_save;
1544
1545                                 tokarray->iter_idx += 2;
1546
1547                                 /* save token array iteration index */
1548                                 iter_idx_save = tokarray->iter_idx;
1549
1550                                 for (i = 0; i < rpt_count; i++) {
1551                                         uint32_t rptcount_idx = qtop->rptcount_idx;
1552
1553                                         if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
1554                                                 qtop->rptcount[qtop->rptcount_idx++] = i;
1555                                         }
1556
1557                                         /* restore token array iteration index */
1558                                         tokarray->iter_idx = iter_idx_save;
1559
1560                                         /* parse, append to qtop */
1561                                         parse_topology(hw, tokarray, qtop);
1562
1563                                         qtop->rptcount_idx = rptcount_idx;
1564                                 }
1565                         }
1566                         break;
1567                 }
1568
1569                 case TOK_RPAREN:
1570                         tokarray->iter_idx++;
1571                         return 0;
1572
1573                 default:
1574                         tok_syntax(hw, tokarray);
1575                         return -1;
1576                 }
1577         }
1578         return 0;
1579 }
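
/*
 * Illustration (editor's sketch, not driver code): the grammar accepted above
 * consists of queue tokens with optional ":attr=value" suffixes, default
 * assignments of the form "attr:queue=value", repetition "N(...)", and ")"
 * to close a repeat group. A hypothetical topology string such as
 *
 *   "len:wq=256 eq cq wq:class=1 cq rq:filter=0,1:len=1024"
 *
 * would produce one set_default entry (default WQ length 256) via the
 * TOK_ATTR_NAME case, followed by one entry per queue token via the TOK_QUEUE
 * case; the rq entry would get filter_mask = (1U << 0) | (1U << 1) from the
 * TOK_NUMBER_LIST branch and len = 1024. A prefix such as "2(...)" would
 * re-parse the parenthesized tokens twice, recording the pass number in
 * qtop->rptcount[].
 */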
1580
1581 /**
1582  * @brief Parse queue topology string
1583  *
1584  * The queue topology object is allocated and filled with the results of parsing the
1585  * passed-in queue topology string.
1586  *
1587  * @param hw pointer to HW object
1588  * @param qtop_string input queue topology string
1589  *
1590  * @return pointer to allocated QTOP object, or NULL if there was an error
1591  */
1592 ocs_hw_qtop_t *
1593 ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
1594 {
1595         ocs_hw_qtop_t *qtop;
1596         tokarray_t tokarray;
1597         const char *s;
1598 #if HW_QTOP_DEBUG
1599         uint32_t i;
1600         ocs_hw_qtop_entry_t *qt;
1601 #endif
1602
1603         ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);
1604
1605         /* Allocate a token array */
1606         tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
1607         if (tokarray.tokens == NULL) {
1608                 return NULL;
1609         }
1610         tokarray.alloc_count = MAX_TOKENS;
1611         tokarray.inuse_count = 0;
1612         tokarray.iter_idx = 0;
1613
1614         /* Parse the tokens */
1615         for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
1616              ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count])) != NULL); ) {
1617                 tokarray.inuse_count++;
1618         }
1619
1620         /* Allocate a queue topology structure */
1621         qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
1622         if (qtop == NULL) {
1623                 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1624                 ocs_log_err(hw->os, "malloc qtop failed\n");
1625                 return NULL;
1626         }
1627         qtop->os = hw->os;
1628
1629         /* Allocate queue topology entries */
1630         qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES*sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
1631         if (qtop->entries == NULL) {
1632                 ocs_log_err(hw->os, "malloc qtop entries failed\n");
1633                 ocs_free(hw->os, qtop, sizeof(*qtop));
1634                 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1635                 return NULL;
1636         }
1637         qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
1638         qtop->inuse_count = 0;
1639
1640         /* Parse the tokens */
1641         parse_topology(hw, &tokarray, qtop);
1642 #if HW_QTOP_DEBUG
1643         for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
1644                 ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n", qtopentry2s(qt->entry), qt->set_default, qt->len,
1645                        qt->class, qt->ulp);
1646         }
1647 #endif
1648
1649         /* Free the tokens array */
1650         ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1651
1652         return qtop;
1653 }
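
/*
 * Usage sketch (hypothetical caller, for illustration only; the example
 * string is made up): parse a topology string, inspect the entries, and
 * release the object with ocs_hw_qtop_free() when it is no longer needed.
 *
 * @code
 * ocs_hw_qtop_t *qtop = ocs_hw_qtop_parse(hw, "eq cq wq cq rq cq mq");
 * if (qtop != NULL) {
 *         ocs_log_debug(hw->os, "parsed %d qtop entries\n", qtop->inuse_count);
 *         ocs_hw_qtop_free(qtop);
 * }
 * @endcode
 */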
1654
1655 /**
1656  * @brief free queue topology object
1657  *
1658  * @param qtop pointer to QTOP object
1659  *
1660  * @return none
1661  */
1662 void
1663 ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
1664 {
1665         if (qtop != NULL) {
1666                 if (qtop->entries != NULL) {
1667                         ocs_free(qtop->os, qtop->entries, qtop->alloc_count*sizeof(*qtop->entries));
1668                 }
1669                 ocs_free(qtop->os, qtop, sizeof(*qtop));
1670         }
1671 }
1672
1673 /* Uncomment this to turn on RQ debug */
1674 /* #define ENABLE_DEBUG_RQBUF */
1675
1676 static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
1677 static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
1678 static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1679 static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1680
1681 /**
1682  * @brief Process receive queue completions for RQ Pair mode.
1683  *
1684  * @par Description
1685  * RQ completions are processed. In RQ pair mode, a single header and single payload
1686  * buffer are received, and passed to the function that has registered for unsolicited
1687  * callbacks.
1688  *
1689  * @param hw Hardware context.
1690  * @param cq Pointer to HW completion queue.
1691  * @param cqe Completion queue entry.
1692  *
1693  * @return Returns 0 for success, or a negative error code value for failure.
1694  */
1695
1696 int32_t
1697 ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1698 {
1699         uint16_t rq_id;
1700         uint32_t index;
1701         int32_t rqindex;
1702         int32_t  rq_status;
1703         uint32_t h_len;
1704         uint32_t p_len;
1705         ocs_hw_sequence_t *seq;
1706
1707         rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1708         if (0 != rq_status) {
1709                 switch (rq_status) {
1710                 case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1711                 case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1712                         /* just get RQ buffer then return to chip */
1713                         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1714                         if (rqindex < 0) {
1715                                 ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1716                                              rq_status, rq_id);
1717                                 break;
1718                         }
1719
1720                         /* get RQ buffer */
1721                         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1722
1723                         /* return to chip */
1724                         if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1725                                 ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",
1726                                              rq_status);
1727                                 break;
1728                         }
1729                         break;
1730                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1731                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1732                         /* since RQ buffers were not consumed, cannot return them to chip */
1733                         ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1734                         /* fall through */
1735                 default:
1736                         break;
1737                 }
1738                 return -1;
1739         }
1740
1741         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1742         if (rqindex < 0) {
1743                 ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1744                 return -1;
1745         }
1746
1747         OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
1748                    rq->use_count++; rq->hdr_use_count++; rq->payload_use_count++; })
1749
1750         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1751         ocs_hw_assert(seq != NULL);
1752
1753         seq->hw = hw;
1754         seq->auto_xrdy = 0;
1755         seq->out_of_xris = 0;
1756         seq->xri = 0;
1757         seq->hio = NULL;
1758
1759         sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1760         seq->header->dma.len = h_len;
1761         seq->payload->dma.len = p_len;
1762         seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1763         seq->hw_priv = cq->eq;
1764
1765         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1766         if (hw->config.bounce) {
1767                 fc_header_t *hdr = seq->header->dma.virt;
1768                 uint32_t s_id = fc_be24toh(hdr->s_id);
1769                 uint32_t d_id = fc_be24toh(hdr->d_id);
1770                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1771                 if (hw->callback.bounce != NULL) {
1772                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1773                 }
1774         } else {
1775                 hw->callback.unsolicited(hw->args.unsolicited, seq);
1776         }
1777
1778         return 0;
1779 }
1780
1781 /**
1782  * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
1783  *
1784  * @par Description
1785  * RQ completions are processed. In RQ pair mode, a single header and single payload
1786  * buffer are received, and passed to the function that has registered for unsolicited
1787  * callbacks.
1788  *
1789  * @param hw Hardware context.
1790  * @param cq Pointer to HW completion queue.
1791  * @param cqe Completion queue entry.
1792  *
1793  * @return Returns 0 for success, or a negative error code value for failure.
1794  */
1795
1796 int32_t
1797 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1798 {
1799         /* Seems silly to call a SLI function to decode - use the structure directly for performance */
1800         sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t*)cqe;
1801         uint16_t rq_id;
1802         uint32_t index;
1803         int32_t rqindex;
1804         int32_t  rq_status;
1805         uint32_t h_len;
1806         uint32_t p_len;
1807         ocs_hw_sequence_t *seq;
1808         uint8_t axr_lock_taken = 0;
1809 #if defined(OCS_DISC_SPIN_DELAY)
1810         uint32_t        delay = 0;
1811         char            prop_buf[32];
1812 #endif
1813
1814         rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1815         if (0 != rq_status) {
1816                 switch (rq_status) {
1817                 case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1818                 case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1819                         /* just get RQ buffer then return to chip */
1820                         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1821                         if (rqindex < 0) {
1822                                 ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1823                                             rq_status, rq_id);
1824                                 break;
1825                         }
1826
1827                         /* get RQ buffer */
1828                         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1829
1830                         /* return to chip */
1831                         if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1832                                 ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",
1833                                             rq_status);
1834                                 break;
1835                         }
1836                         break;
1837                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1838                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1839                         /* since RQ buffers were not consumed, cannot return them to chip */
1840                         ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1841                         /* fall through */
1842                 default:
1843                         break;
1844                 }
1845                 return -1;
1846         }
1847
1848         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1849         if (rqindex < 0) {
1850                 ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1851                 return -1;
1852         }
1853
1854         OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
1855                    rq->use_count++; rq->hdr_use_count++; rq->payload_use_count++; })
1856
1857         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1858         ocs_hw_assert(seq != NULL);
1859
1860         seq->hw = hw;
1861         seq->auto_xrdy = opt_wr->agxr;
1862         seq->out_of_xris = opt_wr->oox;
1863         seq->xri = opt_wr->xri;
1864         seq->hio = NULL;
1865
1866         sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1867         seq->header->dma.len = h_len;
1868         seq->payload->dma.len = p_len;
1869         seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1870         seq->hw_priv = cq->eq;
1871
1872         if (seq->auto_xrdy) {
1873                 fc_header_t *fc_hdr = seq->header->dma.virt;
1874
1875                 seq->hio = ocs_hw_io_lookup(hw, seq->xri);
1876                 ocs_lock(&seq->hio->axr_lock);
1877                 axr_lock_taken = 1;
1878
1879                 /* save the FCFI, src_id, dest_id and ox_id because we need them for the sequence object when the data arrives. */
1880                 seq->hio->axr_buf->fcfi = seq->fcfi;
1881                 seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
1882                 seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
1883                 seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
1884                 seq->hio->axr_buf->cmd_cqe = 1;
1885
1886                 /*
1887                  * Since auto xfer rdy is used for this IO, then clear the sequence
1888                  * initiative bit in the header so that the upper layers wait for the
1889                  * data. This should flow exactly like the first burst case.
1890                  */
1891                 fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);
1892
1893                 /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
1894                 if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
1895                         seq->hio->axr_buf->call_axr_cmd = 1;
1896                         seq->hio->axr_buf->cmd_seq = seq;
1897                         goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;
1898                 }
1899         }
1900
1901         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1902         if (hw->config.bounce) {
1903                 fc_header_t *hdr = seq->header->dma.virt;
1904                 uint32_t s_id = fc_be24toh(hdr->s_id);
1905                 uint32_t d_id = fc_be24toh(hdr->d_id);
1906                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1907                 if (hw->callback.bounce != NULL) {
1908                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1909                 }
1910         } else {
1911                 hw->callback.unsolicited(hw->args.unsolicited, seq);
1912         }
1913
1914         if (seq->auto_xrdy) {
1915                 /* If the data CQE arrived out of order, before this command CQE (AXR case) */
1916                 if (seq->hio->axr_buf->data_cqe == 1) {
1917
1918 #if defined(OCS_DISC_SPIN_DELAY)
1919                         if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
1920                                 delay = ocs_strtoul(prop_buf, 0, 0);
1921                                 ocs_udelay(delay);
1922                         }
1923 #endif
1924                         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1925                         if (hw->config.bounce) {
1926                                 fc_header_t *hdr = seq->header->dma.virt;
1927                                 uint32_t s_id = fc_be24toh(hdr->s_id);
1928                                 uint32_t d_id = fc_be24toh(hdr->d_id);
1929                                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1930                                 if (hw->callback.bounce != NULL) {
1931                                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);
1932                                 }
1933                         } else {
1934                                 hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);
1935                         }
1936                 }
1937         }
1938
1939 exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
1940         if (axr_lock_taken) {
1941                 ocs_unlock(&seq->hio->axr_lock);
1942         }
1943         return 0;
1944 }
1945
1946 /**
1947  * @brief Process CQ completions for Auto xfer rdy data phases.
1948  *
1949  * @par Description
1950  * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
1951  * being assigned to an IO. When the completion is received, all of the data
1952  * is in the single buffer.
1953  *
1954  * @param hw Hardware context.
1955  * @param cq Pointer to HW completion queue.
1956  * @param cqe Completion queue entry.
1957  *
1958  * @return Returns 0 for success, or a negative error code value for failure.
1959  */
1960
1961 int32_t
1962 ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1963 {
1964         /* Seems silly to call a SLI function to decode - use the structure directly for performance */
1965         sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t*)cqe;
1966         ocs_hw_sequence_t *seq;
1967         ocs_hw_io_t *io;
1968         ocs_hw_auto_xfer_rdy_buffer_t *buf;
1969 #if defined(OCS_DISC_SPIN_DELAY)
1970         uint32_t        delay = 0;
1971         char            prop_buf[32];
1972 #endif
1973         /* Look up the IO */
1974         io = ocs_hw_io_lookup(hw, opt_wr->xri);
1975         ocs_lock(&io->axr_lock);
1976         buf = io->axr_buf;
1977         buf->data_cqe = 1;
1978         seq = &buf->seq;
1979         seq->hw = hw;
1980         seq->auto_xrdy = 1;
1981         seq->out_of_xris = 0;
1982         seq->xri = opt_wr->xri;
1983         seq->hio = io;
1984         seq->header = &buf->header;
1985         seq->payload = &buf->payload;
1986
1987         seq->header->dma.len = sizeof(fc_header_t);
1988         seq->payload->dma.len = opt_wr->total_data_placed;
1989         seq->fcfi = buf->fcfi;
1990         seq->hw_priv = cq->eq;
1991
1992
1993         if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
1994                 seq->status = OCS_HW_UNSOL_SUCCESS;
1995         } else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
1996                 seq->status = OCS_HW_UNSOL_ABTS_RCVD;
1997         } else {
1998                 seq->status = OCS_HW_UNSOL_ERROR;
1999         }
2000
2001         /* If this AXR data CQE came before the previous TRSP CQE of the same XRI */
2002         if (io->type == OCS_HW_IO_TARGET_RSP) {
2003                 io->axr_buf->call_axr_data = 1;
2004                 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2005         }
2006
2007         if (!buf->cmd_cqe) {
2008                 /* if data cqe came before cmd cqe, return here, cmd cqe will handle */
2009                 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2010         }
2011 #if defined(OCS_DISC_SPIN_DELAY)
2012         if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
2013                 delay = ocs_strtoul(prop_buf, 0, 0);
2014                 ocs_udelay(delay);
2015         }
2016 #endif
2017
2018         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
2019         if (hw->config.bounce) {
2020                 fc_header_t *hdr = seq->header->dma.virt;
2021                 uint32_t s_id = fc_be24toh(hdr->s_id);
2022                 uint32_t d_id = fc_be24toh(hdr->d_id);
2023                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
2024                 if (hw->callback.bounce != NULL) {
2025                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
2026                 }
2027         } else {
2028                 hw->callback.unsolicited(hw->args.unsolicited, seq);
2029         }
2030
2031 exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
2032         ocs_unlock(&io->axr_lock);
2033         return 0;
2034 }
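
/*
 * Ordering note (summary of the two auto xfer rdy handlers above): the command
 * and data CQEs for the same XRI may arrive in either order. Each handler sets
 * its own flag (cmd_cqe or data_cqe) in the shared axr_buf and checks the
 * peer's flag under axr_lock; whichever handler observes both flags delivers
 * the data sequence to the unsolicited callback. If a TRSP for the same XRI is
 * still outstanding (io->type == OCS_HW_IO_TARGET_RSP), delivery is deferred
 * via call_axr_cmd/call_axr_data instead.
 */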
2035
2036 /**
2037  * @brief Return pointer to RQ buffer entry.
2038  *
2039  * @par Description
2040  * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
2041  *
2042  * @param hw Hardware context.
2043  * @param rqindex Index of the RQ that is being processed.
2044  * @param bufindex Index into the RQ that is being processed.
2045  *
2046  * @return Pointer to the sequence structure, or NULL otherwise.
2047  */
2048 static ocs_hw_sequence_t *
2049 ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
2050 {
2051         sli4_queue_t *rq_hdr = &hw->rq[rqindex];
2052         sli4_queue_t *rq_payload = &hw->rq[rqindex+1];
2053         ocs_hw_sequence_t *seq = NULL;
2054         hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
2055
2056 #if defined(ENABLE_DEBUG_RQBUF)
2057         uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);
2058 #endif
2059
2060         if (bufindex >= rq_hdr->length) {
2061                 ocs_log_err(hw->os, "RQ index %d bufindex %d exceeds ring length %d for id %d\n",
2062                             rqindex, bufindex, rq_hdr->length, rq_hdr->id);
2063                 return NULL;
2064         }
2065
2066         sli_queue_lock(rq_hdr);
2067         sli_queue_lock(rq_payload);
2068
2069 #if defined(ENABLE_DEBUG_RQBUF)
2070         /* Put a debug value into the rq, to track which entries are still valid */
2071         _sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
2072         _sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);
2073 #endif
2074
2075         seq = rq->rq_tracker[bufindex];
2076         rq->rq_tracker[bufindex] = NULL;
2077
2078         if (seq == NULL) {
2079                 ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
2080                             rqindex, bufindex, rq_hdr->index);
2081         }
2082
2083         sli_queue_unlock(rq_payload);
2084         sli_queue_unlock(rq_hdr);
2085         return seq;
2086 }
2087
2088 /**
2089  * @brief Posts an RQ buffer to a queue and updates the verification structures.
2090  *
2091  * @param hw Hardware context.
2092  * @param seq Pointer to sequence object.
2093  *
2094  * @return Returns 0 on success, or a non-zero value otherwise.
2095  */
2096 static int32_t
2097 ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2098 {
2099         sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
2100         sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
2101         uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
2102         hw_rq_t *rq = hw->hw_rq[hw_rq_index];
2103         uint32_t     phys_hdr[2];
2104         uint32_t     phys_payload[2];
2105         int32_t      qindex_hdr;
2106         int32_t      qindex_payload;
2107
2108         /* Update the RQ verification lookup tables */
2109         phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
2110         phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
2111         phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
2112         phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);
2113
2114         sli_queue_lock(rq_hdr);
2115         sli_queue_lock(rq_payload);
2116
2117         /*
2118          * Note: The header must be posted last for buffer pair mode because
2119          *       posting on the header queue posts the payload queue as well.
2120          *       We do not ring the payload queue independently in RQ pair mode.
2121          */
2122         qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
2123         qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
2124         if (qindex_hdr < 0 ||
2125             qindex_payload < 0) {
2126                 ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
2127                 sli_queue_unlock(rq_payload);
2128                 sli_queue_unlock(rq_hdr);
2129                 return OCS_HW_RTN_ERROR;
2130         }
2131
2132         /* ensure the indexes are the same */
2133         ocs_hw_assert(qindex_hdr == qindex_payload);
2134
2135         /* Update the lookup table */
2136         if (rq->rq_tracker[qindex_hdr] == NULL) {
2137                 rq->rq_tracker[qindex_hdr] = seq;
2138         } else {
2139                 ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
2140                              hw_rq_index, qindex_hdr);
2141         }
2142
2143         sli_queue_unlock(rq_payload);
2144         sli_queue_unlock(rq_hdr);
2145         return OCS_HW_RTN_SUCCESS;
2146 }
2147
2148 /**
2149  * @brief Return RQ buffers (while in RQ pair mode).
2150  *
2151  * @par Description
2152  * The header and payload buffers are returned to the Receive Queue.
2153  *
2154  * @param hw Hardware context.
2155  * @param seq Header/payload sequence buffers.
2156  *
2157  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
2158  */
2159
2160 ocs_hw_rtn_e
2161 ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2162 {
2163         ocs_hw_rtn_e   rc = OCS_HW_RTN_SUCCESS;
2164
2165         /* Check for auto xfer rdy dummy buffers and call the proper release function. */
2166         if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) {
2167                 return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq);
2168         }
2169
2170         /*
2171          * Post the data buffer first. Because in RQ pair mode, ringing the
2172          * doorbell of the header ring will post the data buffer as well.
2173          */
2174         if (ocs_hw_rqpair_put(hw, seq)) {
2175                 ocs_log_err(hw->os, "error writing buffers\n");
2176                 return OCS_HW_RTN_ERROR;
2177         }
2178
2179         return rc;
2180 }
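
/*
 * Usage sketch (hypothetical consumer, for illustration only): an unsolicited
 * callback, matching the (arg, seq) call shape dispatched above, returns the
 * header/payload buffers once it is done with the sequence.
 *
 * @code
 * static int32_t
 * my_unsolicited_cb(void *arg, ocs_hw_sequence_t *seq)
 * {
 *         ocs_hw_t *hw = seq->hw;
 *
 *         ... inspect seq->header and seq->payload ...
 *
 *         if (ocs_hw_rqpair_sequence_free(hw, seq) != OCS_HW_RTN_SUCCESS)
 *                 ocs_log_err(hw->os, "failed to repost RQ buffers\n");
 *         return 0;
 * }
 * @endcode
 */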
2181
2182 /**
2183  * @brief Find the RQ index of RQ_ID.
2184  *
2185  * @param hw Hardware context.
2186  * @param rq_id RQ ID to find.
2187  *
2188  * @return Returns the RQ index, or -1 if not found.
2189  */
2190 static inline int32_t
2191 ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id)
2192 {
2193         return ocs_hw_queue_hash_find(hw->rq_hash, rq_id);
2194 }
2195
2196 /**
2197  * @ingroup devInitShutdown
2198  * @brief Allocate auto xfer rdy buffers.
2199  *
2200  * @par Description
2201  * Allocates the auto xfer rdy buffers and places them on the free list.
2202  *
2203  * @param hw Hardware context allocated by the caller.
2204  * @param num_buffers Number of buffers to allocate.
2205  *
2206  * @return Returns 0 on success, or a non-zero value on failure.
2207  */
2208 ocs_hw_rtn_e
2209 ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers)
2210 {
2211         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2212         uint32_t i;
2213
2214         hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE);
2215         if (hw->auto_xfer_rdy_buf_pool == NULL) {
2216                 ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n");
2217                 return OCS_HW_RTN_NO_MEMORY;
2218         }
2219
2220         for (i = 0; i < num_buffers; i++) {
2221                 /* allocate the wrapper object */
2222                 buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2223                 ocs_hw_assert(buf != NULL);
2224
2225                 /* allocate the auto xfer ready buffer */
2226                 if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) {
2227                         ocs_log_err(hw->os, "DMA allocation failed\n");
2228                         /* buf is owned by auto_xfer_rdy_buf_pool; do not ocs_free() it here */
2229                         return OCS_HW_RTN_NO_MEMORY;
2230                 }
2231
2232                 /* build a fake data header in big endian */
2233                 buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2234                 buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2235                 buf->hdr.type = FC_TYPE_FCP;
2236                 buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2237                                             FC_FCTL_FIRST_SEQUENCE |
2238                                             FC_FCTL_LAST_SEQUENCE |
2239                                             FC_FCTL_END_SEQUENCE |
2240                                             FC_FCTL_SEQUENCE_INITIATIVE);
2241
2242                 /* build the fake header DMA object */
2243                 buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2244                 buf->header.dma.virt = &buf->hdr;
2245                 buf->header.dma.alloc = buf;
2246                 buf->header.dma.size = sizeof(buf->hdr);
2247                 buf->header.dma.len = sizeof(buf->hdr);
2248
2249                 buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2250         }
2251         return OCS_HW_RTN_SUCCESS;
2252 }
2253
2254 /**
2255  * @ingroup devInitShutdown
2256  * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX.
2257  *
2258  * @par Description
2259  * When new buffers are freed, check existing XRIs waiting for buffers.
2260  *
2261  * @param hw Hardware context allocated by the caller.
2262  */
2263 static void
2264 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
2265 {
2266         ocs_hw_io_t *io;
2267         int32_t rc;
2268
2269         ocs_lock(&hw->io_lock);
2270
2271         while (!ocs_list_empty(&hw->io_port_dnrx)) {
2272                 io = ocs_list_remove_head(&hw->io_port_dnrx);
2273                 rc = ocs_hw_reque_xri(hw, io);
2274                 if (rc) {
2275                         break;
2276                 }
2277         }
2278
2279         ocs_unlock(&hw->io_lock);
2280 }
2281
2282 /**
2283  * @brief Called when the POST_SGL_PAGE command completes.
2284  *
2285  * @par Description
2286  * Free the mailbox command buffer.
2287  *
2288  * @param hw Hardware context.
2289  * @param status Status field from the mbox completion.
2290  * @param mqe Mailbox response structure.
2291  * @param arg Pointer to a callback function that signals the caller that the command is done.
2292  *
2293  * @return Returns 0.
2294  */
2295 static int32_t
2296 ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
2297 {
2298         if (status != 0) {
2299                 ocs_log_debug(hw->os, "Status 0x%x\n", status);
2300         }
2301
2302         ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2303         return 0;
2304 }
2305
2306 /**
2307  * @brief Prepares an XRI to move to the chip.
2308  *
2309  * @par Description
2310  * Puts the data SGL into the SGL list for the IO object and possibly registers
2311  * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
2312  * mailbox commands, we don't need to wait for completion before proceeding.
2313  *
2314  * @param hw Hardware context allocated by the caller.
2315  * @param io Pointer to the IO object.
2316  *
2317  * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
2318  */
2319 ocs_hw_rtn_e
2320 ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
2321 {
2322         /* We only need to preregister the SGL if it has not yet been done. */
2323         if (!sli_get_sgl_preregister(&hw->sli)) {
2324                 uint8_t *post_sgl;
2325                 ocs_dma_t *psgls = &io->def_sgl;
2326                 ocs_dma_t **sgls = &psgls;
2327
2328                 /* non-local buffer required for mailbox queue */
2329                 post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2330                 if (post_sgl == NULL) {
2331                         ocs_log_err(hw->os, "no buffer for command\n");
2332                         return OCS_HW_RTN_NO_MEMORY;
2333                 }
2334                 /* on failure to format or to issue the command, release the buffer */
2335                 if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
2336                                                 io->indicator, 1, sgls, NULL, NULL) == 0 ||
2337                     ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
2338                                    ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
2339                         ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
2340                         ocs_log_err(hw->os, "SGL post failed\n");
2341                         return OCS_HW_RTN_ERROR;
2342                 }
2343         }
2344
2345         ocs_lock(&hw->io_lock);
2346         if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
2347                 ocs_unlock(&hw->io_lock);
2348                 return OCS_HW_RTN_ERROR;
2349         }
2350         ocs_unlock(&hw->io_lock);
2351         return OCS_HW_RTN_SUCCESS;
2352 }
2353
2354 /**
2355  * @brief Prepares an XRI to move back to the host.
2356  *
2357  * @par Description
2358  * Releases any attached buffer back to the pool.
2359  *
2360  * @param hw Hardware context allocated by the caller.
2361  * @param io Pointer to the IO object.
2362  */
2363 void
2364 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
2365 {
2366         if (io->axr_buf != NULL) {
2367                 ocs_lock(&hw->io_lock);
2368                         /* check the io_port_dnrx list and remove the IO if present */
2369                         if (ocs_list_on_list(&io->dnrx_link)) {
2370                                 ocs_list_remove(&hw->io_port_dnrx, io);
2371                                 io->auto_xfer_rdy_dnrx = 0;
2372
2373                                 /* release the count for waiting for a buffer */
2374                                 ocs_hw_io_free(hw, io);
2375                         }
2376
2377                         ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
2378                         io->axr_buf = NULL;
2379                 ocs_unlock(&hw->io_lock);
2380
2381                 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2382         }
2383         return;
2384 }
2385
2386
2387 /**
2388  * @brief Posts an auto xfer rdy buffer to an IO.
2389  *
2390  * @par Description
2391  * Puts the data SGL into the SGL list for the IO object.
2392  * @n
2393  * @b Note: io_lock must be held.
2394  *
2395  * @param hw Hardware context allocated by the caller.
2396  * @param io Pointer to the IO object.
2397  *
2398  * @return Returns the value of DNRX bit in the TRSP and ABORT WQEs.
2399  */
2400 uint8_t
2401 ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf)
2402 {
2403         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2404         sli4_sge_t      *data;
2405
2406         if (!reuse_buf) {
2407                 buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool);
2408                 io->axr_buf = buf;
                     if (buf == NULL) {
                             /* pool exhausted: report DNRX so the caller can queue the IO */
                             return 1;
                     }
2409         }
2410
2411         data = io->def_sgl.virt;
2412         data[0].sge_type = SLI4_SGE_TYPE_SKIP;
2413         data[0].last = 0;
2414
2415         /*
2416          * Note: if we are doing DIF assists, then the SGE[1] must contain the
2417          * DI_SEED SGE. The host is responsible for programming:
2418          *   SGE Type (Word 2, bits 30:27)
2419          *   Replacement App Tag (Word 2 bits 15:0)
2420          *   App Tag (Word 3 bits 15:0)
2421          *   New Ref Tag (Word 3 bit 23)
2422          *   Metadata Enable (Word 3 bit 20)
2423          *   Auto-Increment RefTag (Word 3 bit 19)
2424          *   Block Size (Word 3 bits 18:16)
2425          * The following fields are managed by the SLI Port:
2426          *    Ref Tag Compare (Word 0)
2427  *    Replacement Ref Tag (Word 1) - If not the LBA
2428          *    NA (Word 2 bit 25)
2429          *    Opcode RX (Word 3 bits 27:24)
2430          *    Checksum Enable (Word 3 bit 22)
2431          *    RefTag Enable (Word 3 bit 21)
2432          *
2433  * The first two SGLs are cleared by ocs_hw_io_init_sges(), so assume everything is cleared.
2434          */
2435         if (hw->config.auto_xfer_rdy_p_type) {
2436                 sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t*)&data[1];
2437
2438                 diseed->sge_type = SLI4_SGE_TYPE_DISEED;
2439                 diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value;
2440                 diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value;
2441                 diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid;
2442                 diseed->auto_incr_ref_tag = TRUE; /* Always the LBA */
2443                 diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
2444         } else {
2445                 data[1].sge_type = SLI4_SGE_TYPE_SKIP;
2446                 data[1].last = 0;
2447         }
2448
2449         data[2].sge_type = SLI4_SGE_TYPE_DATA;
2450         data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys);
2451         data[2].buffer_address_low  = ocs_addr32_lo(io->axr_buf->payload.dma.phys);
2452         data[2].buffer_length = io->axr_buf->payload.dma.size;
2453         data[2].last = TRUE;
2454         data[3].sge_type = SLI4_SGE_TYPE_SKIP;
2455
2456         return 0;
2457 }
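
/*
 * Resulting def_sgl layout built above (summary; entries beyond [3] are left
 * as initialized by ocs_hw_io_init_sges()):
 *
 *   data[0]  SKIP
 *   data[1]  SKIP, or DISEED when hw->config.auto_xfer_rdy_p_type is set
 *   data[2]  DATA -> io->axr_buf->payload.dma (last = TRUE)
 *   data[3]  SKIP
 */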
2458
2459 /**
2460  * @brief Return auto xfer ready buffers (while in RQ pair mode).
2461  *
2462  * @par Description
2463  * The header and payload buffers are returned to the auto xfer rdy pool.
2464  *
2465  * @param hw Hardware context.
2466  * @param seq Header/payload sequence buffers.
2467  *
2468  * @return Returns OCS_HW_RTN_SUCCESS for success, an error code value for failure.
2469  */
2470
2471 static ocs_hw_rtn_e
2472 ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2473 {
2474         ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc;
2475
2476         buf->data_cqe = 0;
2477         buf->cmd_cqe = 0;
2478         buf->fcfi = 0;
2479         buf->call_axr_cmd = 0;
2480         buf->call_axr_data = 0;
2481
2482         /* build a fake data header in big endian */
2483         buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2484         buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2485         buf->hdr.type = FC_TYPE_FCP;
2486         buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2487                                         FC_FCTL_FIRST_SEQUENCE |
2488                                         FC_FCTL_LAST_SEQUENCE |
2489                                         FC_FCTL_END_SEQUENCE |
2490                                         FC_FCTL_SEQUENCE_INITIATIVE);
2491
2492         /* build the fake header DMA object */
2493         buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2494         buf->header.dma.virt = &buf->hdr;
2495         buf->header.dma.alloc = buf;
2496         buf->header.dma.size = sizeof(buf->hdr);
2497         buf->header.dma.len = sizeof(buf->hdr);
2498         buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2499
2500         ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2501
2502         return OCS_HW_RTN_SUCCESS;
2503 }
2504
2505 /**
2506  * @ingroup devInitShutdown
2507  * @brief Free auto xfer rdy buffers.
2508  *
2509  * @par Description
2510  * Frees the auto xfer rdy buffers.
2511  *
2512  * @param hw Hardware context allocated by the caller.
2513  *
2514  * @return none
2515  */
2516 static void
2517 ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
2518 {
2519         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2520         uint32_t i;
2521
2522         if (hw->auto_xfer_rdy_buf_pool != NULL) {
2523                 ocs_lock(&hw->io_lock);
2524                         for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
2525                                 buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2526                                 if (buf != NULL) {
2527                                         ocs_dma_free(hw->os, &buf->payload.dma);
2528                                 }
2529                         }
2530                 ocs_unlock(&hw->io_lock);
2531
2532                 ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
2533                 hw->auto_xfer_rdy_buf_pool = NULL;
2534         }
2535 }
2536
2537 /**
2538  * @ingroup devInitShutdown
2539  * @brief Configure the rq_pair function from ocs_hw_init().
2540  *
2541  * @par Description
2542  * Allocates the buffers to auto xfer rdy and posts initial XRIs for this feature.
2543  *
2544  * @param hw Hardware context allocated by the caller.
2545  *
2546  * @return Returns 0 on success, or a non-zero value on failure.
2547  */
2548 ocs_hw_rtn_e
2549 ocs_hw_rqpair_init(ocs_hw_t *hw)
2550 {
2551         ocs_hw_rtn_e    rc;
2552         uint32_t xris_posted;
2553
2554         ocs_log_debug(hw->os, "RQ Pair mode\n");
2555
2556         /*
2557          * If we get this far, the auto XFR_RDY feature was enabled successfully, otherwise ocs_hw_init() would
2558          * return with an error. So allocate the buffers based on the initial XRI pool required to support this
2559          * feature.
2560          */
2561         if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
2562             hw->config.auto_xfer_rdy_size > 0) {
2563                 if (hw->auto_xfer_rdy_buf_pool == NULL) {
2564                         /*
2565                          * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
2566                          * one to post back for the case where the response phase is started in the context of
2567                          * the data completion.
2568                          */
2569                         rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
2570                         if (rc != OCS_HW_RTN_SUCCESS) {
2571                                 return rc;
2572                         }
2573                 } else {
2574                         ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);
2575                 }
2576
2577                 /* Post the auto XFR_RDY XRIs */
2578                 xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
2579                 if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
2580                         ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
2581                         return OCS_HW_RTN_ERROR;
2582                 }
2583         }
2584
2585         return OCS_HW_RTN_SUCCESS;
2586 }
2587
2588 /**
2589  * @ingroup devInitShutdown
2590  * @brief Tear down the rq_pair function from ocs_hw_teardown().
2591  *
2592  * @par Description
2593  * Frees the buffers to auto xfer rdy.
2594  *
2595  * @param hw Hardware context allocated by the caller.
2596  */
2597 void
2598 ocs_hw_rqpair_teardown(ocs_hw_t *hw)
2599 {
2600         /* We need to free any auto xfer ready buffers */
2601         ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);
2602 }