/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/**
 * @file
 * Creation, teardown, and topology-spec parsing for the SLI-4 hardware
 * queues (EQ, CQ, WQ, RQ, and MQ).
 */

#include "ocs_os.h"
#include "ocs_hw.h"
#include "ocs_hw_queues.h"

#define HW_QTOP_DEBUG           0

/**
 * @brief Initialize queues
 *
 * Given the parsed queue topology spec, the SLI queues are created and
 * initialized
 *
 * @param hw pointer to HW object
 * @param qtop pointer to queue topology
 *
 * @return returns 0 for success, an error code value for failure.
 */
ocs_hw_rtn_e
ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
{
        uint32_t i, j;
        uint32_t default_lengths[QTOP_LAST], len;
        uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0;
        uint8_t rqset_filter_mask = 0;
        hw_eq_t *eqs[hw->config.n_rq];
        hw_cq_t *cqs[hw->config.n_rq];
        hw_rq_t *rqs[hw->config.n_rq];
        ocs_hw_qtop_entry_t *qt, *next_qt;
        ocs_hw_mrq_t mrq;
        bool use_mrq = FALSE;

        hw_eq_t *eq = NULL;
        hw_cq_t *cq = NULL;
        hw_wq_t *wq = NULL;
        hw_rq_t *rq = NULL;
        hw_mq_t *mq = NULL;

        ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG);

        mrq.num_pairs = 0;
        default_lengths[QTOP_EQ] = 1024;
        default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ];
        default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ];
        default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ];
        default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH;

        hw->eq_count = 0;
        hw->cq_count = 0;
        hw->mq_count = 0;
        hw->wq_count = 0;
        hw->rq_count = 0;
        hw->hw_rq_count = 0;
        ocs_list_init(&hw->eq_list, hw_eq_t, link);

        /* If MRQ is requested, check whether it is supported by the SLI port. */
        if ((hw->config.n_rq > 1) && !hw->sli.config.features.flag.mrqp) {
                ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n");
                return OCS_HW_RTN_ERROR;
        }

        if (hw->config.n_rq > 1)
                use_mrq = TRUE;

        /* Allocate class WQ pools */
        for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
                hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
                if (hw->wq_class_array[i] == NULL) {
                        ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n");
                        return OCS_HW_RTN_NO_MEMORY;
                }
        }

        /* Allocate per CPU WQ pools */
        for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
                hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
                if (hw->wq_cpu_array[i] == NULL) {
                        ocs_log_err(hw->os, "ocs_varray_alloc for wq_cpu failed\n");
                        return OCS_HW_RTN_NO_MEMORY;
                }
        }

        ocs_hw_assert(qtop != NULL);

        for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
                if (i == qtop->inuse_count - 1)
                        next_qt = NULL;
                else
                        next_qt = qt + 1;

                switch(qt->entry) {
                case QTOP_EQ:
                        len = (qt->len) ? qt->len : default_lengths[QTOP_EQ];

                        if (qt->set_default) {
                                default_lengths[QTOP_EQ] = len;
                                break;
                        }

                        eq = hw_new_eq(hw, len);
                        if (eq == NULL) {
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_NO_MEMORY;
                        }
                        break;

                case QTOP_CQ:
                        len = (qt->len) ? qt->len : default_lengths[QTOP_CQ];

                        if (qt->set_default) {
                                default_lengths[QTOP_CQ] = len;
                                break;
                        }

                        if (!eq || !next_qt) {
                                goto fail;
                        }

                        /* If this CQ is for MRQ, then delay the creation */
                        if (!use_mrq || next_qt->entry != QTOP_RQ) {
                                cq = hw_new_cq(eq, len);
                                if (cq == NULL) {
                                        goto fail;
                                }
                        }
                        break;

                case QTOP_WQ: {
                        len = (qt->len) ? qt->len : default_lengths[QTOP_WQ];
                        if (qt->set_default) {
                                default_lengths[QTOP_WQ] = len;
                                break;
                        }

                        if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
                                ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp);
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_NO_MEMORY;
                        }

                        if (cq == NULL)
                                goto fail;

                        wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);
                        if (wq == NULL) {
                                goto fail;
                        }

                        /* Place this WQ on the EQ WQ array */
                        if (ocs_varray_add(eq->wq_array, wq)) {
                                ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n");
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        /* Place this WQ on the HW class array */
                        if (qt->class < ARRAY_SIZE(hw->wq_class_array)) {
                                if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) {
                                        ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n");
                                        hw_queue_teardown(hw);
                                        return OCS_HW_RTN_ERROR;
                                }
                        } else {
                                ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class);
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        /*
                         * Place this WQ on the per-CPU list, assuming that EQs are mapped
                         * to the CPU given by the EQ instance modulo the number of CPUs
                         */
                        if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) {
                                ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n");
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        break;
                }
                case QTOP_RQ: {
                        len = (qt->len) ? qt->len : default_lengths[QTOP_RQ];
                        if (qt->set_default) {
                                default_lengths[QTOP_RQ] = len;
                                break;
                        }

                        if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
                                ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp);
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_NO_MEMORY;
                        }

                        if (use_mrq) {
                                mrq.rq_cfg[mrq.num_pairs].len = len;
                                mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp;
                                mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask;
                                mrq.rq_cfg[mrq.num_pairs].eq = eq;
                                mrq.num_pairs++;
                        } else {
                                rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp);
                                if (rq == NULL) {
                                        hw_queue_teardown(hw);
                                        return OCS_HW_RTN_NO_MEMORY;
                                }
                                rq->filter_mask = qt->filter_mask;
                        }
                        break;
                }

                case QTOP_MQ:
                        len = (qt->len) ? qt->len : default_lengths[QTOP_MQ];
                        if (qt->set_default) {
                                default_lengths[QTOP_MQ] = len;
                                break;
                        }

                        if (cq == NULL)
                                goto fail;

                        mq = hw_new_mq(cq, len);
                        if (mq == NULL) {
                                goto fail;
                        }
                        break;

                default:
                        ocs_hw_assert(0);
                        break;
                }
        }

        if (mrq.num_pairs) {
                /* First create normal RQs. */
                for (i = 0; i < mrq.num_pairs; i++) {
                        for (j = 0; j < mrq.num_pairs; j++) {
                                if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) {
                                        /* This should be created using set */
                                        if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) {
                                                ocs_log_crit(hw->os, "Can't create more than one RQ set\n");
                                                hw_queue_teardown(hw);
                                                return OCS_HW_RTN_ERROR;
                                        } else if (!rqset_filter_mask) {
                                                rqset_filter_mask = mrq.rq_cfg[i].filter_mask;
                                                rqset_len = mrq.rq_cfg[i].len;
                                                rqset_ulp = mrq.rq_cfg[i].ulp;
                                        }
                                        eqs[rqset_count] = mrq.rq_cfg[i].eq;
                                        rqset_count++;
                                        break;
                                }
                        }
                        if (j == mrq.num_pairs) {
                                /* Normal RQ */
                                cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]);
                                if (cq == NULL) {
                                        hw_queue_teardown(hw);
                                        return OCS_HW_RTN_NO_MEMORY;
                                }

                                rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp);
                                if (rq == NULL) {
                                        hw_queue_teardown(hw);
                                        return OCS_HW_RTN_NO_MEMORY;
                                }
                                rq->filter_mask = mrq.rq_cfg[i].filter_mask;
                        }
                }

                /* Now create the RQ set */
                if (rqset_count) {
                        if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) {
                                ocs_log_crit(hw->os,
                                             "Max supported MRQ pairs = %d\n",
                                             OCE_HW_MAX_NUM_MRQ_PAIRS);
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        /* Create CQ set */
                        if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) {
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        /* Create RQ set */
                        if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) {
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        for (i = 0; i < rqset_count; i++) {
                                rqs[i]->filter_mask = rqset_filter_mask;
                                rqs[i]->is_mrq = TRUE;
                                rqs[i]->base_mrq_id = rqs[0]->hdr->id;
                        }

                        hw->hw_mrq_count = rqset_count;
                }
        }

        return OCS_HW_RTN_SUCCESS;
fail:
        hw_queue_teardown(hw);
        return OCS_HW_RTN_NO_MEMORY;
}
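
/*
 * Example (illustrative sketch, not part of the driver): queue bring-up is
 * driven by a parsed topology. The topology string below is hypothetical,
 * and the parse step is assumed to be performed by the driver's qtop parser
 * before this function runs.
 *
 *      ocs_hw_qtop_t *qtop;
 *
 *      qtop = ocs_hw_qtop_parse(hw, "eq cq wq cq rq cq mq");
 *      if ((qtop == NULL) ||
 *          (ocs_hw_init_queues(hw, qtop) != OCS_HW_RTN_SUCCESS)) {
 *              return an error; queues are already torn down on the
 *              internal failure paths of ocs_hw_init_queues()
 *      }
 */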

/**
 * @brief Allocate a new EQ object
 *
 * A new EQ object is instantiated
 *
 * @param hw pointer to HW object
 * @param entry_count number of entries in the EQ
 *
 * @return pointer to allocated EQ object
 */
hw_eq_t*
hw_new_eq(ocs_hw_t *hw, uint32_t entry_count)
{
        hw_eq_t *eq = ocs_malloc(hw->os, sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT);

        if (eq != NULL) {
                eq->type = SLI_QTYPE_EQ;
                eq->hw = hw;
                eq->entry_count = entry_count;
                eq->instance = hw->eq_count++;
                eq->queue = &hw->eq[eq->instance];
                ocs_list_init(&eq->cq_list, hw_cq_t, link);

                eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
                if (eq->wq_array == NULL) {
                        ocs_free(hw->os, eq, sizeof(*eq));
                        eq = NULL;
                } else {
                        if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) {
                                ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance);
                                ocs_free(hw->os, eq, sizeof(*eq));
                                eq = NULL;
                        } else {
                                sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
                                hw->hw_eq[eq->instance] = eq;
                                ocs_list_add_tail(&hw->eq_list, eq);
                                ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id,
                                        eq->entry_count);
                        }
                }
        }
        return eq;
}

/**
 * @brief Allocate a new CQ object
 *
 * A new CQ object is instantiated
 *
 * @param eq pointer to parent EQ object
 * @param entry_count number of entries in the CQ
 *
 * @return pointer to allocated CQ object
 */
hw_cq_t*
hw_new_cq(hw_eq_t *eq, uint32_t entry_count)
{
        ocs_hw_t *hw = eq->hw;
        hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);

        if (cq != NULL) {
                cq->eq = eq;
                cq->type = SLI_QTYPE_CQ;
                cq->instance = eq->hw->cq_count++;
                cq->entry_count = entry_count;
                cq->queue = &hw->cq[cq->instance];

                ocs_list_init(&cq->q_list, hw_q_t, link);

                if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) {
                        ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
                                cq->instance,
                                cq->entry_count);
                        ocs_free(hw->os, cq, sizeof(*cq));
                        cq = NULL;
                } else {
                        hw->hw_cq[cq->instance] = cq;
                        ocs_list_add_tail(&eq->cq_list, cq);
                        ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id,
                                cq->entry_count);
                }
        }
        return cq;
}
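
/*
 * Example (illustrative sketch): queue objects are parented EQ -> CQ ->
 * {WQ, RQ, MQ}, so a minimal chain is built top-down; the lengths here are
 * arbitrary example values.
 *
 *      hw_eq_t *eq = hw_new_eq(hw, 1024);
 *      hw_cq_t *cq = (eq != NULL) ? hw_new_cq(eq, 1024) : NULL;
 *
 *      if (cq == NULL) {
 *              hw_queue_teardown(hw);
 *      }
 */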

/**
 * @brief Allocate a new CQ Set of objects.
 *
 * @param eqs pointer to a set of EQ objects.
 * @param cqs pointer to a set of CQ objects to be returned.
 * @param num_cqs number of CQ queues in the set.
 * @param entry_count number of entries in the CQ.
 *
 * @return 0 on success and -1 on failure.
 */
uint32_t
hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count)
{
        uint32_t i;
        ocs_hw_t *hw = eqs[0]->hw;
        sli4_t *sli4 = &hw->sli;
        hw_cq_t *cq = NULL;
        sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT];

        /* Initialise CQS pointers to NULL */
        for (i = 0; i < num_cqs; i++) {
                cqs[i] = NULL;
        }

        for (i = 0; i < num_cqs; i++) {
                cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
                if (cq == NULL)
                        goto error;

                cqs[i]          = cq;
                cq->eq          = eqs[i];
                cq->type        = SLI_QTYPE_CQ;
                cq->instance    = hw->cq_count++;
                cq->entry_count = entry_count;
                cq->queue       = &hw->cq[cq->instance];
                qs[i]           = cq->queue;
                assocs[i]       = eqs[i]->queue;
                ocs_list_init(&cq->q_list, hw_q_t, link);
        }

        if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {
                ocs_log_err(hw->os, "Failed to create CQ set\n");
                goto error;
        }

        for (i = 0; i < num_cqs; i++) {
                hw->hw_cq[cqs[i]->instance] = cqs[i];
                ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]);
        }

        return 0;

error:
        for (i = 0; i < num_cqs; i++) {
                if (cqs[i]) {
                        ocs_free(hw->os, cqs[i], sizeof(*cqs[i]));
                        cqs[i] = NULL;
                }
        }
        return -1;
}
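
/*
 * Example (illustrative sketch): hw_new_cq_set() fills the caller's cqs[]
 * array with one CQ per EQ, all created by a single SLI command. The eqs[]
 * array is assumed to have been populated by earlier hw_new_eq() calls.
 *
 *      hw_cq_t *cqs[SLI_MAX_CQ_SET_COUNT];
 *
 *      if (hw_new_cq_set(eqs, cqs, num_eqs, 1024)) {
 *              hw_queue_teardown(hw);
 *      }
 */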

/**
 * @brief Allocate a new MQ object
 *
 * A new MQ object is instantiated
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the MQ
 *
 * @return pointer to allocated MQ object
 */
hw_mq_t*
hw_new_mq(hw_cq_t *cq, uint32_t entry_count)
{
        ocs_hw_t *hw = cq->eq->hw;
        hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT);

        if (mq != NULL) {
                mq->cq = cq;
                mq->type = SLI_QTYPE_MQ;
                mq->instance = cq->eq->hw->mq_count++;
                mq->entry_count = entry_count;
                mq->entry_size = OCS_HW_MQ_DEPTH;
                mq->queue = &hw->mq[mq->instance];

                if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ,
                                    mq->queue,
                                    mq->entry_size,
                                    cq->queue, 0)) {
                        ocs_log_err(hw->os, "MQ allocation failure\n");
                        ocs_free(hw->os, mq, sizeof(*mq));
                        mq = NULL;
                } else {
                        hw->hw_mq[mq->instance] = mq;
                        ocs_list_add_tail(&cq->q_list, mq);
                        ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id,
                                mq->entry_count);
                }
        }
        return mq;
}

/**
 * @brief Allocate a new WQ object
 *
 * A new WQ object is instantiated
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the WQ
 * @param class WQ class
 * @param ulp index of chute
 *
 * @return pointer to allocated WQ object
 */
hw_wq_t*
hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp)
{
        ocs_hw_t *hw = cq->eq->hw;
        hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT);

        if (wq != NULL) {
                wq->hw = cq->eq->hw;
                wq->cq = cq;
                wq->type = SLI_QTYPE_WQ;
                wq->instance = cq->eq->hw->wq_count++;
                wq->entry_count = entry_count;
                wq->queue = &hw->wq[wq->instance];
                wq->ulp = ulp;
                wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT;
                wq->wqec_count = wq->wqec_set_count;
                wq->free_count = wq->entry_count - 1;
                wq->class = class;
                ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link);

                if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
                        ocs_log_err(hw->os, "WQ allocation failure\n");
                        ocs_free(hw->os, wq, sizeof(*wq));
                        wq = NULL;
                } else {
                        hw->hw_wq[wq->instance] = wq;
                        ocs_list_add_tail(&cq->q_list, wq);
                        ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
                                wq->entry_count, wq->class, wq->ulp);
                }
        }
        return wq;
}
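
/*
 * Example (illustrative sketch): a WQ is created against its parent CQ;
 * ocs_hw_init_queues() then also posts it to the per-EQ, per-class, and
 * per-CPU varrays so that ocs_hw_queue_next_wq() can find it. Class 1 is
 * an example value.
 *
 *      hw_wq_t *wq = hw_new_wq(cq, 1024, 1, hw->ulp_start);
 *
 *      if (wq == NULL) {
 *              hw_queue_teardown(hw);
 *      }
 */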

/**
 * @brief Allocate a hw_rq_t object
 *
 * Allocate an RQ object, which encapsulates 2 SLI queues (for rq pair)
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the RQs
 * @param ulp ULP index for this RQ
 *
 * @return pointer to newly allocated hw_rq_t
 */
hw_rq_t*
hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp)
{
        ocs_hw_t *hw = cq->eq->hw;
        hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
        uint32_t max_hw_rq;

        ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);

        if (rq != NULL) {
                rq->instance = hw->hw_rq_count++;
                rq->cq = cq;
                rq->type = SLI_QTYPE_RQ;
                rq->ulp = ulp;

                rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));

                /* Create the header RQ */
                ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
                rq->hdr = &hw->rq[hw->rq_count];
                rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;

                if (sli_fc_rq_alloc(&hw->sli, rq->hdr,
                                    rq->entry_count,
                                    rq->hdr_entry_size,
                                    cq->queue,
                                    ulp, TRUE)) {
                        ocs_log_err(hw->os, "RQ allocation failure - header\n");
                        ocs_free(hw->os, rq, sizeof(*rq));
                        return NULL;
                }
                hw->hw_rq_lookup[hw->rq_count] = rq->instance;  /* Update hw_rq_lookup[] */
                hw->rq_count++;
                ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr  size %4d ulp %d\n",
                        rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp);

                /* Create the default data RQ */
                ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
                rq->data = &hw->rq[hw->rq_count];
                rq->data_entry_size = hw->config.rq_default_buffer_size;

                if (sli_fc_rq_alloc(&hw->sli, rq->data,
                                    rq->entry_count,
                                    rq->data_entry_size,
                                    cq->queue,
                                    ulp, FALSE)) {
                        ocs_log_err(hw->os, "RQ allocation failure - first burst\n");
                        ocs_free(hw->os, rq, sizeof(*rq));
                        return NULL;
                }
                hw->hw_rq_lookup[hw->rq_count] = rq->instance;  /* Update hw_rq_lookup[] */
                hw->rq_count++;
                ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance,
                        rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp);

                hw->hw_rq[rq->instance] = rq;
                ocs_list_add_tail(&cq->q_list, rq);

                rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
                                            rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
                if (rq->rq_tracker == NULL) {
                        ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
                        return NULL;
                }
        }
        return rq;
}
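
/*
 * Example (illustrative sketch): each hw_rq_t is an RQ pair; one call
 * allocates both the header and data SLI queues plus the rq_tracker array
 * used to map received entries back to sequence objects. The filter mask
 * value is an example.
 *
 *      hw_rq_t *rq = hw_new_rq(cq, 1024, hw->ulp_start);
 *
 *      if (rq != NULL) {
 *              rq->filter_mask = 1;
 *      }
 */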

/**
 * @brief Allocate a hw_rq_t object SET
 *
 * Allocate an RQ object SET, where each element in set
 * encapsulates 2 SLI queues (for rq pair)
 *
 * @param cqs pointers to be associated with RQs.
 * @param rqs RQ pointers to be returned on success.
 * @param num_rq_pairs number of rq pairs in the Set.
 * @param entry_count number of entries in the RQs
 * @param ulp ULP index for this RQ
 *
 * @return 0 on success and -1 on failure.
 */
uint32_t
hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp)
{
        ocs_hw_t *hw = cqs[0]->eq->hw;
        hw_rq_t *rq = NULL;
        sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL };
        uint32_t max_hw_rq, i, q_count;

        ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);

        /* Initialise RQS pointers */
        for (i = 0; i < num_rq_pairs; i++) {
                rqs[i] = NULL;
        }

        for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
                rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
                if (rq == NULL)
                        goto error;

                rqs[i] = rq;
                rq->instance = hw->hw_rq_count++;
                rq->cq = cqs[i];
                rq->type = SLI_QTYPE_RQ;
                rq->ulp = ulp;
                rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));

                /* Header RQ */
                rq->hdr = &hw->rq[hw->rq_count];
                rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
                hw->hw_rq_lookup[hw->rq_count] = rq->instance;
                hw->rq_count++;
                qs[q_count] = rq->hdr;

                /* Data RQ */
                rq->data = &hw->rq[hw->rq_count];
                rq->data_entry_size = hw->config.rq_default_buffer_size;
                hw->hw_rq_lookup[hw->rq_count] = rq->instance;
                hw->rq_count++;
                qs[q_count + 1] = rq->data;

                rq->rq_tracker = NULL;
        }

        if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
                            cqs[0]->queue->id,
                            rqs[0]->entry_count,
                            rqs[0]->hdr_entry_size,
                            rqs[0]->data_entry_size,
                            ulp)) {
                ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);
                goto error;
        }

        for (i = 0; i < num_rq_pairs; i++) {
                hw->hw_rq[rqs[i]->instance] = rqs[i];
                ocs_list_add_tail(&cqs[i]->q_list, rqs[i]);
                rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
                                            rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
                if (rqs[i]->rq_tracker == NULL) {
                        ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
                        goto error;
                }
        }

        return 0;

error:
        for (i = 0; i < num_rq_pairs; i++) {
                if (rqs[i] != NULL) {
                        if (rqs[i]->rq_tracker != NULL) {
                                ocs_free(hw->os, rqs[i]->rq_tracker,
                                         sizeof(ocs_hw_sequence_t*) *
                                         rqs[i]->entry_count);
                        }
                        ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
                }
        }

        return -1;
}
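
/*
 * Example (illustrative sketch): an RQ set is how an MRQ topology is
 * realized; the cqs[] array would normally come from hw_new_cq_set(), and
 * all pairs in the set share one length, ULP, and (later) filter mask.
 *
 *      hw_rq_t *rqs[OCE_HW_MAX_NUM_MRQ_PAIRS];
 *
 *      if (hw_new_rq_set(cqs, rqs, num_pairs, 1024, hw->ulp_start)) {
 *              hw_queue_teardown(hw);
 *      }
 */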

/**
 * @brief Free an EQ object
 *
 * The EQ object and any child queue objects are freed
 *
 * @param eq pointer to EQ object
 *
 * @return none
 */
void
hw_del_eq(hw_eq_t *eq)
{
        if (eq != NULL) {
                hw_cq_t *cq;
                hw_cq_t *cq_next;

                ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) {
                        hw_del_cq(cq);
                }
                ocs_varray_free(eq->wq_array);
                ocs_list_remove(&eq->hw->eq_list, eq);
                eq->hw->hw_eq[eq->instance] = NULL;
                ocs_free(eq->hw->os, eq, sizeof(*eq));
        }
}

/**
 * @brief Free a CQ object
 *
 * The CQ object and any child queue objects are freed
 *
 * @param cq pointer to CQ object
 *
 * @return none
 */
void
hw_del_cq(hw_cq_t *cq)
{
        if (cq != NULL) {
                hw_q_t *q;
                hw_q_t *q_next;

                ocs_list_foreach_safe(&cq->q_list, q, q_next) {
                        switch(q->type) {
                        case SLI_QTYPE_MQ:
                                hw_del_mq((hw_mq_t*) q);
                                break;
                        case SLI_QTYPE_WQ:
                                hw_del_wq((hw_wq_t*) q);
                                break;
                        case SLI_QTYPE_RQ:
                                hw_del_rq((hw_rq_t*) q);
                                break;
                        default:
                                break;
                        }
                }
                ocs_list_remove(&cq->eq->cq_list, cq);
                cq->eq->hw->hw_cq[cq->instance] = NULL;
                ocs_free(cq->eq->hw->os, cq, sizeof(*cq));
        }
}

/**
 * @brief Free an MQ object
 *
 * The MQ object is freed
 *
 * @param mq pointer to MQ object
 *
 * @return none
 */
void
hw_del_mq(hw_mq_t *mq)
{
        if (mq != NULL) {
                ocs_list_remove(&mq->cq->q_list, mq);
                mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
                ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq));
        }
}

/**
 * @brief Free a WQ object
 *
 * The WQ object is freed
 *
 * @param wq pointer to WQ object
 *
 * @return none
 */
void
hw_del_wq(hw_wq_t *wq)
{
        if (wq != NULL) {
                ocs_list_remove(&wq->cq->q_list, wq);
                wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
                ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq));
        }
}

/**
 * @brief Free an RQ object
 *
 * The RQ object is freed
 *
 * @param rq pointer to RQ object
 *
 * @return none
 */
void
hw_del_rq(hw_rq_t *rq)
{
        if (rq != NULL) {
                ocs_hw_t *hw = rq->cq->eq->hw;

                /* Free RQ tracker */
                if (rq->rq_tracker != NULL) {
                        ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);
                        rq->rq_tracker = NULL;
                }
                ocs_list_remove(&rq->cq->q_list, rq);
                hw->hw_rq[rq->instance] = NULL;
                ocs_free(hw->os, rq, sizeof(*rq));
        }
}

/**
 * @brief Display HW queue objects
 *
 * The HW queue objects are displayed using ocs_log
 *
 * @param hw pointer to HW object
 *
 * @return none
 */
void
hw_queue_dump(ocs_hw_t *hw)
{
        hw_eq_t *eq;
        hw_cq_t *cq;
        hw_q_t *q;
        hw_mq_t *mq;
        hw_wq_t *wq;
        hw_rq_t *rq;

        ocs_list_foreach(&hw->eq_list, eq) {
                ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
                ocs_list_foreach(&eq->cq_list, cq) {
                        ocs_printf("  cq[%d] id %2d current\n", cq->instance, cq->queue->id);
                        ocs_list_foreach(&cq->q_list, q) {
                                switch(q->type) {
                                case SLI_QTYPE_MQ:
                                        mq = (hw_mq_t *) q;
                                        ocs_printf("    mq[%d] id %2d\n", mq->instance, mq->queue->id);
                                        break;
                                case SLI_QTYPE_WQ:
                                        wq = (hw_wq_t *) q;
                                        ocs_printf("    wq[%d] id %2d\n", wq->instance, wq->queue->id);
                                        break;
                                case SLI_QTYPE_RQ:
                                        rq = (hw_rq_t *) q;
                                        ocs_printf("    rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id);
                                        break;
                                default:
                                        break;
                                }
                        }
                }
        }
}

/**
 * @brief Teardown HW queue objects
 *
 * The HW queue objects are freed
 *
 * @param hw pointer to HW object
 *
 * @return none
 */
void
hw_queue_teardown(ocs_hw_t *hw)
{
        uint32_t i;
        hw_eq_t *eq;
        hw_eq_t *eq_next;

        if (ocs_list_valid(&hw->eq_list)) {
                ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) {
                        hw_del_eq(eq);
                }
        }
        for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
                ocs_varray_free(hw->wq_cpu_array[i]);
                hw->wq_cpu_array[i] = NULL;
        }
        for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
                ocs_varray_free(hw->wq_class_array[i]);
                hw->wq_class_array[i] = NULL;
        }
}

/**
 * @brief Allocate a WQ to an IO object
 *
 * The next work queue index is used to assign a WQ to an IO.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is
 * selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
 * the IO request came in on is selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
 * CPU the request is made on is selected.
 *
 * @param hw pointer to HW object
 * @param io pointer to IO object
 *
 * @return Return pointer to next WQ
 */
hw_wq_t *
ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
{
        hw_eq_t *eq;
        hw_wq_t *wq = NULL;

        switch(io->wq_steering) {
        case OCS_HW_WQ_STEERING_CLASS:
                if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
                        wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
                }
                break;
        case OCS_HW_WQ_STEERING_REQUEST:
                eq = io->eq;
                if (likely(eq != NULL)) {
                        wq = ocs_varray_iter_next(eq->wq_array);
                }
                break;
        case OCS_HW_WQ_STEERING_CPU: {
                uint32_t cpuidx = ocs_thread_getcpu();

                if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
                        wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);
                }
                break;
        }
        }

        if (unlikely(wq == NULL)) {
                wq = hw->hw_wq[0];
        }

        return wq;
}
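
/*
 * Example (illustrative sketch): a caller picks a steering policy on the
 * IO before requesting a WQ; whatever the policy, hw_wq[0] is the final
 * fallback.
 *
 *      io->wq_steering = OCS_HW_WQ_STEERING_CPU;
 *      wq = ocs_hw_queue_next_wq(hw, io);
 */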

/**
 * @brief Return count of EQs for a queue topology object
 *
 * The EQ count in the HW's queue topology (hw->qtop) object is returned
 *
 * @param hw pointer to HW object
 *
 * @return count of EQs
 */
uint32_t
ocs_hw_qtop_eq_count(ocs_hw_t *hw)
{
        return hw->qtop->entry_counts[QTOP_EQ];
}

#define TOKEN_LEN               32

/**
 * @brief return string given a QTOP entry
 *
 * @param entry QTOP entry
 *
 * @return returns string or "unknown"
 */
#if HW_QTOP_DEBUG
static char *
qtopentry2s(ocs_hw_qtop_entry_e entry) {
        switch(entry) {
        #define P(x)    case x: return #x;
        P(QTOP_EQ)
        P(QTOP_CQ)
        P(QTOP_WQ)
        P(QTOP_RQ)
        P(QTOP_MQ)
        P(QTOP_THREAD_START)
        P(QTOP_THREAD_END)
        P(QTOP_LAST)
        #undef P
        }
        return "unknown";
}
#endif

/**
 * @brief Declare token types
 */
typedef enum {
        TOK_LPAREN = 1,
        TOK_RPAREN,
        TOK_COLON,
        TOK_EQUALS,
        TOK_QUEUE,
        TOK_ATTR_NAME,
        TOK_NUMBER,
        TOK_NUMBER_VALUE,
        TOK_NUMBER_LIST,
} tok_type_e;

/**
 * @brief Declare token sub-types
 */
typedef enum {
        TOK_SUB_EQ = 100,
        TOK_SUB_CQ,
        TOK_SUB_RQ,
        TOK_SUB_MQ,
        TOK_SUB_WQ,
        TOK_SUB_LEN,
        TOK_SUB_CLASS,
        TOK_SUB_ULP,
        TOK_SUB_FILTER,
} tok_subtype_e;

/**
 * @brief convert queue subtype to QTOP entry
 *
 * @param q queue subtype
 *
 * @return QTOP entry or 0
 */
static ocs_hw_qtop_entry_e
subtype2qtop(tok_subtype_e q)
{
        switch(q) {
        case TOK_SUB_EQ:        return QTOP_EQ;
        case TOK_SUB_CQ:        return QTOP_CQ;
        case TOK_SUB_RQ:        return QTOP_RQ;
        case TOK_SUB_MQ:        return QTOP_MQ;
        case TOK_SUB_WQ:        return QTOP_WQ;
        default:
                break;
        }
        return 0;
}

/**
 * @brief Declare token object
 */
typedef struct {
        tok_type_e type;
        tok_subtype_e subtype;
        char string[TOKEN_LEN];
} tok_t;

/**
 * @brief Declare token array object
 */
typedef struct {
        tok_t *tokens;                  /* Pointer to array of tokens */
        uint32_t alloc_count;           /* Number of tokens in the array */
        uint32_t inuse_count;           /* Number of tokens posted to array */
        uint32_t iter_idx;              /* Iterator index */
} tokarray_t;

/**
 * @brief Declare token match structure
 */
typedef struct {
        char *s;
        tok_type_e type;
        tok_subtype_e subtype;
} tokmatch_t;

/**
 * @brief test if character is ID start character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID start character
 */
static int32_t
idstart(int c)
{
        return  isalpha(c) || (c == '_') || (c == '$');
}

/**
 * @brief test if character is an ID character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID character
 */
static int32_t
idchar(int c)
{
        return idstart(c) || ocs_isdigit(c);
}

/**
 * @brief Declare single character matches
 */
static tokmatch_t cmatches[] = {
        {"(", TOK_LPAREN},
        {")", TOK_RPAREN},
        {":", TOK_COLON},
        {"=", TOK_EQUALS},
};

/**
 * @brief Declare identifier match strings
 */
static tokmatch_t smatches[] = {
        {"eq", TOK_QUEUE, TOK_SUB_EQ},
        {"cq", TOK_QUEUE, TOK_SUB_CQ},
        {"rq", TOK_QUEUE, TOK_SUB_RQ},
        {"mq", TOK_QUEUE, TOK_SUB_MQ},
        {"wq", TOK_QUEUE, TOK_SUB_WQ},
        {"len", TOK_ATTR_NAME, TOK_SUB_LEN},
        {"class", TOK_ATTR_NAME, TOK_SUB_CLASS},
        {"ulp", TOK_ATTR_NAME, TOK_SUB_ULP},
        {"filter", TOK_ATTR_NAME, TOK_SUB_FILTER},
};
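
/*
 * Example (illustrative sketch): with the match tables above, a topology
 * string such as the hypothetical
 *
 *      "len:wq=256 eq cq mq eq cq wq:class=1:ulp=0"
 *
 * tokenizes into TOK_ATTR_NAME/TOK_COLON/TOK_QUEUE/TOK_EQUALS/TOK_NUMBER
 * sequences, which parse_topology() below turns into QTOP entries.
 */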

/**
 * @brief Scan string and return next token
 *
 * The string is scanned and the next token is returned
 *
 * @param s input string to scan
 * @param tok pointer to place scanned token
 *
 * @return pointer to input string following scanned token, or NULL
 */
static const char *
tokenize(const char *s, tok_t *tok)
{
        uint32_t i;

        memset(tok, 0, sizeof(*tok));

        /* Skip over whitespace */
        while (*s && ocs_isspace(*s)) {
                s++;
        }

        /* Return if nothing left in this string */
        if (*s == 0) {
                return NULL;
        }

        /* Look for single character matches */
        for (i = 0; i < ARRAY_SIZE(cmatches); i++) {
                if (cmatches[i].s[0] == *s) {
                        tok->type = cmatches[i].type;
                        tok->subtype = cmatches[i].subtype;
                        tok->string[0] = *s++;
                        return s;
                }
        }

        /* Scan for a hex number or decimal */
        if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) {
                char *p = tok->string;

                tok->type = TOK_NUMBER;

                *p++ = *s++;
                *p++ = *s++;
                while ((*s == ',') || ocs_isxdigit(*s)) {
                        if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
                                *p++ = *s;
                        }
                        if (*s == ',') {
                                tok->type = TOK_NUMBER_LIST;
                        }
                        s++;
                }
                *p = 0;
                return s;
        } else if (ocs_isdigit(*s)) {
                char *p = tok->string;

                tok->type = TOK_NUMBER;
                while ((*s == ',') || ocs_isdigit(*s)) {
                        if ((p - tok->string) < (int32_t)sizeof(tok->string)) {
                                *p++ = *s;
                        }
                        if (*s == ',') {
                                tok->type = TOK_NUMBER_LIST;
                        }
                        s++;
                }
                *p = 0;
                return s;
        }

        /* Scan for an ID */
        if (idstart(*s)) {
                char *p = tok->string;

                for (*p++ = *s++; idchar(*s); s++) {
                        if ((p - tok->string) < TOKEN_LEN) {
                                *p++ = *s;
                        }
                }

                /* See if this is a $ number value */
                if (tok->string[0] == '$') {
                        tok->type = TOK_NUMBER_VALUE;
                } else {
                        /* Look for a string match */
                        for (i = 0; i < ARRAY_SIZE(smatches); i++) {
                                if (strcmp(smatches[i].s, tok->string) == 0) {
                                        tok->type = smatches[i].type;
                                        tok->subtype = smatches[i].subtype;
                                        return s;
                                }
                        }
                }
        }
        return s;
}
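
/*
 * Example (illustrative sketch): tokenize() is driven in a loop, consuming
 * the input until it returns NULL; each scanned token would be posted to a
 * tokarray_t by the caller.
 *
 *      const char *s = qtop_string;
 *      tok_t tok;
 *
 *      while ((s = tokenize(s, &tok)) != NULL) {
 *              post tok to the token array here
 *      }
 */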

/**
 * @brief convert token type to string
 *
 * @param type token type
 *
 * @return string, or "unknown"
 */
static const char *
token_type2s(tok_type_e type)
{
        switch(type) {
        #define P(x)    case x: return #x;
        P(TOK_LPAREN)
        P(TOK_RPAREN)
        P(TOK_COLON)
        P(TOK_EQUALS)
        P(TOK_QUEUE)
        P(TOK_ATTR_NAME)
        P(TOK_NUMBER)
        P(TOK_NUMBER_VALUE)
        P(TOK_NUMBER_LIST)
        #undef P
        }
        return "unknown";
}

/**
 * @brief convert token sub-type to string
 *
 * @param subtype token sub-type
 *
 * @return string, or an empty string if unknown
 */
static const char *
token_subtype2s(tok_subtype_e subtype)
{
        switch(subtype) {
        #define P(x)    case x: return #x;
        P(TOK_SUB_EQ)
        P(TOK_SUB_CQ)
        P(TOK_SUB_RQ)
        P(TOK_SUB_MQ)
        P(TOK_SUB_WQ)
        P(TOK_SUB_LEN)
        P(TOK_SUB_CLASS)
        P(TOK_SUB_ULP)
        P(TOK_SUB_FILTER)
        #undef P
        }
        return "";
}

/**
 * @brief Generate syntax error message
 *
 * When a syntax error is found, the input tokens are dumped up to and
 * including the token that failed, as indicated by the current iterator
 * index.
 *
 * @param hw pointer to HW object
 * @param tokarray pointer to token array object
 *
 * @return none
 */
static void
tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
{
        uint32_t i;
        tok_t *tok;

        ocs_log_test(hw->os, "Syntax error:\n");

        for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
                ocs_log_test(hw->os, "%s [%2d]    %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : "   ", i,
                        token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
        }
}

/**
 * @brief parse a number
 *
 * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
 *
 * @param hw pointer to HW object
 * @param qtop pointer to QTOP object
 * @param tok pointer to token to parse
 *
 * @return numeric value
 */
static uint32_t
tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
{
        uint32_t rval = 0;
        uint32_t num_cpus = ocs_get_num_cpus();

        switch(tok->type) {
        case TOK_NUMBER_VALUE:
                if (ocs_strcmp(tok->string, "$ncpu") == 0) {
                        rval = num_cpus;
                } else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
                        rval = num_cpus - 1;
                } else if (ocs_strcmp(tok->string, "$nwq") == 0) {
                        if (hw != NULL) {
                                rval = hw->config.n_wq;
                        }
                } else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
                        rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
                } else if (ocs_strcmp(tok->string, "$nulp") == 0) {
                        rval = hw->ulp_max - hw->ulp_start + 1;
                } else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
                        rval = qtop->rptcount[qtop->rptcount_idx-1];
                } else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
                        rval = qtop->rptcount[qtop->rptcount_idx-2];
                } else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
                        rval = qtop->rptcount[qtop->rptcount_idx-3];
                } else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
                        rval = qtop->rptcount[qtop->rptcount_idx-4];
                } else {
                        rval = ocs_strtoul(tok->string, 0, 0);
                }
                break;
        case TOK_NUMBER:
                rval = ocs_strtoul(tok->string, 0, 0);
                break;
        default:
                break;
        }
        return rval;
}
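
/*
 * Worked example: besides literal numbers, tok_getnumber() resolves
 * symbolic values; on a 4-CPU system "$ncpu" yields 4 and "$ncpu1" yields
 * 3, while "$rpt0" yields the innermost pending repeat count recorded in
 * qtop->rptcount[] while a repeat section is being parsed.
 */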

/**
 * @brief parse an array of tokens
 *
 * The tokens are semantically parsed, to generate QTOP entries.
 *
 * @param hw pointer to HW object
 * @param tokarray array of tokens
 * @param qtop output QTOP object
 *
 * @return returns 0 for success, a negative error code value for failure.
 */
static int32_t
parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
{
        ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;
        tok_t *tok;

        for (; (tokarray->iter_idx < tokarray->inuse_count) &&
             ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
                if (qtop->inuse_count >= qtop->alloc_count) {
                        return -1;
                }

                qt = qtop->entries + qtop->inuse_count;

                switch (tok[0].type) {
                case TOK_QUEUE:
                        qt->entry = subtype2qtop(tok[0].subtype);
                        qt->set_default = FALSE;
                        qt->len = 0;
                        qt->class = 0;
                        qtop->inuse_count++;

                        tokarray->iter_idx++;           /* Advance current token index */

                        /* Parse for queue attributes, possibly multiple instances */
                        while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
                                tok = &tokarray->tokens[tokarray->iter_idx];
                                if ((tok[0].type == TOK_COLON) &&
                                    (tok[1].type == TOK_ATTR_NAME) &&
                                    (tok[2].type == TOK_EQUALS) &&
                                    ((tok[3].type == TOK_NUMBER) ||
                                     (tok[3].type == TOK_NUMBER_VALUE) ||
                                     (tok[3].type == TOK_NUMBER_LIST))) {

                                        switch (tok[1].subtype) {
                                        case TOK_SUB_LEN:
                                                qt->len = tok_getnumber(hw, qtop, &tok[3]);
                                                break;

                                        case TOK_SUB_CLASS:
                                                qt->class = tok_getnumber(hw, qtop, &tok[3]);
                                                break;

                                        case TOK_SUB_ULP:
                                                qt->ulp = tok_getnumber(hw, qtop, &tok[3]);
                                                break;

                                        case TOK_SUB_FILTER:
                                                if (tok[3].type == TOK_NUMBER_LIST) {
                                                        uint32_t mask = 0;
                                                        char *p = tok[3].string;

                                                        while ((p != NULL) && *p) {
                                                                uint32_t v;

                                                                v = ocs_strtoul(p, 0, 0);
                                                                if (v < 32) {
                                                                        mask |= (1U << v);
                                                                }

                                                                p = ocs_strchr(p, ',');
                                                                if (p != NULL) {
                                                                        p++;
                                                                }
                                                        }
                                                        qt->filter_mask = mask;
                                                } else {
                                                        qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));
                                                }
                                                break;
                                        default:
                                                break;
                                        }
                                        /* Advance current token index */
                                        tokarray->iter_idx += 4;
                                } else {
                                        break;
                                }
                        }
                        qtop->entry_counts[qt->entry]++;
                        break;

                case TOK_ATTR_NAME:
                        if (((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
                            (tok[1].type == TOK_COLON) &&
                            (tok[2].type == TOK_QUEUE) &&
                            (tok[3].type == TOK_EQUALS) &&
                            ((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
                                qt->entry = subtype2qtop(tok[2].subtype);
                                qt->set_default = TRUE;
                                switch(tok[0].subtype) {
                                case TOK_SUB_LEN:
                                        qt->len = tok_getnumber(hw, qtop, &tok[4]);
                                        break;
                                case TOK_SUB_CLASS:
                                        qt->class = tok_getnumber(hw, qtop, &tok[4]);
                                        break;
                                case TOK_SUB_ULP:
                                        qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
                                        break;
                                default:
                                        break;
                                }
                                qtop->inuse_count++;
                                tokarray->iter_idx += 5;
                        } else {
                                tok_syntax(hw, tokarray);
                                return -1;
                        }
                        break;

                case TOK_NUMBER:
                case TOK_NUMBER_VALUE: {
                        uint32_t rpt_count = 1;
1549                         uint32_t i;
1550
1551                         rpt_count = tok_getnumber(hw, qtop, tok);
1552
1553                         if (tok[1].type == TOK_LPAREN) {
1554                                 uint32_t iter_idx_save;
1555
1556                                 tokarray->iter_idx += 2;
1557
1558                                 /* save token array iteration index */
1559                                 iter_idx_save = tokarray->iter_idx;
1560
1561                                 for (i = 0; i < rpt_count; i++) {
1562                                         uint32_t rptcount_idx = qtop->rptcount_idx;
1563
1564                                         if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
1565                                                 qtop->rptcount[qtop->rptcount_idx++] = i;
1566                                         }
1567
1568                                         /* restore token array iteration index */
1569                                         tokarray->iter_idx = iter_idx_save;
1570
1571                                         /* parse, append to qtop */
1572                                         parse_topology(hw, tokarray, qtop);
1573
1574                                         qtop->rptcount_idx = rptcount_idx;
1575                                 }
1576                         } else {
                                /* A bare count must introduce a repeat group;
                                 * otherwise iter_idx would never advance and the
                                 * parse loop would not terminate. */
                                tok_syntax(hw, tokarray);
                                return -1;
                        }
1577                         break;
1578                 }
1579
1580                 case TOK_RPAREN:
1581                         tokarray->iter_idx++;
1582                         return 0;
1583
1584                 default:
1585                         tok_syntax(hw, tokarray);
1586                         return -1;
1587                 }
1588         }
1589         return 0;
1590 }
1591
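/*
 * Illustrative sketch, using a hypothetical topology string (defaults for
 * omitted lengths come from default_lengths[] in ocs_hw_init_queues()):
 *
 *      "len:wq=256 2(eq cq wq) eq cq rq:filter=0,1"
 *
 *      len:wq=256      ATTR_NAME form: emits a set_default=TRUE entry that
 *                      makes 256 the default WQ length
 *      2(eq cq wq)     repeat form: the parenthesized token run is parsed
 *                      twice, emitting EQ/CQ/WQ entries on each pass
 *      rq:filter=0,1   QUEUE form with a NUMBER_LIST attribute: the entry's
 *                      filter_mask becomes (1U << 0) | (1U << 1)
 */
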
1592 /**
1593  * @brief Parse queue topology string
1594  *
1595  * The queue topology object is allocated and filled with the results of parsing the
1596  * passed-in queue topology string.
1597  *
1598  * @param hw pointer to HW object
1599  * @param qtop_string input queue topology string
1600  *
1601  * @return pointer to allocated QTOP object, or NULL if there was an error
1602  */
1603 ocs_hw_qtop_t *
1604 ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
1605 {
1606         ocs_hw_qtop_t *qtop;
1607         tokarray_t tokarray;
1608         const char *s;
1609 #if HW_QTOP_DEBUG
1610         uint32_t i;
1611         ocs_hw_qtop_entry_t *qt;
1612 #endif
1613
1614         ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);
1615
1616         /* Allocate a token array */
1617         tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
1618         if (tokarray.tokens == NULL) {
1619                 return NULL;
1620         }
1621         tokarray.alloc_count = MAX_TOKENS;
1622         tokarray.inuse_count = 0;
1623         tokarray.iter_idx = 0;
1624
1625         /* Parse the tokens */
1626         for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
1627              ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count])) != NULL); ) {
1628                 tokarray.inuse_count++;
1629         }
1630
1631         /* Allocate a queue topology structure */
1632         qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
1633         if (qtop == NULL) {
1634                 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1635                 ocs_log_err(hw->os, "malloc qtop failed\n");
1636                 return NULL;
1637         }
1638         qtop->os = hw->os;
1639
1640         /* Allocate queue topology entries */
1641         qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES*sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
1642         if (qtop->entries == NULL) {
1643                 ocs_log_err(hw->os, "malloc qtop entries failed\n");
1644                 ocs_free(hw->os, qtop, sizeof(*qtop));
1645                 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1646                 return NULL;
1647         }
1648         qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
1649         qtop->inuse_count = 0;
1650
1651         /* Parse the tokens */
1652         parse_topology(hw, &tokarray, qtop);
1653 #if HW_QTOP_DEBUG
1654         for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
1655                 ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n", qtopentry2s(qt->entry), qt->set_default, qt->len,
1656                        qt->class, qt->ulp);
1657         }
1658 #endif
1659
1660         /* Free the tokens array */
1661         ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1662
1663         return qtop;
1664 }
1665
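/*
 * A minimal usage sketch, assuming an initialized "hw"; the topology string
 * here is hypothetical:
 *
 *      ocs_hw_qtop_t *qtop = ocs_hw_qtop_parse(hw, "eq cq wq cq rq");
 *      if (qtop == NULL) {
 *              return OCS_HW_RTN_ERROR;
 *      }
 *      ... walk qtop->entries[0 .. qtop->inuse_count - 1] ...
 *      ocs_hw_qtop_free(qtop);
 */
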
1666 /**
1667  * @brief free queue topology object
1668  *
1669  * @param qtop pointer to QTOP object
1670  *
1671  * @return none
1672  */
1673 void
1674 ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
1675 {
1676         if (qtop != NULL) {
1677                 if (qtop->entries != NULL) {
1678                         ocs_free(qtop->os, qtop->entries, qtop->alloc_count*sizeof(*qtop->entries));
1679                 }
1680                 ocs_free(qtop->os, qtop, sizeof(*qtop));
1681         }
1682 }
1683
1684 /* Uncomment this to turn on RQ debug */
1685 /* #define ENABLE_DEBUG_RQBUF */
1686
1687 static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
1688 static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
1689 static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1690 static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1691
1692 /**
1693  * @brief Process receive queue completions for RQ Pair mode.
1694  *
1695  * @par Description
1696  * RQ completions are processed. In RQ pair mode, a single header and single payload
1697  * buffer are received, and passed to the function that has registered for unsolicited
1698  * callbacks.
1699  *
1700  * @param hw Hardware context.
1701  * @param cq Pointer to HW completion queue.
1702  * @param cqe Completion queue entry.
1703  *
1704  * @return Returns 0 for success, or a negative error code value for failure.
1705  */
1706
1707 int32_t
1708 ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1709 {
1710         uint16_t rq_id;
1711         uint32_t index;
1712         int32_t rqindex;
1713         int32_t  rq_status;
1714         uint32_t h_len;
1715         uint32_t p_len;
1716         ocs_hw_sequence_t *seq;
1717
1718         rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1719         if (0 != rq_status) {
1720                 switch (rq_status) {
1721                 case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1722                 case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1723                         /* just get RQ buffer then return to chip */
1724                         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1725                         if (rqindex < 0) {
1726                                 ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1727                                              rq_status, rq_id);
1728                                 break;
1729                         }
1730
1731                         /* get RQ buffer */
1732                         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1733
1734                         /* return to chip */
1735                         if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1736                                 ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",
1737                                              rq_status);
1738                                 break;
1739                         }
1740                         break;
1741                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1742                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1743                         /* since RQ buffers were not consumed, cannot return them to chip */
1744                         ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1745                         /* fall through */
1746                 default:
1747                         break;
1748                 }
1749                 return -1;
1750         }
1751
1752         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1753         if (rqindex < 0) {
1754                 ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1755                 return -1;
1756         }
1757
1758         OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
1759                  rq->payload_use_count++;})
1760
1761         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1762         ocs_hw_assert(seq != NULL);
1763
1764         seq->hw = hw;
1765         seq->auto_xrdy = 0;
1766         seq->out_of_xris = 0;
1767         seq->xri = 0;
1768         seq->hio = NULL;
1769
1770         sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1771         seq->header->dma.len = h_len;
1772         seq->payload->dma.len = p_len;
1773         seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1774         seq->hw_priv = cq->eq;
1775
1776         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1777         if (hw->config.bounce) {
1778                 fc_header_t *hdr = seq->header->dma.virt;
1779                 uint32_t s_id = fc_be24toh(hdr->s_id);
1780                 uint32_t d_id = fc_be24toh(hdr->d_id);
1781                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1782                 if (hw->callback.bounce != NULL) {
1783                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1784                 }
1785         } else {
1786                 hw->callback.unsolicited(hw->args.unsolicited, seq);
1787         }
1788
1789         return 0;
1790 }
1791
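/*
 * Bounce-path sketch: with bouncing enabled, the registered
 * hw->callback.bounce handler is expected to run
 * ocs_hw_unsol_process_bounce(seq) on a CPU derived from the snooped
 * exchange IDs, e.g. (hypothetical selection)
 *
 *      cpuidx = ox_id % n_cpus;
 *
 * so that all frames of one exchange are processed on the same CPU.
 */
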
1792 /**
1793  * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
1794  *
1795  * @par Description
1796  * RQ completions are processed. In RQ pair mode, a single header and single payload
1797  * buffer are received, and passed to the function that has registered for unsolicited
1798  * callbacks.
1799  *
1800  * @param hw Hardware context.
1801  * @param cq Pointer to HW completion queue.
1802  * @param cqe Completion queue entry.
1803  *
1804  * @return Returns 0 for success, or a negative error code value for failure.
1805  */
1806
1807 int32_t
1808 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1809 {
1810         /* Seems silly to call a SLI function to decode - use the structure directly for performance */
1811         sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t*)cqe;
1812         uint16_t rq_id;
1813         uint32_t index;
1814         int32_t rqindex;
1815         int32_t  rq_status;
1816         uint32_t h_len;
1817         uint32_t p_len;
1818         ocs_hw_sequence_t *seq;
1819         uint8_t axr_lock_taken = 0;
1820 #if defined(OCS_DISC_SPIN_DELAY)
1821         uint32_t        delay = 0;
1822         char            prop_buf[32];
1823 #endif
1824
1825         rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1826         if (0 != rq_status) {
1827                 switch (rq_status) {
1828                 case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1829                 case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1830                         /* just get RQ buffer then return to chip */
1831                         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1832                         if (rqindex < 0) {
1833                                 ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1834                                             rq_status, rq_id);
1835                                 break;
1836                         }
1837
1838                         /* get RQ buffer */
1839                         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1840
1841                         /* return to chip */
1842                         if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1843                                 ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",
1844                                             rq_status);
1845                                 break;
1846                         }
1847                         break;
1848                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1849                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1850                         /* since RQ buffers were not consumed, cannot return them to chip */
1851                         ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1852                         /* fall through */
1853                 default:
1854                         break;
1855                 }
1856                 return -1;
1857         }
1858
1859         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1860         if (rqindex < 0) {
1861                 ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1862                 return -1;
1863         }
1864
1865         OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
1866                  rq->payload_use_count++;})
1867
1868         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1869         ocs_hw_assert(seq != NULL);
1870
1871         seq->hw = hw;
1872         seq->auto_xrdy = opt_wr->agxr;
1873         seq->out_of_xris = opt_wr->oox;
1874         seq->xri = opt_wr->xri;
1875         seq->hio = NULL;
1876
1877         sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1878         seq->header->dma.len = h_len;
1879         seq->payload->dma.len = p_len;
1880         seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1881         seq->hw_priv = cq->eq;
1882
1883         if (seq->auto_xrdy) {
1884                 fc_header_t *fc_hdr = seq->header->dma.virt;
1885
1886                 seq->hio = ocs_hw_io_lookup(hw, seq->xri);
                     ocs_hw_assert(seq->hio != NULL);
1887                 ocs_lock(&seq->hio->axr_lock);
1888                 axr_lock_taken = 1;
1889
1890                 /* save the FCFI, src_id, dest_id and ox_id because we need it for the sequence object when the data comes. */
1891                 seq->hio->axr_buf->fcfi = seq->fcfi;
1892                 seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
1893                 seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
1894                 seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
1895                 seq->hio->axr_buf->cmd_cqe = 1;
1896
1897                 /*
1898                  * Since auto xfer rdy is used for this IO, clear the sequence
1899                  * initiative bit in the header so that the upper layers wait for the
1900                  * data. This should flow exactly like the first burst case.
1901                  */
1902                 fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);
1903
1904                 /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
1905                 if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
1906                         seq->hio->axr_buf->call_axr_cmd = 1;
1907                         seq->hio->axr_buf->cmd_seq = seq;
1908                         goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;
1909                 }
1910         }
1911
1912         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1913         if (hw->config.bounce) {
1914                 fc_header_t *hdr = seq->header->dma.virt;
1915                 uint32_t s_id = fc_be24toh(hdr->s_id);
1916                 uint32_t d_id = fc_be24toh(hdr->d_id);
1917                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1918                 if (hw->callback.bounce != NULL) {
1919                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1920                 }
1921         } else {
1922                 hw->callback.unsolicited(hw->args.unsolicited, seq);
1923         }
1924
1925         if (seq->auto_xrdy) {
1926                 /* If the data CQE arrived out of order, before this cmd CQE (AXR case) */
1927                 if (seq->hio->axr_buf->data_cqe == 1) {
1928
1929 #if defined(OCS_DISC_SPIN_DELAY)
1930                         if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
1931                                 delay = ocs_strtoul(prop_buf, 0, 0);
1932                                 ocs_udelay(delay);
1933                         }
1934 #endif
1935                         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1936                         if (hw->config.bounce) {
1937                                 fc_header_t *hdr = seq->header->dma.virt;
1938                                 uint32_t s_id = fc_be24toh(hdr->s_id);
1939                                 uint32_t d_id = fc_be24toh(hdr->d_id);
1940                                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1941                                 if (hw->callback.bounce != NULL) {
1942                                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);
1943                                 }
1944                         } else {
1945                                 hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);
1946                         }
1947                 }
1948         }
1949
1950 exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
1951         if (axr_lock_taken) {
1952                 ocs_unlock(&seq->hio->axr_lock);
1953         }
1954         return 0;
1955 }
1956
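/*
 * Summary of the out-of-order CQE handling above and in the data handler
 * below: the cmd and data CQEs for one auto xfer rdy XRI may arrive in
 * either order, and whichever handler runs second performs the dispatch:
 *
 *      cmd CQE first:  cmd_cqe = 1; the data handler sees it and dispatches
 *      data CQE first: data_cqe = 1; the cmd handler dispatches both the
 *                      command and data sequences
 *      TRSP pending:   call_axr_cmd / call_axr_data are set so the work is
 *                      deferred until the target response completes
 */
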
1957 /**
1958  * @brief Process CQ completions for Auto xfer rdy data phases.
1959  *
1960  * @par Description
1961  * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
1962  * being assigned to an IO. When the completion is received, all of the data
1963  * is in the single buffer.
1964  *
1965  * @param hw Hardware context.
1966  * @param cq Pointer to HW completion queue.
1967  * @param cqe Completion queue entry.
1968  *
1969  * @return Returns 0 for success, or a negative error code value for failure.
1970  */
1971
1972 int32_t
1973 ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1974 {
1975         /* Seems silly to call a SLI function to decode - use the structure directly for performance */
1976         sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t*)cqe;
1977         ocs_hw_sequence_t *seq;
1978         ocs_hw_io_t *io;
1979         ocs_hw_auto_xfer_rdy_buffer_t *buf;
1980 #if defined(OCS_DISC_SPIN_DELAY)
1981         uint32_t        delay = 0;
1982         char            prop_buf[32];
1983 #endif
1984         /* Look up the IO */
1985         io = ocs_hw_io_lookup(hw, opt_wr->xri);
              ocs_hw_assert(io != NULL);
1986         ocs_lock(&io->axr_lock);
1987         buf = io->axr_buf;
1988         buf->data_cqe = 1;
1989         seq = &buf->seq;
1990         seq->hw = hw;
1991         seq->auto_xrdy = 1;
1992         seq->out_of_xris = 0;
1993         seq->xri = opt_wr->xri;
1994         seq->hio = io;
1995         seq->header = &buf->header;
1996         seq->payload = &buf->payload;
1997
1998         seq->header->dma.len = sizeof(fc_header_t);
1999         seq->payload->dma.len = opt_wr->total_data_placed;
2000         seq->fcfi = buf->fcfi;
2001         seq->hw_priv = cq->eq;
2002
2004         if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
2005                 seq->status = OCS_HW_UNSOL_SUCCESS;
2006         } else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
2007                 seq->status = OCS_HW_UNSOL_ABTS_RCVD;
2008         } else {
2009                 seq->status = OCS_HW_UNSOL_ERROR;
2010         }
2011
2012         /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
2013         if (io->type == OCS_HW_IO_TARGET_RSP) {
2014                 io->axr_buf->call_axr_data = 1;
2015                 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2016         }
2017
2018         if (!buf->cmd_cqe) {
2019                 /* if data cqe came before cmd cqe, return here, cmd cqe will handle */
2020                 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2021         }
2022 #if defined(OCS_DISC_SPIN_DELAY)
2023         if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
2024                 delay = ocs_strtoul(prop_buf, 0, 0);
2025                 ocs_udelay(delay);
2026         }
2027 #endif
2028
2029         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
2030         if (hw->config.bounce) {
2031                 fc_header_t *hdr = seq->header->dma.virt;
2032                 uint32_t s_id = fc_be24toh(hdr->s_id);
2033                 uint32_t d_id = fc_be24toh(hdr->d_id);
2034                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
2035                 if (hw->callback.bounce != NULL) {
2036                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
2037                 }
2038         } else {
2039                 hw->callback.unsolicited(hw->args.unsolicited, seq);
2040         }
2041
2042 exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
2043         ocs_unlock(&io->axr_lock);
2044         return 0;
2045 }
2046
2047 /**
2048  * @brief Return pointer to RQ buffer entry.
2049  *
2050  * @par Description
2051  * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
2052  *
2053  * @param hw Hardware context.
2054  * @param rqindex Index of the RQ that is being processed.
2055  * @param bufindex Index into the RQ that is being processed.
2056  *
2057  * @return Pointer to the sequence structure, or NULL otherwise.
2058  */
2059 static ocs_hw_sequence_t *
2060 ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
2061 {
2062         sli4_queue_t *rq_hdr = &hw->rq[rqindex];
2063         sli4_queue_t *rq_payload = &hw->rq[rqindex+1];
2064         ocs_hw_sequence_t *seq = NULL;
2065         hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
2066
2067 #if defined(ENABLE_DEBUG_RQBUF)
2068         uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);
2069 #endif
2070
2071         if (bufindex >= rq_hdr->length) {
2072                 ocs_log_err(hw->os, "RQ index %d bufindex %d exceeds ring length %d for id %d\n",
2073                             rqindex, bufindex, rq_hdr->length, rq_hdr->id);
2074                 return NULL;
2075         }
2076
2077         sli_queue_lock(rq_hdr);
2078         sli_queue_lock(rq_payload);
2079
2080 #if defined(ENABLE_DEBUG_RQBUF)
2081         /* Put a debug value into the rq, to track which entries are still valid */
2082         _sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
2083         _sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);
2084 #endif
2085
2086         seq = rq->rq_tracker[bufindex];
2087         rq->rq_tracker[bufindex] = NULL;
2088
2089         if (seq == NULL) {
2090                 ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
2091                             rqindex, bufindex, rq_hdr->index);
2092         }
2093
2094         sli_queue_unlock(rq_payload);
2095         sli_queue_unlock(rq_hdr);
2096         return seq;
2097 }
2098
2099 /**
2100  * @brief Posts an RQ buffer to a queue and updates the verification structures.
2101  *
2102  * @param hw Hardware context.
2103  * @param seq Pointer to sequence object.
2104  *
2105  * @return Returns 0 on success, or a non-zero value otherwise.
2106  */
2107 static int32_t
2108 ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2109 {
2110         sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
2111         sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
2112         uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
2113         hw_rq_t *rq = hw->hw_rq[hw_rq_index];
2114         uint32_t     phys_hdr[2];
2115         uint32_t     phys_payload[2];
2116         int32_t      qindex_hdr;
2117         int32_t      qindex_payload;
2118
2119         /* Update the RQ verification lookup tables */
2120         phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
2121         phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
2122         phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
2123         phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);
2124
2125         sli_queue_lock(rq_hdr);
2126         sli_queue_lock(rq_payload);
2127
2128         /*
2129          * Note: The header must be posted last for buffer pair mode because
2130          *       posting on the header queue posts the payload queue as well.
2131          *       We do not ring the payload queue independently in RQ pair mode.
2132          */
2133         qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
2134         qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
2135         if (qindex_hdr < 0 ||
2136             qindex_payload < 0) {
2137                 ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
2138                 sli_queue_unlock(rq_payload);
2139                 sli_queue_unlock(rq_hdr);
2140                 return OCS_HW_RTN_ERROR;
2141         }
2142
2143         /* ensure the indexes are the same */
2144         ocs_hw_assert(qindex_hdr == qindex_payload);
2145
2146         /* Update the lookup table */
2147         if (rq->rq_tracker[qindex_hdr] == NULL) {
2148                 rq->rq_tracker[qindex_hdr] = seq;
2149         } else {
2150                 ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
2151                              hw_rq_index, qindex_hdr);
2152         }
2153
2154         sli_queue_unlock(rq_payload);
2155         sli_queue_unlock(rq_hdr);
2156         return OCS_HW_RTN_SUCCESS;
2157 }
2158
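/*
 * Posting sketch: in RQ pair mode the header and payload rings advance in
 * lock step, so a single write index names one header/payload buffer pair:
 *
 *      _sli_queue_write(rq_payload)  ->  qindex_payload == N  (no doorbell)
 *      _sli_queue_write(rq_hdr)      ->  qindex_hdr == N, doorbell rings,
 *                                        hardware consumes the pair at N
 */
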
2159 /**
2160  * @brief Return RQ buffers (while in RQ pair mode).
2161  *
2162  * @par Description
2163  * The header and payload buffers are returned to the Receive Queue.
2164  *
2165  * @param hw Hardware context.
2166  * @param seq Header/payload sequence buffers.
2167  *
2168  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
2169  */
2170
2171 ocs_hw_rtn_e
2172 ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2173 {
2174         ocs_hw_rtn_e   rc = OCS_HW_RTN_SUCCESS;
2175
2176         /* Check for auto xfer rdy dummy buffers and call the proper release function. */
2177         if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) {
2178                 return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq);
2179         }
2180
2181         /*
2182          * Post the data buffer first. Because in RQ pair mode, ringing the
2183          * doorbell of the header ring will post the data buffer as well.
2184          */
2185         if (ocs_hw_rqpair_put(hw, seq)) {
2186                 ocs_log_err(hw->os, "error writing buffers\n");
2187                 return OCS_HW_RTN_ERROR;
2188         }
2189
2190         return rc;
2191 }
2192
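/*
 * A usage sketch, assuming a hypothetical unsolicited handler registered
 * via hw->callback.unsolicited, which returns its buffers when done:
 *
 *      static int32_t
 *      my_unsol_cb(void *arg, ocs_hw_sequence_t *seq)
 *      {
 *              ocs_hw_t *hw = seq->hw;
 *
 *              ... inspect seq->header->dma and seq->payload->dma ...
 *
 *              if (ocs_hw_rqpair_sequence_free(hw, seq) != OCS_HW_RTN_SUCCESS) {
 *                      return -1;
 *              }
 *              return 0;
 *      }
 */
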
2193 /**
2194  * @brief Find the RQ index of RQ_ID.
2195  *
2196  * @param hw Hardware context.
2197  * @param rq_id RQ ID to find.
2198  *
2199  * @return Returns the RQ index, or -1 if not found.
2200  */
2201 static inline int32_t
2202 ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id)
2203 {
2204         return ocs_hw_queue_hash_find(hw->rq_hash, rq_id);
2205 }
2206
2207 /**
2208  * @ingroup devInitShutdown
2209  * @brief Allocate auto xfer rdy buffers.
2210  *
2211  * @par Description
2212  * Allocates the auto xfer rdy buffers and places them on the free list.
2213  *
2214  * @param hw Hardware context allocated by the caller.
2215  * @param num_buffers Number of buffers to allocate.
2216  *
2217  * @return Returns 0 on success, or a non-zero value on failure.
2218  */
2219 ocs_hw_rtn_e
2220 ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers)
2221 {
2222         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2223         uint32_t i;
2224
2225         hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE);
2226         if (hw->auto_xfer_rdy_buf_pool == NULL) {
2227                 ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n");
2228                 return OCS_HW_RTN_NO_MEMORY;
2229         }
2230
2231         for (i = 0; i < num_buffers; i++) {
2232                 /* allocate the wrapper object */
2233                 buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2234                 ocs_hw_assert(buf != NULL);
2235
2236                 /* allocate the auto xfer ready buffer */
2237                 if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) {
2238                         ocs_log_err(hw->os, "DMA allocation failed\n");
2239                         /* Note: "buf" is a pool instance owned by
                         * auto_xfer_rdy_buf_pool and must not be ocs_free()'d
                         * here; the pool, and any DMA buffers already
                         * allocated, are released in
                         * ocs_hw_rqpair_auto_xfer_rdy_buffer_free(). */
2240                         return OCS_HW_RTN_NO_MEMORY;
2241                 }
2242
2243                 /* build a fake data header in big endian */
2244                 buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2245                 buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2246                 buf->hdr.type = FC_TYPE_FCP;
2247                 buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2248                                             FC_FCTL_FIRST_SEQUENCE |
2249                                             FC_FCTL_LAST_SEQUENCE |
2250                                             FC_FCTL_END_SEQUENCE |
2251                                             FC_FCTL_SEQUENCE_INITIATIVE);
2252
2253                 /* build the fake header DMA object */
2254                 buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2255                 buf->header.dma.virt = &buf->hdr;
2256                 buf->header.dma.alloc = buf;
2257                 buf->header.dma.size = sizeof(buf->hdr);
2258                 buf->header.dma.len = sizeof(buf->hdr);
2259
2260                 buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2261         }
2262         return OCS_HW_RTN_SUCCESS;
2263 }
2264
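/*
 * Layout of one pool element after the loop above (sketch):
 *
 *      ocs_hw_auto_xfer_rdy_buffer_t
 *        .hdr      fake big-endian FC header (solicited FCP data frame)
 *        .header   dummy DMA descriptor: rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR,
 *                  dma.virt pointing at .hdr
 *        .payload  real DMA buffer of auto_xfer_rdy_size bytes:
 *                  rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA
 */
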
2265 /**
2266  * @ingroup devInitShutdown
2267  * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX.
2268  *
2269  * @par Description
2270  * When new buffers are freed, check existing XRIs waiting for buffers.
2271  *
2272  * @param hw Hardware context allocated by the caller.
2273  */
2274 static void
2275 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
2276 {
2277         ocs_hw_io_t *io;
2278         int32_t rc;
2279
2280         ocs_lock(&hw->io_lock);
2281
2282         while (!ocs_list_empty(&hw->io_port_dnrx)) {
2283                 io = ocs_list_remove_head(&hw->io_port_dnrx);
2284                 rc = ocs_hw_reque_xri(hw, io);
2285                 if (rc) {
2286                         break;
2287                 }
2288         }
2289
2290         ocs_unlock(&hw->io_lock);
2291 }
2292
2293 /**
2294  * @brief Called when the POST_SGL_PAGES command completes.
2295  *
2296  * @par Description
2297  * Free the mailbox command buffer.
2298  *
2299  * @param hw Hardware context.
2300  * @param status Status field from the mbox completion.
2301  * @param mqe Mailbox response structure.
2302  * @param arg Pointer to a callback function that signals the caller that the command is done.
2303  *
2304  * @return Returns 0.
2305  */
2306 static int32_t
2307 ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
2308 {
2309         if (status != 0) {
2310                 ocs_log_debug(hw->os, "Status 0x%x\n", status);
2311         }
2312
2313         ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2314         return 0;
2315 }
2316
2317 /**
2318  * @brief Prepares an XRI to move to the chip.
2319  *
2320  * @par Description
2321  * Puts the data SGL into the SGL list for the IO object and possibly registers
2322  * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
2323  * mailbox commands, we don't need to wait for completion before proceeding.
2324  *
2325  * @param hw Hardware context allocated by the caller.
2326  * @param io Pointer to the IO object.
2327  *
2328  * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
2329  */
2330 ocs_hw_rtn_e
2331 ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
2332 {
2333         /* We only need to preregister the SGL if it has not yet been done. */
2334         if (!sli_get_sgl_preregister(&hw->sli)) {
2335                 uint8_t *post_sgl;
2336                 ocs_dma_t *psgls = &io->def_sgl;
2337                 ocs_dma_t **sgls = &psgls;
2338
2339                 /* non-local buffer required for mailbox queue */
2340                 post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2341                 if (post_sgl == NULL) {
2342                         ocs_log_err(hw->os, "no buffer for command\n");
2343                         return OCS_HW_RTN_NO_MEMORY;
2344                 }
2345                 if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
2346                                                 io->indicator, 1, sgls, NULL, NULL)) {
2347                         if (ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
2348                                             ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
2349                                 ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
2350                                 ocs_log_err(hw->os, "SGL post failed\n");
2351                                 return OCS_HW_RTN_ERROR;
2352                         }
2353                 }
2354         }
2355
2356         ocs_lock(&hw->io_lock);
2357         if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
2358                 ocs_unlock(&hw->io_lock);
2359                 return OCS_HW_RTN_ERROR;
2360         }
2361         ocs_unlock(&hw->io_lock);
2362         return OCS_HW_RTN_SUCCESS;
2363 }
2364
2365 /**
2366  * @brief Prepares an XRI to move back to the host.
2367  *
2368  * @par Description
2369  * Releases any attached buffer back to the pool.
2370  *
2371  * @param hw Hardware context allocated by the caller.
2372  * @param io Pointer to the IO object.
2373  */
2374 void
2375 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
2376 {
2377         if (io->axr_buf != NULL) {
2378                 ocs_lock(&hw->io_lock);
2379                         /* check the DNRX list and remove the IO if present */
2380                         if (ocs_list_on_list(&io->dnrx_link)) {
2381                                 ocs_list_remove(&hw->io_port_dnrx, io);
2382                                 io->auto_xfer_rdy_dnrx = 0;
2383
2384                                 /* release the count for waiting for a buffer */
2385                                 ocs_hw_io_free(hw, io);
2386                         }
2387
2388                         ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
2389                         io->axr_buf = NULL;
2390                 ocs_unlock(&hw->io_lock);
2391
2392                 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2393         }
2394         return;
2395 }
2396
2397
2398 /**
2399  * @brief Posts an auto xfer rdy buffer to an IO.
2400  *
2401  * @par Description
2402  * Puts the data SGL into the SGL list for the IO object.
2403  *
2404  * @b Note: io_lock must be held.
2405  *
2406  * @param hw Hardware context allocated by the caller.
2407  * @param io Pointer to the IO object.
2408  *
2409  * @return Returns the value of the DNRX bit for the TRSP and ABORT WQEs (0 in the current implementation).
2410  */
2411 uint8_t
2412 ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf)
2413 {
2414         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2415         sli4_sge_t      *data;
2416
2417         if (!reuse_buf) {
2418                 buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool);
2419                 io->axr_buf = buf;
2420         }
2421
2422         data = io->def_sgl.virt;
2423         data[0].sge_type = SLI4_SGE_TYPE_SKIP;
2424         data[0].last = 0;
2425
2426         /*
2427          * Note: if we are doing DIF assists, then the SGE[1] must contain the
2428          * DI_SEED SGE. The host is responsible for programming:
2429          *   SGE Type (Word 2, bits 30:27)
2430          *   Replacement App Tag (Word 2 bits 15:0)
2431          *   App Tag (Word 3 bits 15:0)
2432          *   New Ref Tag (Word 3 bit 23)
2433          *   Metadata Enable (Word 3 bit 20)
2434          *   Auto-Increment RefTag (Word 3 bit 19)
2435          *   Block Size (Word 3 bits 18:16)
2436          * The following fields are managed by the SLI Port:
2437          *    Ref Tag Compare (Word 0)
2438          *    Replacement Ref Tag (Word 1) - if not the LBA
2439          *    NA (Word 2 bit 25)
2440          *    Opcode RX (Word 3 bits 27:24)
2441          *    Checksum Enable (Word 3 bit 22)
2442          *    RefTag Enable (Word 3 bit 21)
2443          *
2444          * The first two SGEs are cleared by ocs_hw_io_init_sges(), so assume everything is cleared.
2445          */
2446         if (hw->config.auto_xfer_rdy_p_type) {
2447                 sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t*)&data[1];
2448
2449                 diseed->sge_type = SLI4_SGE_TYPE_DISEED;
2450                 diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value;
2451                 diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value;
2452                 diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid;
2453                 diseed->auto_incr_ref_tag = TRUE; /* Always the LBA */
2454                 diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
2455         } else {
2456                 data[1].sge_type = SLI4_SGE_TYPE_SKIP;
2457                 data[1].last = 0;
2458         }
2459
2460         data[2].sge_type = SLI4_SGE_TYPE_DATA;
2461         data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys);
2462         data[2].buffer_address_low  = ocs_addr32_lo(io->axr_buf->payload.dma.phys);
2463         data[2].buffer_length = io->axr_buf->payload.dma.size;
2464         data[2].last = TRUE;
2465         data[3].sge_type = SLI4_SGE_TYPE_SKIP;
2466
2467         return 0;
2468 }
2469
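/*
 * Resulting def_sgl layout after the post above (sketch):
 *
 *      SGE[0]  SKIP
 *      SGE[1]  DISEED (T10 DIF seed, when auto_xfer_rdy_p_type) or SKIP
 *      SGE[2]  DATA -> axr_buf->payload.dma, last = TRUE
 *      SGE[3]  SKIP
 */
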
2470 /**
2471  * @brief Return auto xfer ready buffers (while in RQ pair mode).
2472  *
2473  * @par Description
2474  * The header and payload buffers are returned to the auto xfer rdy pool.
2475  *
2476  * @param hw Hardware context.
2477  * @param seq Header/payload sequence buffers.
2478  *
2479  * @return Returns OCS_HW_RTN_SUCCESS for success, an error code value for failure.
2480  */
2481
2482 static ocs_hw_rtn_e
2483 ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2484 {
2485         ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc;
2486
2487         buf->data_cqe = 0;
2488         buf->cmd_cqe = 0;
2489         buf->fcfi = 0;
2490         buf->call_axr_cmd = 0;
2491         buf->call_axr_data = 0;
2492
2493         /* build a fake data header in big endian */
2494         buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2495         buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2496         buf->hdr.type = FC_TYPE_FCP;
2497         buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2498                                         FC_FCTL_FIRST_SEQUENCE |
2499                                         FC_FCTL_LAST_SEQUENCE |
2500                                         FC_FCTL_END_SEQUENCE |
2501                                         FC_FCTL_SEQUENCE_INITIATIVE);
2502
2503         /* build the fake header DMA object */
2504         buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2505         buf->header.dma.virt = &buf->hdr;
2506         buf->header.dma.alloc = buf;
2507         buf->header.dma.size = sizeof(buf->hdr);
2508         buf->header.dma.len = sizeof(buf->hdr);
2509         buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2510
2511         ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2512
2513         return OCS_HW_RTN_SUCCESS;
2514 }
2515
2516 /**
2517  * @ingroup devInitShutdown
2518  * @brief Free auto xfer rdy buffers.
2519  *
2520  * @par Description
2521  * Frees the auto xfer rdy buffers.
2522  *
2523  * @param hw Hardware context allocated by the caller.
2526  */
2527 static void
2528 ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
2529 {
2530         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2531         uint32_t i;
2532
2533         if (hw->auto_xfer_rdy_buf_pool != NULL) {
2534                 ocs_lock(&hw->io_lock);
2535                         for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
2536                                 buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2537                                 if (buf != NULL) {
2538                                         ocs_dma_free(hw->os, &buf->payload.dma);
2539                                 }
2540                         }
2541                 ocs_unlock(&hw->io_lock);
2542
2543                 ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
2544                 hw->auto_xfer_rdy_buf_pool = NULL;
2545         }
2546 }
2547
2548 /**
2549  * @ingroup devInitShutdown
2550  * @brief Configure the rq_pair function from ocs_hw_init().
2551  *
2552  * @par Description
2553  * Allocates the auto xfer rdy buffers and posts the initial XRIs for this feature.
2554  *
2555  * @param hw Hardware context allocated by the caller.
2556  *
2557  * @return Returns 0 on success, or a non-zero value on failure.
2558  */
2559 ocs_hw_rtn_e
2560 ocs_hw_rqpair_init(ocs_hw_t *hw)
2561 {
2562         ocs_hw_rtn_e    rc;
2563         uint32_t xris_posted;
2564
2565         ocs_log_debug(hw->os, "RQ Pair mode\n");
2566
2567         /*
2568          * If we get this far, the auto XFR_RDY feature was enabled successfully; otherwise ocs_hw_init()
2569          * would have returned an error. So allocate the buffers based on the initial XRI pool required to
2570          * support this feature.
2571          */
2572         if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
2573             hw->config.auto_xfer_rdy_size > 0) {
2574                 if (hw->auto_xfer_rdy_buf_pool == NULL) {
2575                         /*
2576                          * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
2577                          * one to post back for the case where the response phase is started in the context of
2578                          * the data completion.
2579                          */
2580                         rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
2581                         if (rc != OCS_HW_RTN_SUCCESS) {
2582                                 return rc;
2583                         }
2584                 } else {
2585                         ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);
2586                 }
2587
2588                 /* Post the auto XFR_RDY XRIs */
2589                 xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
2590                 if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
2591                         ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
2592                         return OCS_HW_RTN_ERROR;
2593                 }
2594         }
2595
2596         return OCS_HW_RTN_SUCCESS;
2597 }
2598
2599 /**
2600  * @ingroup devInitShutdown
2601  * @brief Tear down the rq_pair function from ocs_hw_teardown().
2602  *
2603  * @par Description
2604  * Frees the auto xfer rdy buffers.
2605  *
2606  * @param hw Hardware context allocated by the caller.
2607  */
2608 void
2609 ocs_hw_rqpair_teardown(ocs_hw_t *hw)
2610 {
2611         /* We need to free any auto xfer ready buffers */
2612         ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);
2613 }