/*-
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/**
 * @file
 * OCS hardware queue management: creation and teardown of the SLI-4 event,
 * completion, work, receive, and mailbox queues, plus queue topology parsing.
 */

#include "ocs_os.h"
#include "ocs_hw.h"
#include "ocs_hw_queues.h"

#define HW_QTOP_DEBUG           0

/**
 * @brief Initialize queues
 *
 * Given the parsed queue topology spec, the SLI queues are created and
 * initialized.
 *
 * @param hw pointer to HW object
 * @param qtop pointer to queue topology
 *
 * @return returns 0 for success, an error code value for failure.
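 *
 * Illustrative example (not necessarily the driver's default topology):
 * a parsed spec such as "eq cq mq eq cq rq" would yield one EQ carrying a
 * CQ/MQ pair and a second EQ carrying a CQ/RQ pair.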
 */
ocs_hw_rtn_e
ocs_hw_init_queues(ocs_hw_t *hw, ocs_hw_qtop_t *qtop)
{
        uint32_t i, j;
        uint32_t default_lengths[QTOP_LAST], len;
        uint32_t rqset_len = 0, rqset_ulp = 0, rqset_count = 0;
        uint8_t rqset_filter_mask = 0;
        hw_eq_t *eqs[hw->config.n_rq];
        hw_cq_t *cqs[hw->config.n_rq];
        hw_rq_t *rqs[hw->config.n_rq];
        ocs_hw_qtop_entry_t *qt, *next_qt;
        ocs_hw_mrq_t mrq;
        bool use_mrq = FALSE;

        hw_eq_t *eq = NULL;
        hw_cq_t *cq = NULL;
        hw_wq_t *wq = NULL;
        hw_rq_t *rq = NULL;
        hw_mq_t *mq = NULL;

        /* Validate the HW object before it is dereferenced below. */
        ocs_hw_verify(hw != NULL, OCS_HW_RTN_INVALID_ARG);

        mrq.num_pairs = 0;
        default_lengths[QTOP_EQ] = 1024;
        default_lengths[QTOP_CQ] = hw->num_qentries[SLI_QTYPE_CQ];
        default_lengths[QTOP_WQ] = hw->num_qentries[SLI_QTYPE_WQ];
        default_lengths[QTOP_RQ] = hw->num_qentries[SLI_QTYPE_RQ];
        default_lengths[QTOP_MQ] = OCS_HW_MQ_DEPTH;

        hw->eq_count = 0;
        hw->cq_count = 0;
        hw->mq_count = 0;
        hw->wq_count = 0;
        hw->rq_count = 0;
        hw->hw_rq_count = 0;
        ocs_list_init(&hw->eq_list, hw_eq_t, link);

        /* If MRQ is requested, check whether it is supported by the SLI port. */
        if ((hw->config.n_rq > 1) && !hw->sli.config.features.flag.mrqp) {
                ocs_log_err(hw->os, "MRQ topology not supported by SLI4.\n");
                return OCS_HW_RTN_ERROR;
        }

        if (hw->config.n_rq > 1)
                use_mrq = TRUE;

        /* Allocate class WQ pools */
        for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
                hw->wq_class_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
                if (hw->wq_class_array[i] == NULL) {
                        ocs_log_err(hw->os, "ocs_varray_alloc for wq_class failed\n");
                        return OCS_HW_RTN_NO_MEMORY;
                }
        }

        /* Allocate per CPU WQ pools */
        for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
                hw->wq_cpu_array[i] = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
                if (hw->wq_cpu_array[i] == NULL) {
                        ocs_log_err(hw->os, "ocs_varray_alloc for wq_cpu failed\n");
                        return OCS_HW_RTN_NO_MEMORY;
                }
        }

        ocs_hw_assert(qtop != NULL);

        for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
                if (i == qtop->inuse_count - 1)
                        next_qt = NULL;
                else
                        next_qt = qt + 1;

                switch(qt->entry) {
                case QTOP_EQ:
                        len = (qt->len) ? qt->len : default_lengths[QTOP_EQ];

                        if (qt->set_default) {
                                default_lengths[QTOP_EQ] = len;
                                break;
                        }

                        eq = hw_new_eq(hw, len);
                        if (eq == NULL) {
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_NO_MEMORY;
                        }
                        break;

                case QTOP_CQ:
                        len = (qt->len) ? qt->len : default_lengths[QTOP_CQ];

                        if (qt->set_default) {
                                default_lengths[QTOP_CQ] = len;
                                break;
                        }

                        if (!eq || !next_qt) {
                                goto fail;
                        }

                        /* If this CQ is for MRQ, then delay the creation */
                        if (!use_mrq || next_qt->entry != QTOP_RQ) {
                                cq = hw_new_cq(eq, len);
                                if (cq == NULL) {
                                        goto fail;
                                }
                        }
                        break;

                case QTOP_WQ: {
                        len = (qt->len) ? qt->len : default_lengths[QTOP_WQ];
                        if (qt->set_default) {
                                default_lengths[QTOP_WQ] = len;
                                break;
                        }

                        if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
                                ocs_log_err(hw->os, "invalid ULP %d for WQ\n", qt->ulp);
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_NO_MEMORY;
                        }

                        if (cq == NULL)
                                goto fail;

                        wq = hw_new_wq(cq, len, qt->class, hw->ulp_start + qt->ulp);
                        if (wq == NULL) {
                                goto fail;
                        }

                        /* Place this WQ on the EQ WQ array */
                        if (ocs_varray_add(eq->wq_array, wq)) {
                                ocs_log_err(hw->os, "QTOP_WQ: EQ ocs_varray_add failed\n");
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        /* Place this WQ on the HW class array */
                        if (qt->class < ARRAY_SIZE(hw->wq_class_array)) {
                                if (ocs_varray_add(hw->wq_class_array[qt->class], wq)) {
                                        ocs_log_err(hw->os, "HW wq_class_array ocs_varray_add failed\n");
                                        hw_queue_teardown(hw);
                                        return OCS_HW_RTN_ERROR;
                                }
                        } else {
                                ocs_log_err(hw->os, "Invalid class value: %d\n", qt->class);
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        /*
                         * Place this WQ on the per-CPU list, assuming that EQs are mapped
                         * to CPUs given by the EQ instance modulo the number of CPUs.
                         */
                        if (ocs_varray_add(hw->wq_cpu_array[eq->instance % ocs_get_num_cpus()], wq)) {
                                ocs_log_err(hw->os, "HW wq_cpu_array ocs_varray_add failed\n");
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        break;
                }
                case QTOP_RQ: {
                        len = (qt->len) ? qt->len : default_lengths[QTOP_RQ];
                        if (qt->set_default) {
                                default_lengths[QTOP_RQ] = len;
                                break;
                        }

                        if ((hw->ulp_start + qt->ulp) > hw->ulp_max) {
                                ocs_log_err(hw->os, "invalid ULP %d for RQ\n", qt->ulp);
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_NO_MEMORY;
                        }

                        if (use_mrq) {
                                mrq.rq_cfg[mrq.num_pairs].len = len;
                                mrq.rq_cfg[mrq.num_pairs].ulp = hw->ulp_start + qt->ulp;
                                mrq.rq_cfg[mrq.num_pairs].filter_mask = qt->filter_mask;
                                mrq.rq_cfg[mrq.num_pairs].eq = eq;
                                mrq.num_pairs++;
                        } else {
                                rq = hw_new_rq(cq, len, hw->ulp_start + qt->ulp);
                                if (rq == NULL) {
                                        hw_queue_teardown(hw);
                                        return OCS_HW_RTN_NO_MEMORY;
                                }
                                rq->filter_mask = qt->filter_mask;
                        }
                        break;
                }

                case QTOP_MQ:
                        len = (qt->len) ? qt->len : default_lengths[QTOP_MQ];
                        if (qt->set_default) {
                                default_lengths[QTOP_MQ] = len;
                                break;
                        }

                        if (cq == NULL)
                                goto fail;

                        mq = hw_new_mq(cq, len);
                        if (mq == NULL) {
                                goto fail;
                        }
                        break;

                default:
                        ocs_hw_assert(0);
                        break;
                }
        }

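        /*
         * Second pass for MRQ: RQ entries that share a filter mask were
         * deferred above and are created here as a single CQ set/RQ set pair;
         * entries with a unique filter mask get their own CQ and RQ.
         */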
        if (mrq.num_pairs) {
                /* First create normal RQs. */
                for (i = 0; i < mrq.num_pairs; i++) {
                        for (j = 0; j < mrq.num_pairs; j++) {
                                if ((i != j) && (mrq.rq_cfg[i].filter_mask == mrq.rq_cfg[j].filter_mask)) {
                                        /* This should be created using set */
                                        if (rqset_filter_mask && (rqset_filter_mask != mrq.rq_cfg[i].filter_mask)) {
                                                ocs_log_crit(hw->os, "Can't create more than one RQ Set\n");
                                                hw_queue_teardown(hw);
                                                return OCS_HW_RTN_ERROR;
                                        } else if (!rqset_filter_mask) {
                                                rqset_filter_mask = mrq.rq_cfg[i].filter_mask;
                                                rqset_len = mrq.rq_cfg[i].len;
                                                rqset_ulp = mrq.rq_cfg[i].ulp;
                                        }
                                        eqs[rqset_count] = mrq.rq_cfg[i].eq;
                                        rqset_count++;
                                        break;
                                }
                        }
                        if (j == mrq.num_pairs) {
                                /* Normal RQ */
                                cq = hw_new_cq(mrq.rq_cfg[i].eq, default_lengths[QTOP_CQ]);
                                if (cq == NULL) {
                                        hw_queue_teardown(hw);
                                        return OCS_HW_RTN_NO_MEMORY;
                                }

                                rq = hw_new_rq(cq, mrq.rq_cfg[i].len, mrq.rq_cfg[i].ulp);
                                if (rq == NULL) {
                                        hw_queue_teardown(hw);
                                        return OCS_HW_RTN_NO_MEMORY;
                                }
                                rq->filter_mask = mrq.rq_cfg[i].filter_mask;
                        }
                }

                /* Now create RQ Set */
                if (rqset_count) {
                        if (rqset_count > OCE_HW_MAX_NUM_MRQ_PAIRS) {
                                ocs_log_crit(hw->os,
                                             "Max Supported MRQ pairs = %d\n",
                                             OCE_HW_MAX_NUM_MRQ_PAIRS);
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        /* Create CQ set */
                        if (hw_new_cq_set(eqs, cqs, rqset_count, default_lengths[QTOP_CQ])) {
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        /* Create RQ set */
                        if (hw_new_rq_set(cqs, rqs, rqset_count, rqset_len, rqset_ulp)) {
                                hw_queue_teardown(hw);
                                return OCS_HW_RTN_ERROR;
                        }

                        for (i = 0; i < rqset_count; i++) {
                                rqs[i]->filter_mask = rqset_filter_mask;
                                rqs[i]->is_mrq = TRUE;
                                rqs[i]->base_mrq_id = rqs[0]->hdr->id;
                        }

                        hw->hw_mrq_count = rqset_count;
                }
        }

        return OCS_HW_RTN_SUCCESS;
fail:
        hw_queue_teardown(hw);
        return OCS_HW_RTN_NO_MEMORY;
}

/**
 * @brief Allocate a new EQ object
 *
 * A new EQ object is instantiated
 *
 * @param hw pointer to HW object
 * @param entry_count number of entries in the EQ
 *
 * @return pointer to allocated EQ object
 */
hw_eq_t*
hw_new_eq(ocs_hw_t *hw, uint32_t entry_count)
{
        hw_eq_t *eq = ocs_malloc(hw->os, sizeof(*eq), OCS_M_ZERO | OCS_M_NOWAIT);

        if (eq != NULL) {
                eq->type = SLI_QTYPE_EQ;
                eq->hw = hw;
                eq->entry_count = entry_count;
                eq->instance = hw->eq_count++;
                eq->queue = &hw->eq[eq->instance];
                ocs_list_init(&eq->cq_list, hw_cq_t, link);

                eq->wq_array = ocs_varray_alloc(hw->os, OCS_HW_MAX_NUM_WQ);
                if (eq->wq_array == NULL) {
                        ocs_free(hw->os, eq, sizeof(*eq));
                        eq = NULL;
                } else {
                        if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) {
                                ocs_log_err(hw->os, "EQ[%d] allocation failure\n", eq->instance);
                                ocs_varray_free(eq->wq_array);
                                ocs_free(hw->os, eq, sizeof(*eq));
                                eq = NULL;
                        } else {
                                sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
                                hw->hw_eq[eq->instance] = eq;
                                ocs_list_add_tail(&hw->eq_list, eq);
                                ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id,
                                        eq->entry_count);
                        }
                }
        }
        return eq;
}

/**
 * @brief Allocate a new CQ object
 *
 * A new CQ object is instantiated
 *
 * @param eq pointer to parent EQ object
 * @param entry_count number of entries in the CQ
 *
 * @return pointer to allocated CQ object
 */
hw_cq_t*
hw_new_cq(hw_eq_t *eq, uint32_t entry_count)
{
        ocs_hw_t *hw = eq->hw;
        hw_cq_t *cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);

        if (cq != NULL) {
                cq->eq = eq;
                cq->type = SLI_QTYPE_CQ;
                cq->instance = eq->hw->cq_count++;
                cq->entry_count = entry_count;
                cq->queue = &hw->cq[cq->instance];

                ocs_list_init(&cq->q_list, hw_q_t, link);

                if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) {
                        ocs_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
                                cq->instance,
                                cq->entry_count);
                        ocs_free(hw->os, cq, sizeof(*cq));
                        cq = NULL;
                } else {
                        hw->hw_cq[cq->instance] = cq;
                        ocs_list_add_tail(&eq->cq_list, cq);
                        ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id,
                                cq->entry_count);
                }
        }
        return cq;
}

/**
 * @brief Allocate a new CQ Set of objects.
 *
 * @param eqs pointer to a set of EQ objects.
 * @param cqs pointer to a set of CQ objects to be returned.
 * @param num_cqs number of CQ queues in the set.
 * @param entry_count number of entries in the CQ.
 *
 * @return 0 on success and -1 on failure.
 */
uint32_t
hw_new_cq_set(hw_eq_t *eqs[], hw_cq_t *cqs[], uint32_t num_cqs, uint32_t entry_count)
{
        uint32_t i;
        ocs_hw_t *hw = eqs[0]->hw;
        sli4_t *sli4 = &hw->sli;
        hw_cq_t *cq = NULL;
        sli4_queue_t *qs[SLI_MAX_CQ_SET_COUNT], *assocs[SLI_MAX_CQ_SET_COUNT];

        /* Initialise CQS pointers to NULL */
        for (i = 0; i < num_cqs; i++) {
                cqs[i] = NULL;
        }

        for (i = 0; i < num_cqs; i++) {
                cq = ocs_malloc(hw->os, sizeof(*cq), OCS_M_ZERO | OCS_M_NOWAIT);
                if (cq == NULL)
                        goto error;

                cqs[i]          = cq;
                cq->eq          = eqs[i];
                cq->type        = SLI_QTYPE_CQ;
                cq->instance    = hw->cq_count++;
                cq->entry_count = entry_count;
                cq->queue       = &hw->cq[cq->instance];
                qs[i]           = cq->queue;
                assocs[i]       = eqs[i]->queue;
                ocs_list_init(&cq->q_list, hw_q_t, link);
        }

        if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assocs)) {
                ocs_log_err(hw->os, "Failed to create CQ Set\n");
                goto error;
        }

        for (i = 0; i < num_cqs; i++) {
                hw->hw_cq[cqs[i]->instance] = cqs[i];
                ocs_list_add_tail(&cqs[i]->eq->cq_list, cqs[i]);
        }

        return 0;

error:
        for (i = 0; i < num_cqs; i++) {
                if (cqs[i]) {
                        ocs_free(hw->os, cqs[i], sizeof(*cqs[i]));
                        cqs[i] = NULL;
                }
        }
        return -1;
}

/**
 * @brief Allocate a new MQ object
 *
 * A new MQ object is instantiated
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the MQ
 *
 * @return pointer to allocated MQ object
 */
hw_mq_t*
hw_new_mq(hw_cq_t *cq, uint32_t entry_count)
{
        ocs_hw_t *hw = cq->eq->hw;
        hw_mq_t *mq = ocs_malloc(hw->os, sizeof(*mq), OCS_M_ZERO | OCS_M_NOWAIT);

        if (mq != NULL) {
                mq->cq = cq;
                mq->type = SLI_QTYPE_MQ;
                mq->instance = cq->eq->hw->mq_count++;
                mq->entry_count = entry_count;
                mq->entry_size = OCS_HW_MQ_DEPTH;
                mq->queue = &hw->mq[mq->instance];

                if (sli_queue_alloc(&hw->sli, SLI_QTYPE_MQ,
                                    mq->queue,
                                    mq->entry_size,
                                    cq->queue, 0)) {
                        ocs_log_err(hw->os, "MQ allocation failure\n");
                        ocs_free(hw->os, mq, sizeof(*mq));
                        mq = NULL;
                } else {
                        hw->hw_mq[mq->instance] = mq;
                        ocs_list_add_tail(&cq->q_list, mq);
                        ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id,
                                mq->entry_count);
                }
        }
        return mq;
}

/**
 * @brief Allocate a new WQ object
 *
 * A new WQ object is instantiated
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the WQ
 * @param class WQ class
 * @param ulp index of chute
 *
 * @return pointer to allocated WQ object
 */
hw_wq_t*
hw_new_wq(hw_cq_t *cq, uint32_t entry_count, uint32_t class, uint32_t ulp)
{
        ocs_hw_t *hw = cq->eq->hw;
        hw_wq_t *wq = ocs_malloc(hw->os, sizeof(*wq), OCS_M_ZERO | OCS_M_NOWAIT);

        if (wq != NULL) {
                wq->hw = cq->eq->hw;
                wq->cq = cq;
                wq->type = SLI_QTYPE_WQ;
                wq->instance = cq->eq->hw->wq_count++;
                wq->entry_count = entry_count;
                wq->queue = &hw->wq[wq->instance];
                wq->ulp = ulp;
                wq->wqec_set_count = OCS_HW_WQEC_SET_COUNT;
                wq->wqec_count = wq->wqec_set_count;
                wq->free_count = wq->entry_count - 1;
                wq->class = class;
                ocs_list_init(&wq->pending_list, ocs_hw_wqe_t, link);

                if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
                        ocs_log_err(hw->os, "WQ allocation failure\n");
                        ocs_free(hw->os, wq, sizeof(*wq));
                        wq = NULL;
                } else {
                        hw->hw_wq[wq->instance] = wq;
                        ocs_list_add_tail(&cq->q_list, wq);
                        ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
                                wq->entry_count, wq->class, wq->ulp);
                }
        }
        return wq;
}

/**
 * @brief Allocate a hw_rq_t object
 *
 * Allocate an RQ object, which encapsulates 2 SLI queues (an RQ pair)
 *
 * @param cq pointer to parent CQ object
 * @param entry_count number of entries in the RQs
 * @param ulp ULP index for this RQ
 *
 * @return pointer to newly allocated hw_rq_t
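 *
 * The pair consists of a header queue, whose entries are sized by
 * OCS_HW_RQ_HEADER_SIZE, and a data queue, whose entries are sized by
 * hw->config.rq_default_buffer_size.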
 */
hw_rq_t*
hw_new_rq(hw_cq_t *cq, uint32_t entry_count, uint32_t ulp)
{
        ocs_hw_t *hw = cq->eq->hw;
        hw_rq_t *rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
        uint32_t max_hw_rq;

        ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);

        if (rq != NULL) {
                rq->instance = hw->hw_rq_count++;
                rq->cq = cq;
                rq->type = SLI_QTYPE_RQ;
                rq->ulp = ulp;

                rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));

                /* Create the header RQ */
                ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
                rq->hdr = &hw->rq[hw->rq_count];
                rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;

                if (sli_fc_rq_alloc(&hw->sli, rq->hdr,
                                    rq->entry_count,
                                    rq->hdr_entry_size,
                                    cq->queue,
                                    ulp, TRUE)) {
                        ocs_log_err(hw->os, "RQ allocation failure - header\n");
                        ocs_free(hw->os, rq, sizeof(*rq));
                        return NULL;
                }
                hw->hw_rq_lookup[hw->rq_count] = rq->instance;  /* Update hw_rq_lookup[] */
                hw->rq_count++;
                ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d hdr  size %4d ulp %d\n",
                        rq->instance, rq->hdr->id, rq->entry_count, rq->hdr_entry_size, rq->ulp);

                /* Create the default data RQ */
                ocs_hw_assert(hw->rq_count < ARRAY_SIZE(hw->rq));
                rq->data = &hw->rq[hw->rq_count];
                rq->data_entry_size = hw->config.rq_default_buffer_size;

                if (sli_fc_rq_alloc(&hw->sli, rq->data,
                                    rq->entry_count,
                                    rq->data_entry_size,
                                    cq->queue,
                                    ulp, FALSE)) {
                        ocs_log_err(hw->os, "RQ allocation failure - first burst\n");
                        ocs_free(hw->os, rq, sizeof(*rq));
                        return NULL;
                }
                hw->hw_rq_lookup[hw->rq_count] = rq->instance;  /* Update hw_rq_lookup[] */
                hw->rq_count++;
                ocs_log_debug(hw->os, "create rq[%2d] id %3d len %4d data size %4d ulp %d\n", rq->instance,
                        rq->data->id, rq->entry_count, rq->data_entry_size, rq->ulp);

                hw->hw_rq[rq->instance] = rq;
                ocs_list_add_tail(&cq->q_list, rq);

                rq->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
                                            rq->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
                if (rq->rq_tracker == NULL) {
                        ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
                        return NULL;
                }
        }
        return rq;
}

/**
 * @brief Allocate a hw_rq_t object SET
 *
 * Allocate an RQ object SET, where each element in the set
 * encapsulates 2 SLI queues (an RQ pair)
 *
 * @param cqs pointers to be associated with RQs.
 * @param rqs RQ pointers to be returned on success.
 * @param num_rq_pairs number of RQ pairs in the set.
 * @param entry_count number of entries in the RQs
 * @param ulp ULP index for this RQ
 *
 * @return 0 on success and -1 on failure.
 */
uint32_t
hw_new_rq_set(hw_cq_t *cqs[], hw_rq_t *rqs[], uint32_t num_rq_pairs, uint32_t entry_count, uint32_t ulp)
{
        ocs_hw_t *hw = cqs[0]->eq->hw;
        hw_rq_t *rq = NULL;
        sli4_queue_t *qs[SLI_MAX_RQ_SET_COUNT * 2] = { NULL };
        uint32_t max_hw_rq, i, q_count;

        ocs_hw_get(hw, OCS_HW_MAX_RQ_ENTRIES, &max_hw_rq);

        /* Initialise RQS pointers */
        for (i = 0; i < num_rq_pairs; i++) {
                rqs[i] = NULL;
        }

        for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
                rq = ocs_malloc(hw->os, sizeof(*rq), OCS_M_ZERO | OCS_M_NOWAIT);
                if (rq == NULL)
                        goto error;

                rqs[i] = rq;
                rq->instance = hw->hw_rq_count++;
                rq->cq = cqs[i];
                rq->type = SLI_QTYPE_RQ;
                rq->ulp = ulp;
                rq->entry_count = OCS_MIN(entry_count, OCS_MIN(max_hw_rq, OCS_HW_RQ_NUM_HDR));

                /* Header RQ */
                rq->hdr = &hw->rq[hw->rq_count];
                rq->hdr_entry_size = OCS_HW_RQ_HEADER_SIZE;
                hw->hw_rq_lookup[hw->rq_count] = rq->instance;
                hw->rq_count++;
                qs[q_count] = rq->hdr;

                /* Data RQ */
                rq->data = &hw->rq[hw->rq_count];
                rq->data_entry_size = hw->config.rq_default_buffer_size;
                hw->hw_rq_lookup[hw->rq_count] = rq->instance;
                hw->rq_count++;
                qs[q_count + 1] = rq->data;

                rq->rq_tracker = NULL;
        }

        if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
                            cqs[0]->queue->id,
                            rqs[0]->entry_count,
                            rqs[0]->hdr_entry_size,
                            rqs[0]->data_entry_size,
                            ulp)) {
                ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);
                goto error;
        }

        for (i = 0; i < num_rq_pairs; i++) {
                hw->hw_rq[rqs[i]->instance] = rqs[i];
                ocs_list_add_tail(&cqs[i]->q_list, rqs[i]);
                rqs[i]->rq_tracker = ocs_malloc(hw->os, sizeof(ocs_hw_sequence_t*) *
                                            rqs[i]->entry_count, OCS_M_ZERO | OCS_M_NOWAIT);
                if (rqs[i]->rq_tracker == NULL) {
                        ocs_log_err(hw->os, "RQ tracker buf allocation failure\n");
                        goto error;
                }
        }

        return 0;

error:
        for (i = 0; i < num_rq_pairs; i++) {
                if (rqs[i] != NULL) {
                        if (rqs[i]->rq_tracker != NULL) {
                                ocs_free(hw->os, rqs[i]->rq_tracker,
                                         sizeof(ocs_hw_sequence_t*) *
                                         rqs[i]->entry_count);
                        }
                        ocs_free(hw->os, rqs[i], sizeof(*rqs[i]));
                }
        }

        return -1;
}

/**
 * @brief Free an EQ object
 *
 * The EQ object and any child queue objects are freed
 *
 * @param eq pointer to EQ object
 *
 * @return none
 */
void
hw_del_eq(hw_eq_t *eq)
{
        if (eq != NULL) {
                hw_cq_t *cq;
                hw_cq_t *cq_next;

                ocs_list_foreach_safe(&eq->cq_list, cq, cq_next) {
                        hw_del_cq(cq);
                }
                ocs_varray_free(eq->wq_array);
                ocs_list_remove(&eq->hw->eq_list, eq);
                eq->hw->hw_eq[eq->instance] = NULL;
                ocs_free(eq->hw->os, eq, sizeof(*eq));
        }
}

/**
 * @brief Free a CQ object
 *
 * The CQ object and any child queue objects are freed
 *
 * @param cq pointer to CQ object
 *
 * @return none
 */
void
hw_del_cq(hw_cq_t *cq)
{
        if (cq != NULL) {
                hw_q_t *q;
                hw_q_t *q_next;

                ocs_list_foreach_safe(&cq->q_list, q, q_next) {
                        switch(q->type) {
                        case SLI_QTYPE_MQ:
                                hw_del_mq((hw_mq_t*) q);
                                break;
                        case SLI_QTYPE_WQ:
                                hw_del_wq((hw_wq_t*) q);
                                break;
                        case SLI_QTYPE_RQ:
                                hw_del_rq((hw_rq_t*) q);
                                break;
                        default:
                                break;
                        }
                }
                ocs_list_remove(&cq->eq->cq_list, cq);
                cq->eq->hw->hw_cq[cq->instance] = NULL;
                ocs_free(cq->eq->hw->os, cq, sizeof(*cq));
        }
}

/**
 * @brief Free an MQ object
 *
 * The MQ object is freed
 *
 * @param mq pointer to MQ object
 *
 * @return none
 */
void
hw_del_mq(hw_mq_t *mq)
{
        if (mq != NULL) {
                ocs_list_remove(&mq->cq->q_list, mq);
                mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
                ocs_free(mq->cq->eq->hw->os, mq, sizeof(*mq));
        }
}

/**
 * @brief Free a WQ object
 *
 * The WQ object is freed
 *
 * @param wq pointer to WQ object
 *
 * @return none
 */
void
hw_del_wq(hw_wq_t *wq)
{
        if (wq != NULL) {
                ocs_list_remove(&wq->cq->q_list, wq);
                wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
                ocs_free(wq->cq->eq->hw->os, wq, sizeof(*wq));
        }
}

/**
 * @brief Free an RQ object
 *
 * The RQ object is freed
 *
 * @param rq pointer to RQ object
 *
 * @return none
 */
void
hw_del_rq(hw_rq_t *rq)
{
        if (rq != NULL) {
                ocs_hw_t *hw = rq->cq->eq->hw;

                /* Free RQ tracker */
                if (rq->rq_tracker != NULL) {
                        ocs_free(hw->os, rq->rq_tracker, sizeof(ocs_hw_sequence_t*) * rq->entry_count);
                        rq->rq_tracker = NULL;
                }
                ocs_list_remove(&rq->cq->q_list, rq);
                hw->hw_rq[rq->instance] = NULL;
                ocs_free(hw->os, rq, sizeof(*rq));
        }
}

/**
 * @brief Display HW queue objects
 *
 * The HW queue objects are displayed using ocs_log
 *
 * @param hw pointer to HW object
 *
 * @return none
 */
void
hw_queue_dump(ocs_hw_t *hw)
{
        hw_eq_t *eq;
        hw_cq_t *cq;
        hw_q_t *q;
        hw_mq_t *mq;
        hw_wq_t *wq;
        hw_rq_t *rq;

        ocs_list_foreach(&hw->eq_list, eq) {
                ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
                ocs_list_foreach(&eq->cq_list, cq) {
                        ocs_printf("  cq[%d] id %2d current\n", cq->instance, cq->queue->id);
                        ocs_list_foreach(&cq->q_list, q) {
                                switch(q->type) {
                                case SLI_QTYPE_MQ:
                                        mq = (hw_mq_t *) q;
                                        ocs_printf("    mq[%d] id %2d\n", mq->instance, mq->queue->id);
                                        break;
                                case SLI_QTYPE_WQ:
                                        wq = (hw_wq_t *) q;
                                        ocs_printf("    wq[%d] id %2d\n", wq->instance, wq->queue->id);
                                        break;
                                case SLI_QTYPE_RQ:
                                        rq = (hw_rq_t *) q;
                                        ocs_printf("    rq[%d] hdr id %2d\n", rq->instance, rq->hdr->id);
                                        break;
                                default:
                                        break;
                                }
                        }
                }
        }
}

/**
 * @brief Teardown HW queue objects
 *
 * The HW queue objects are freed
 *
 * @param hw pointer to HW object
 *
 * @return none
 */
void
hw_queue_teardown(ocs_hw_t *hw)
{
        uint32_t i;
        hw_eq_t *eq;
        hw_eq_t *eq_next;

        if (ocs_list_valid(&hw->eq_list)) {
                ocs_list_foreach_safe(&hw->eq_list, eq, eq_next) {
                        hw_del_eq(eq);
                }
        }
        for (i = 0; i < ARRAY_SIZE(hw->wq_cpu_array); i++) {
                ocs_varray_free(hw->wq_cpu_array[i]);
                hw->wq_cpu_array[i] = NULL;
        }
        for (i = 0; i < ARRAY_SIZE(hw->wq_class_array); i++) {
                ocs_varray_free(hw->wq_class_array[i]);
                hw->wq_class_array[i] = NULL;
        }
}

/**
 * @brief Allocate a WQ to an IO object
 *
 * The next work queue index is used to assign a WQ to an IO.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CLASS, a WQ from io->wq_class is
 * selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_REQUEST, then a WQ from the EQ that
 * the IO request came in on is selected.
 *
 * If wq_steering is OCS_HW_WQ_STEERING_CPU, then a WQ associated with the
 * CPU the request is made on is selected.
 *
 * @param hw pointer to HW object
 * @param io pointer to IO object
 *
 * @return Return pointer to next WQ
 */
hw_wq_t *
ocs_hw_queue_next_wq(ocs_hw_t *hw, ocs_hw_io_t *io)
{
        hw_eq_t *eq;
        hw_wq_t *wq = NULL;

        switch(io->wq_steering) {
        case OCS_HW_WQ_STEERING_CLASS:
                if (likely(io->wq_class < ARRAY_SIZE(hw->wq_class_array))) {
                        wq = ocs_varray_iter_next(hw->wq_class_array[io->wq_class]);
                }
                break;
        case OCS_HW_WQ_STEERING_REQUEST:
                eq = io->eq;
                if (likely(eq != NULL)) {
                        wq = ocs_varray_iter_next(eq->wq_array);
                }
                break;
        case OCS_HW_WQ_STEERING_CPU: {
                uint32_t cpuidx = ocs_thread_getcpu();

                if (likely(cpuidx < ARRAY_SIZE(hw->wq_cpu_array))) {
                        wq = ocs_varray_iter_next(hw->wq_cpu_array[cpuidx]);
                }
                break;
        }
        }

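        /* Fall back to the first WQ if steering did not yield one. */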
        if (unlikely(wq == NULL)) {
                wq = hw->hw_wq[0];
        }

        return wq;
}

/**
 * @brief Return count of EQs for a queue topology object
 *
 * The EQ count in the HW's queue topology (hw->qtop) object is returned.
 *
 * @param hw pointer to HW object
 *
 * @return count of EQs
 */
uint32_t
ocs_hw_qtop_eq_count(ocs_hw_t *hw)
{
        return hw->qtop->entry_counts[QTOP_EQ];
}

#define TOKEN_LEN               32

/**
 * @brief return string given a QTOP entry
 *
 * @param entry QTOP entry
 *
 * @return string, or "unknown"
 */
#if HW_QTOP_DEBUG
static char *
qtopentry2s(ocs_hw_qtop_entry_e entry) {
        switch(entry) {
        #define P(x)    case x: return #x;
        P(QTOP_EQ)
        P(QTOP_CQ)
        P(QTOP_WQ)
        P(QTOP_RQ)
        P(QTOP_MQ)
        P(QTOP_THREAD_START)
        P(QTOP_THREAD_END)
        P(QTOP_LAST)
        #undef P
        }
        return "unknown";
}
#endif

/**
 * @brief Declare token types
 */
typedef enum {
        TOK_LPAREN = 1,
        TOK_RPAREN,
        TOK_COLON,
        TOK_EQUALS,
        TOK_QUEUE,
        TOK_ATTR_NAME,
        TOK_NUMBER,
        TOK_NUMBER_VALUE,
        TOK_NUMBER_LIST,
} tok_type_e;

/**
 * @brief Declare token sub-types
 */
typedef enum {
        TOK_SUB_EQ = 100,
        TOK_SUB_CQ,
        TOK_SUB_RQ,
        TOK_SUB_MQ,
        TOK_SUB_WQ,
        TOK_SUB_LEN,
        TOK_SUB_CLASS,
        TOK_SUB_ULP,
        TOK_SUB_FILTER,
} tok_subtype_e;

/**
 * @brief convert queue subtype to QTOP entry
 *
 * @param q queue subtype
 *
 * @return QTOP entry or 0
 */
static ocs_hw_qtop_entry_e
subtype2qtop(tok_subtype_e q)
{
        switch(q) {
        case TOK_SUB_EQ:        return QTOP_EQ;
        case TOK_SUB_CQ:        return QTOP_CQ;
        case TOK_SUB_RQ:        return QTOP_RQ;
        case TOK_SUB_MQ:        return QTOP_MQ;
        case TOK_SUB_WQ:        return QTOP_WQ;
        default:
                break;
        }
        return 0;
}

/**
 * @brief Declare token object
 */
typedef struct {
        tok_type_e type;
        tok_subtype_e subtype;
        char string[TOKEN_LEN];
} tok_t;

/**
 * @brief Declare token array object
 */
typedef struct {
        tok_t *tokens;                  /* Pointer to array of tokens */
        uint32_t alloc_count;           /* Number of token slots allocated */
        uint32_t inuse_count;           /* Number of tokens posted to array */
        uint32_t iter_idx;              /* Iterator index */
} tokarray_t;

/**
 * @brief Declare token match structure
 */
typedef struct {
        char *s;
        tok_type_e type;
        tok_subtype_e subtype;
} tokmatch_t;

/**
 * @brief test if character is ID start character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID start character
 */
static int32_t
idstart(int c)
{
        return  isalpha(c) || (c == '_') || (c == '$');
}

/**
 * @brief test if character is an ID character
 *
 * @param c character to test
 *
 * @return TRUE if character is an ID character
 */
static int32_t
idchar(int c)
{
        return idstart(c) || ocs_isdigit(c);
}

/**
 * @brief Declare single character matches
 */
static tokmatch_t cmatches[] = {
        {"(", TOK_LPAREN},
        {")", TOK_RPAREN},
        {":", TOK_COLON},
        {"=", TOK_EQUALS},
};

/**
 * @brief Declare identifier match strings
 */
static tokmatch_t smatches[] = {
        {"eq", TOK_QUEUE, TOK_SUB_EQ},
        {"cq", TOK_QUEUE, TOK_SUB_CQ},
        {"rq", TOK_QUEUE, TOK_SUB_RQ},
        {"mq", TOK_QUEUE, TOK_SUB_MQ},
        {"wq", TOK_QUEUE, TOK_SUB_WQ},
        {"len", TOK_ATTR_NAME, TOK_SUB_LEN},
        {"class", TOK_ATTR_NAME, TOK_SUB_CLASS},
        {"ulp", TOK_ATTR_NAME, TOK_SUB_ULP},
        {"filter", TOK_ATTR_NAME, TOK_SUB_FILTER},
};

/**
 * @brief Scan string and return next token
 *
 * The string is scanned and the next token is returned
 *
 * @param s input string to scan
 * @param tok pointer to place scanned token
 *
 * @return pointer to input string following scanned token, or NULL
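 *
 * For example (illustrative), scanning "cq:len=1024" yields, on successive
 * calls: TOK_QUEUE/TOK_SUB_CQ, TOK_COLON, TOK_ATTR_NAME/TOK_SUB_LEN,
 * TOK_EQUALS, and TOK_NUMBER ("1024").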
 */
static const char *
tokenize(const char *s, tok_t *tok)
{
        uint32_t i;

        memset(tok, 0, sizeof(*tok));

        /* Skip over whitespace */
        while (*s && ocs_isspace(*s)) {
                s++;
        }

        /* Return if nothing left in this string */
        if (*s == 0) {
                return NULL;
        }

        /* Look for single character matches */
        for (i = 0; i < ARRAY_SIZE(cmatches); i++) {
                if (cmatches[i].s[0] == *s) {
                        tok->type = cmatches[i].type;
                        tok->subtype = cmatches[i].subtype;
                        tok->string[0] = *s++;
                        return s;
                }
        }

        /* Scan for a hex number or decimal */
        if ((s[0] == '0') && ((s[1] == 'x') || (s[1] == 'X'))) {
                char *p = tok->string;

                tok->type = TOK_NUMBER;

                *p++ = *s++;
                *p++ = *s++;
                while ((*s == ',') || ocs_isxdigit(*s)) {
                        if ((p - tok->string) < (int32_t)sizeof(tok->string) - 1) {
                                *p++ = *s;
                        }
                        if (*s == ',') {
                                tok->type = TOK_NUMBER_LIST;
                        }
                        s++;
                }
                *p = 0;
                return s;
        } else if (ocs_isdigit(*s)) {
                char *p = tok->string;

                tok->type = TOK_NUMBER;
                while ((*s == ',') || ocs_isdigit(*s)) {
                        if ((p - tok->string) < (int32_t)sizeof(tok->string) - 1) {
                                *p++ = *s;
                        }
                        if (*s == ',') {
                                tok->type = TOK_NUMBER_LIST;
                        }
                        s++;
                }
                *p = 0;
                return s;
        }

        /* Scan for an ID */
        if (idstart(*s)) {
                char *p = tok->string;

                for (*p++ = *s++; idchar(*s); s++) {
                        if ((p - tok->string) < TOKEN_LEN - 1) {
                                *p++ = *s;
                        }
                }

                /* See if this is a $ number value */
                if (tok->string[0] == '$') {
                        tok->type = TOK_NUMBER_VALUE;
                } else {
                        /* Look for a string match */
                        for (i = 0; i < ARRAY_SIZE(smatches); i++) {
                                if (strcmp(smatches[i].s, tok->string) == 0) {
                                        tok->type = smatches[i].type;
                                        tok->subtype = smatches[i].subtype;
                                        return s;
                                }
                        }
                }
        }
        return s;
}

/**
 * @brief convert token type to string
 *
 * @param type token type
 *
 * @return string, or "unknown"
 */
static const char *
token_type2s(tok_type_e type)
{
        switch(type) {
        #define P(x)    case x: return #x;
        P(TOK_LPAREN)
        P(TOK_RPAREN)
        P(TOK_COLON)
        P(TOK_EQUALS)
        P(TOK_QUEUE)
        P(TOK_ATTR_NAME)
        P(TOK_NUMBER)
        P(TOK_NUMBER_VALUE)
        P(TOK_NUMBER_LIST)
        #undef P
        }
        return "unknown";
}

/**
 * @brief convert token sub-type to string
 *
 * @param subtype token sub-type
 *
 * @return string, or "" if unknown
 */
static const char *
token_subtype2s(tok_subtype_e subtype)
{
        switch(subtype) {
        #define P(x)    case x: return #x;
        P(TOK_SUB_EQ)
        P(TOK_SUB_CQ)
        P(TOK_SUB_RQ)
        P(TOK_SUB_MQ)
        P(TOK_SUB_WQ)
        P(TOK_SUB_LEN)
        P(TOK_SUB_CLASS)
        P(TOK_SUB_ULP)
        P(TOK_SUB_FILTER)
        #undef P
        }
        return "";
}

/**
 * @brief Generate syntax error message
 *
 * When a syntax error is found, the input tokens are dumped up to and
 * including the token that failed, as indicated by the current iterator
 * index.
 *
 * @param hw pointer to HW object
 * @param tokarray pointer to token array object
 *
 * @return none
 */
static void
tok_syntax(ocs_hw_t *hw, tokarray_t *tokarray)
{
        uint32_t i;
        tok_t *tok;

        ocs_log_test(hw->os, "Syntax error:\n");

        for (i = 0, tok = tokarray->tokens; (i <= tokarray->inuse_count); i++, tok++) {
                ocs_log_test(hw->os, "%s [%2d]    %-16s %-16s %s\n", (i == tokarray->iter_idx) ? ">>>" : "   ", i,
                        token_type2s(tok->type), token_subtype2s(tok->subtype), tok->string);
        }
}

/**
 * @brief parse a number
 *
 * Parses tokens of type TOK_NUMBER and TOK_NUMBER_VALUE, returning a numeric value
 *
 * @param hw pointer to HW object
 * @param qtop pointer to QTOP object
 * @param tok pointer to token to parse
 *
 * @return numeric value
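 *
 * Recognized symbolic values: $ncpu, $ncpu1 (CPU count minus one), $nwq,
 * $maxmrq, $nulp, and $rpt0 through $rpt3 (previous repeat counts); any
 * other string is parsed with ocs_strtoul().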
 */
static uint32_t
tok_getnumber(ocs_hw_t *hw, ocs_hw_qtop_t *qtop, tok_t *tok)
{
        uint32_t rval = 0;
        uint32_t num_cpus = ocs_get_num_cpus();

        switch(tok->type) {
        case TOK_NUMBER_VALUE:
                if (ocs_strcmp(tok->string, "$ncpu") == 0) {
                        rval = num_cpus;
                } else if (ocs_strcmp(tok->string, "$ncpu1") == 0) {
                        rval = num_cpus - 1;
                } else if (ocs_strcmp(tok->string, "$nwq") == 0) {
                        if (hw != NULL) {
                                rval = hw->config.n_wq;
                        }
                } else if (ocs_strcmp(tok->string, "$maxmrq") == 0) {
                        rval = MIN(num_cpus, OCS_HW_MAX_MRQS);
                } else if (ocs_strcmp(tok->string, "$nulp") == 0) {
                        if (hw != NULL) {
                                rval = hw->ulp_max - hw->ulp_start + 1;
                        }
                } else if ((qtop->rptcount_idx > 0) && ocs_strcmp(tok->string, "$rpt0") == 0) {
                        rval = qtop->rptcount[qtop->rptcount_idx-1];
                } else if ((qtop->rptcount_idx > 1) && ocs_strcmp(tok->string, "$rpt1") == 0) {
                        rval = qtop->rptcount[qtop->rptcount_idx-2];
                } else if ((qtop->rptcount_idx > 2) && ocs_strcmp(tok->string, "$rpt2") == 0) {
                        rval = qtop->rptcount[qtop->rptcount_idx-3];
                } else if ((qtop->rptcount_idx > 3) && ocs_strcmp(tok->string, "$rpt3") == 0) {
                        rval = qtop->rptcount[qtop->rptcount_idx-4];
                } else {
                        rval = ocs_strtoul(tok->string, 0, 0);
                }
                break;
        case TOK_NUMBER:
                rval = ocs_strtoul(tok->string, 0, 0);
                break;
        default:
                break;
        }
        return rval;
}

/**
 * @brief parse an array of tokens
 *
 * The tokens are semantically parsed, to generate QTOP entries.
 *
 * @param hw pointer to HW object
 * @param tokarray array of tokens
 * @param qtop output QTOP object
 *
 * @return returns 0 for success, a negative error code value for failure.
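 *
 * For example (illustrative), the tokens for "wq:class=1:len=256" produce a
 * QTOP_WQ entry with class 1 and len 256, while "len:cq=2048" produces a
 * set_default entry that changes the default CQ length to 2048.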
 */
static int32_t
parse_topology(ocs_hw_t *hw, tokarray_t *tokarray, ocs_hw_qtop_t *qtop)
{
        ocs_hw_qtop_entry_t *qt = qtop->entries + qtop->inuse_count;
        tok_t *tok;

        for (; (tokarray->iter_idx < tokarray->inuse_count) &&
             ((tok = &tokarray->tokens[tokarray->iter_idx]) != NULL); ) {
                if (qtop->inuse_count >= qtop->alloc_count) {
                        return -1;
                }

                qt = qtop->entries + qtop->inuse_count;

                switch (tok[0].type)
                {
                case TOK_QUEUE:
                        qt->entry = subtype2qtop(tok[0].subtype);
                        qt->set_default = FALSE;
                        qt->len = 0;
                        qt->class = 0;
                        qtop->inuse_count++;

                        tokarray->iter_idx++;           /* Advance current token index */

                        /* Parse for queue attributes, possibly multiple instances */
                        while ((tokarray->iter_idx + 4) <= tokarray->inuse_count) {
                                tok = &tokarray->tokens[tokarray->iter_idx];
                                if ((tok[0].type == TOK_COLON) &&
                                    (tok[1].type == TOK_ATTR_NAME) &&
                                    (tok[2].type == TOK_EQUALS) &&
                                    ((tok[3].type == TOK_NUMBER) ||
                                     (tok[3].type == TOK_NUMBER_VALUE) ||
                                     (tok[3].type == TOK_NUMBER_LIST))) {
                                        switch (tok[1].subtype) {
                                        case TOK_SUB_LEN:
                                                qt->len = tok_getnumber(hw, qtop, &tok[3]);
                                                break;

                                        case TOK_SUB_CLASS:
                                                qt->class = tok_getnumber(hw, qtop, &tok[3]);
                                                break;

                                        case TOK_SUB_ULP:
                                                qt->ulp = tok_getnumber(hw, qtop, &tok[3]);
                                                break;

                                        case TOK_SUB_FILTER:
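                                                /*
                                                 * "filter=<n>" sets bit n of the mask; a comma-separated
                                                 * list such as "filter=0,2" (illustrative) ORs in each
                                                 * listed bit, giving 0x5.
                                                 */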
                                                if (tok[3].type == TOK_NUMBER_LIST) {
                                                        uint32_t mask = 0;
                                                        char *p = tok[3].string;

                                                        while ((p != NULL) && *p) {
                                                                uint32_t v;

                                                                v = ocs_strtoul(p, 0, 0);
                                                                if (v < 32) {
                                                                        mask |= (1U << v);
                                                                }

                                                                p = ocs_strchr(p, ',');
                                                                if (p != NULL) {
                                                                        p++;
                                                                }
                                                        }
                                                        qt->filter_mask = mask;
                                                } else {
                                                        qt->filter_mask = (1U << tok_getnumber(hw, qtop, &tok[3]));
                                                }
                                                break;
                                        default:
                                                break;
                                        }
                                        /* Advance current token index */
                                        tokarray->iter_idx += 4;
                                } else {
                                        break;
                                }
                        }
                        qtop->entry_counts[qt->entry]++;
                        break;

                case TOK_ATTR_NAME:
                        if (    ((tokarray->iter_idx + 5) <= tokarray->inuse_count) &&
                                (tok[1].type == TOK_COLON) &&
                                (tok[2].type == TOK_QUEUE) &&
                                (tok[3].type == TOK_EQUALS) &&
                                ((tok[4].type == TOK_NUMBER) || (tok[4].type == TOK_NUMBER_VALUE))) {
                                qt->entry = subtype2qtop(tok[2].subtype);
                                qt->set_default = TRUE;
                                switch(tok[0].subtype) {
                                case TOK_SUB_LEN:
                                        qt->len = tok_getnumber(hw, qtop, &tok[4]);
                                        break;
                                case TOK_SUB_CLASS:
                                        qt->class = tok_getnumber(hw, qtop, &tok[4]);
                                        break;
                                case TOK_SUB_ULP:
                                        qt->ulp = tok_getnumber(hw, qtop, &tok[4]);
                                        break;
                                default:
                                        break;
                                }
                                qtop->inuse_count++;
                                tokarray->iter_idx += 5;
                        } else {
                                tok_syntax(hw, tokarray);
                                return -1;
                        }
                        break;

                case TOK_NUMBER:
                case TOK_NUMBER_VALUE: {
                        uint32_t rpt_count = 1;
1540                         uint32_t i;
1541
1542                         rpt_count = tok_getnumber(hw, qtop, tok);
1543
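                             /* A leading count repeats a parenthesized group; e.g. the
                              * hypothetical fragment "2(eq cq wq)" parses the group twice,
                              * re-running the tokens below from the same saved index. */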
1544                         if (tok[1].type == TOK_LPAREN) {
1545                                 uint32_t iter_idx_save;
1546
1547                                 tokarray->iter_idx += 2;
1548
1549                                 /* save token array iteration index */
1550                                 iter_idx_save = tokarray->iter_idx;
1551
1552                                 for (i = 0; i < rpt_count; i++) {
1553                                         uint32_t rptcount_idx = qtop->rptcount_idx;
1554
1555                                         if (qtop->rptcount_idx < ARRAY_SIZE(qtop->rptcount)) {
1556                                                 qtop->rptcount[qtop->rptcount_idx++] = i;
1557                                         }
1558
1559                                         /* restore token array iteration index */
1560                                         tokarray->iter_idx = iter_idx_save;
1561
1562                                         /* parse, append to qtop */
1563                                         parse_topology(hw, tokarray, qtop);
1564
1565                                         qtop->rptcount_idx = rptcount_idx;
1566                                 }
1567                         } else {
                                      /* A bare count must be followed by '(' or the token
                                       * index would never advance; treat it as a syntax
                                       * error rather than looping forever. */
                                      tok_syntax(hw, tokarray);
                                      return -1;
                              }
1568                         break;
1569                 }
1570
1571                 case TOK_RPAREN:
1572                         tokarray->iter_idx++;
1573                         return 0;
1574
1575                 default:
1576                         tok_syntax(hw, tokarray);
1577                         return -1;
1578                 }
1579         }
1580         return 0;
1581 }
1582
1583 /**
1584  * @brief Parse queue topology string
1585  *
1586  * The queue topology object is allocated and filled with the results of parsing
1587  * the passed-in queue topology string.
1588  *
1589  * @param hw pointer to HW object
1590  * @param qtop_string input queue topology string
1591  *
1592  * @return pointer to allocated QTOP object, or NULL if there was an error
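 *
 * @par Example
 * A minimal sketch of the expected call pattern; the topology string shown
 * is hypothetical:
 * @code
 * ocs_hw_qtop_t *qtop = ocs_hw_qtop_parse(hw, "eq cq wq cq rq");
 * if (qtop != NULL) {
 *         // ... use qtop, e.g. with ocs_hw_init_queues() ...
 *         ocs_hw_qtop_free(qtop);
 * }
 * @endcode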
1593  */
1594 ocs_hw_qtop_t *
1595 ocs_hw_qtop_parse(ocs_hw_t *hw, const char *qtop_string)
1596 {
1597         ocs_hw_qtop_t *qtop;
1598         tokarray_t tokarray;
1599         const char *s;
1600 #if HW_QTOP_DEBUG
1601         uint32_t i;
1602         ocs_hw_qtop_entry_t *qt;
1603 #endif
1604
1605         ocs_log_debug(hw->os, "queue topology: %s\n", qtop_string);
1606
1607         /* Allocate a token array */
1608         tokarray.tokens = ocs_malloc(hw->os, MAX_TOKENS * sizeof(*tokarray.tokens), OCS_M_ZERO | OCS_M_NOWAIT);
1609         if (tokarray.tokens == NULL) {
1610                 return NULL;
1611         }
1612         tokarray.alloc_count = MAX_TOKENS;
1613         tokarray.inuse_count = 0;
1614         tokarray.iter_idx = 0;
1615
1616         /* Parse the tokens */
1617         for (s = qtop_string; (tokarray.inuse_count < tokarray.alloc_count) &&
1618              ((s = tokenize(s, &tokarray.tokens[tokarray.inuse_count]))) != NULL; ) {
1619                 tokarray.inuse_count++;
1620         }
1621
1622         /* Allocate a queue topology structure */
1623         qtop = ocs_malloc(hw->os, sizeof(*qtop), OCS_M_ZERO | OCS_M_NOWAIT);
1624         if (qtop == NULL) {
1625                 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1626                 ocs_log_err(hw->os, "malloc qtop failed\n");
1627                 return NULL;
1628         }
1629         qtop->os = hw->os;
1630
1631         /* Allocate queue topology entries */
1632         qtop->entries = ocs_malloc(hw->os, OCS_HW_MAX_QTOP_ENTRIES*sizeof(*qtop->entries), OCS_M_ZERO | OCS_M_NOWAIT);
1633         if (qtop->entries == NULL) {
1634                 ocs_log_err(hw->os, "malloc qtop entries failed\n");
1635                 ocs_free(hw->os, qtop, sizeof(*qtop));
1636                 ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1637                 return NULL;
1638         }
1639         qtop->alloc_count = OCS_HW_MAX_QTOP_ENTRIES;
1640         qtop->inuse_count = 0;
1641
1642         /* Parse the tokens */
1643         parse_topology(hw, &tokarray, qtop);
1644 #if HW_QTOP_DEBUG
1645         for (i = 0, qt = qtop->entries; i < qtop->inuse_count; i++, qt++) {
1646                 ocs_log_debug(hw->os, "entry %s set_df %d len %4d class %d ulp %d\n", qtopentry2s(qt->entry), qt->set_default, qt->len,
1647                        qt->class, qt->ulp);
1648         }
1649 #endif
1650
1651         /* Free the tokens array */
1652         ocs_free(hw->os, tokarray.tokens, MAX_TOKENS * sizeof(*tokarray.tokens));
1653
1654         return qtop;
1655 }
1656
1657 /**
1658  * @brief free queue topology object
1659  *
1660  * @param qtop pointer to QTOP object
1661  *
1662  * @return none
1663  */
1664 void
1665 ocs_hw_qtop_free(ocs_hw_qtop_t *qtop)
1666 {
1667         if (qtop != NULL) {
1668                 if (qtop->entries != NULL) {
1669                         ocs_free(qtop->os, qtop->entries, qtop->alloc_count*sizeof(*qtop->entries));
1670                 }
1671                 ocs_free(qtop->os, qtop, sizeof(*qtop));
1672         }
1673 }
1674
1675 /* Uncomment this to turn on RQ debug */
1676 /* #define ENABLE_DEBUG_RQBUF */
1677
1678 static int32_t ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id);
1679 static ocs_hw_sequence_t * ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex);
1680 static int32_t ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1681 static ocs_hw_rtn_e ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq);
1682
1683 /**
1684  * @brief Process receive queue completions for RQ Pair mode.
1685  *
1686  * @par Description
1687  * RQ completions are processed. In RQ pair mode, a single header and single payload
1688  * buffer are received, and passed to the function that has registered for unsolicited
1689  * callbacks.
1690  *
1691  * @param hw Hardware context.
1692  * @param cq Pointer to HW completion queue.
1693  * @param cqe Completion queue entry.
1694  *
1695  * @return Returns 0 for success, or a negative error code value for failure.
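 *
 * @par Example
 * A sketch of an unsolicited handler; the callback signature is assumed
 * from the call sites below and the name my_unsol_cb is hypothetical. The
 * handler owns the sequence until the buffers are returned to the RQ:
 * @code
 * static int32_t
 * my_unsol_cb(void *arg, ocs_hw_sequence_t *seq)
 * {
 *         // ... inspect seq->header->dma and seq->payload->dma ...
 *         ocs_hw_rqpair_sequence_free(seq->hw, seq); // return buffers to the chip
 *         return 0;
 * }
 * @endcode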
1696  */
1697
1698 int32_t
1699 ocs_hw_rqpair_process_rq(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1700 {
1701         uint16_t rq_id;
1702         uint32_t index;
1703         int32_t rqindex;
1704         int32_t  rq_status;
1705         uint32_t h_len;
1706         uint32_t p_len;
1707         ocs_hw_sequence_t *seq;
1708
1709         rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1710         if (0 != rq_status) {
1711                 switch (rq_status) {
1712                 case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1713                 case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1714                         /* just get RQ buffer then return to chip */
1715                         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1716                         if (rqindex < 0) {
1717                                 ocs_log_test(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1718                                              rq_status, rq_id);
1719                                 break;
1720                         }
1721
1722                         /* get RQ buffer */
1723                         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1724
1725                         /* return to chip */
1726                         if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1727                                 ocs_log_test(hw->os, "status=%#x, failed to return buffers to RQ\n",
1728                                              rq_status);
1729                                 break;
1730                         }
1731                         break;
1732                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1733                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1734                         /* since RQ buffers were not consumed, cannot return them to chip */
1735                         ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1736                         /* fall through */
1737                 default:
1738                         break;
1739                 }
1740                 return -1;
1741         }
1742
1743         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1744         if (rqindex < 0) {
1745                 ocs_log_test(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1746                 return -1;
1747         }
1748
1749         OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
1750                  rq->payload_use_count++;})
1751
1752         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1753         ocs_hw_assert(seq != NULL);
1754
1755         seq->hw = hw;
1756         seq->auto_xrdy = 0;
1757         seq->out_of_xris = 0;
1758         seq->xri = 0;
1759         seq->hio = NULL;
1760
1761         sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1762         seq->header->dma.len = h_len;
1763         seq->payload->dma.len = p_len;
1764         seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1765         seq->hw_priv = cq->eq;
1766
1767         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1768         if (hw->config.bounce) {
1769                 fc_header_t *hdr = seq->header->dma.virt;
1770                 uint32_t s_id = fc_be24toh(hdr->s_id);
1771                 uint32_t d_id = fc_be24toh(hdr->d_id);
1772                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1773                 if (hw->callback.bounce != NULL) {
1774                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1775                 }
1776         } else {
1777                 hw->callback.unsolicited(hw->args.unsolicited, seq);
1778         }
1779
1780         return 0;
1781 }
1782
1783 /**
1784  * @brief Process receive queue completions for RQ Pair mode - Auto xfer rdy
1785  *
1786  * @par Description
1787  * RQ completions are processed. In RQ pair mode, a single header and single payload
1788  * buffer are received, and passed to the function that has registered for unsolicited
1789  * callbacks.
1790  *
1791  * @param hw Hardware context.
1792  * @param cq Pointer to HW completion queue.
1793  * @param cqe Completion queue entry.
1794  *
1795  * @return Returns 0 for success, or a negative error code value for failure.
1796  */
1797
1798 int32_t
1799 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1800 {
1801         /* Seems silly to call a SLI function to decode - use the structure directly for performance */
1802         sli4_fc_optimized_write_cmd_cqe_t *opt_wr = (sli4_fc_optimized_write_cmd_cqe_t*)cqe;
1803         uint16_t rq_id;
1804         uint32_t index;
1805         int32_t rqindex;
1806         int32_t  rq_status;
1807         uint32_t h_len;
1808         uint32_t p_len;
1809         ocs_hw_sequence_t *seq;
1810         uint8_t axr_lock_taken = 0;
1811 #if defined(OCS_DISC_SPIN_DELAY)
1812         uint32_t        delay = 0;
1813         char            prop_buf[32];
1814 #endif
1815
1816         rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe, &rq_id, &index);
1817         if (0 != rq_status) {
1818                 switch (rq_status) {
1819                 case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
1820                 case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
1821                         /* just get RQ buffer then return to chip */
1822                         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1823                         if (rqindex < 0) {
1824                                 ocs_log_err(hw->os, "status=%#x: rq_id lookup failed for id=%#x\n",
1825                                             rq_status, rq_id);
1826                                 break;
1827                         }
1828
1829                         /* get RQ buffer */
1830                         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1831
1832                         /* return to chip */
1833                         if (ocs_hw_rqpair_sequence_free(hw, seq)) {
1834                                 ocs_log_err(hw->os, "status=%#x, failed to return buffers to RQ\n",
1835                                             rq_status);
1836                                 break;
1837                         }
1838                         break;
1839                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
1840                 case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
1841                         /* since RQ buffers were not consumed, cannot return them to chip */
1842                         ocs_log_debug(hw->os, "Warning: RCQE status=%#x\n", rq_status);
1843                         /* fall through */
1844                 default:
1845                         break;
1846                 }
1847                 return -1;
1848         }
1849
1850         rqindex = ocs_hw_rqpair_find(hw, rq_id);
1851         if (rqindex < 0) {
1852                 ocs_log_err(hw->os, "Error: rq_id lookup failed for id=%#x\n", rq_id);
1853                 return -1;
1854         }
1855
1856         OCS_STAT({ hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]]; rq->use_count++; rq->hdr_use_count++;
1857                  rq->payload_use_count++;})
1858
1859         seq = ocs_hw_rqpair_get(hw, rqindex, index);
1860         ocs_hw_assert(seq != NULL);
1861
1862         seq->hw = hw;
1863         seq->auto_xrdy = opt_wr->agxr;
1864         seq->out_of_xris = opt_wr->oox;
1865         seq->xri = opt_wr->xri;
1866         seq->hio = NULL;
1867
1868         sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
1869         seq->header->dma.len = h_len;
1870         seq->payload->dma.len = p_len;
1871         seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
1872         seq->hw_priv = cq->eq;
1873
1874         if (seq->auto_xrdy) {
1875                 fc_header_t *fc_hdr = seq->header->dma.virt;
1876
1877                 seq->hio = ocs_hw_io_lookup(hw, seq->xri);
1878                 ocs_lock(&seq->hio->axr_lock);
1879                 axr_lock_taken = 1;
1880
1881                 /* save the FCFI, src_id, dest_id and ox_id because we need them for the sequence object when the data arrives. */
1882                 seq->hio->axr_buf->fcfi = seq->fcfi;
1883                 seq->hio->axr_buf->hdr.ox_id = fc_hdr->ox_id;
1884                 seq->hio->axr_buf->hdr.s_id = fc_hdr->s_id;
1885                 seq->hio->axr_buf->hdr.d_id = fc_hdr->d_id;
1886                 seq->hio->axr_buf->cmd_cqe = 1;
1887
1888                 /*
1889                  * Since auto xfer rdy is used for this IO, then clear the sequence
1890                  * initiative bit in the header so that the upper layers wait for the
1891                  * data. This should flow exactly like the first burst case.
1892                  */
1893                 fc_hdr->f_ctl &= fc_htobe24(~FC_FCTL_SEQUENCE_INITIATIVE);
1894
1895                 /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
1896                 if (seq->hio->type == OCS_HW_IO_TARGET_RSP) {
1897                         seq->hio->axr_buf->call_axr_cmd = 1;
1898                         seq->hio->axr_buf->cmd_seq = seq;
1899                         goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd;
1900                 }
1901         }
1902
1903         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1904         if (hw->config.bounce) {
1905                 fc_header_t *hdr = seq->header->dma.virt;
1906                 uint32_t s_id = fc_be24toh(hdr->s_id);
1907                 uint32_t d_id = fc_be24toh(hdr->d_id);
1908                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1909                 if (hw->callback.bounce != NULL) {
1910                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
1911                 }
1912         } else {
1913                 hw->callback.unsolicited(hw->args.unsolicited, seq);
1914         }
1915
1916         if (seq->auto_xrdy) {
1917                 /* If the data CQE arrived before the cmd CQE (out of order) in the AXR case */
1918                 if(seq->hio->axr_buf->data_cqe == 1) {
1919 #if defined(OCS_DISC_SPIN_DELAY)
1920                         if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
1921                                 delay = ocs_strtoul(prop_buf, 0, 0);
1922                                 ocs_udelay(delay);
1923                         }
1924 #endif
1925                         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
1926                         if (hw->config.bounce) {
1927                                 fc_header_t *hdr = seq->header->dma.virt;
1928                                 uint32_t s_id = fc_be24toh(hdr->s_id);
1929                                 uint32_t d_id = fc_be24toh(hdr->d_id);
1930                                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
1931                                 if (hw->callback.bounce != NULL) {
1932                                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &seq->hio->axr_buf->seq, s_id, d_id, ox_id);
1933                                 }
1934                         } else {
1935                                 hw->callback.unsolicited(hw->args.unsolicited, &seq->hio->axr_buf->seq);
1936                         }
1937                 }
1938         }
1939
1940 exit_ocs_hw_rqpair_process_auto_xfr_rdy_cmd:
1941         if(axr_lock_taken) {
1942                 ocs_unlock(&seq->hio->axr_lock);
1943         }
1944         return 0;
1945 }
1946
1947 /**
1948  * @brief Process CQ completions for Auto xfer rdy data phases.
1949  *
1950  * @par Description
1951  * The data is DMA'd into the data buffer posted to the SGL prior to the XRI
1952  * being assigned to an IO. When the completion is received, all of the data
1953  * is in a single buffer.
1954  *
1955  * @param hw Hardware context.
1956  * @param cq Pointer to HW completion queue.
1957  * @param cqe Completion queue entry.
1958  *
1959  * @return Returns 0 for success, or a negative error code value for failure.
1960  */
1961
1962 int32_t
1963 ocs_hw_rqpair_process_auto_xfr_rdy_data(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe)
1964 {
1965         /* Seems silly to call a SLI function to decode - use the structure directly for performance */
1966         sli4_fc_optimized_write_data_cqe_t *opt_wr = (sli4_fc_optimized_write_data_cqe_t*)cqe;
1967         ocs_hw_sequence_t *seq;
1968         ocs_hw_io_t *io;
1969         ocs_hw_auto_xfer_rdy_buffer_t *buf;
1970 #if defined(OCS_DISC_SPIN_DELAY)
1971         uint32_t        delay = 0;
1972         char            prop_buf[32];
1973 #endif
1974         /* Look up the IO */
1975         io = ocs_hw_io_lookup(hw, opt_wr->xri);
1976         ocs_lock(&io->axr_lock);
1977         buf = io->axr_buf;
1978         buf->data_cqe = 1;
1979         seq = &buf->seq;
1980         seq->hw = hw;
1981         seq->auto_xrdy = 1;
1982         seq->out_of_xris = 0;
1983         seq->xri = opt_wr->xri;
1984         seq->hio = io;
1985         seq->header = &buf->header;
1986         seq->payload = &buf->payload;
1987
1988         seq->header->dma.len = sizeof(fc_header_t);
1989         seq->payload->dma.len = opt_wr->total_data_placed;
1990         seq->fcfi = buf->fcfi;
1991         seq->hw_priv = cq->eq;
1992
1993         if (opt_wr->status == SLI4_FC_WCQE_STATUS_SUCCESS) {
1994                 seq->status = OCS_HW_UNSOL_SUCCESS;
1995         } else if (opt_wr->status == SLI4_FC_WCQE_STATUS_REMOTE_STOP) {
1996                 seq->status = OCS_HW_UNSOL_ABTS_RCVD;
1997         } else {
1998                 seq->status = OCS_HW_UNSOL_ERROR;
1999         }
2000
2001         /* If AXR CMD CQE came before previous TRSP CQE of same XRI */
2002         if(io->type == OCS_HW_IO_TARGET_RSP) {
2003                 io->axr_buf->call_axr_data = 1;
2004                 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2005         }
2006
2007         if(!buf->cmd_cqe) {
2008                 /* if data cqe came before cmd cqe, return here, cmd cqe will handle */
2009                 goto exit_ocs_hw_rqpair_process_auto_xfr_rdy_data;
2010         }
2011 #if defined(OCS_DISC_SPIN_DELAY)
2012         if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
2013                 delay = ocs_strtoul(prop_buf, 0, 0);
2014                 ocs_udelay(delay);
2015         }
2016 #endif
2017
2018         /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
2019         if (hw->config.bounce) {
2020                 fc_header_t *hdr = seq->header->dma.virt;
2021                 uint32_t s_id = fc_be24toh(hdr->s_id);
2022                 uint32_t d_id = fc_be24toh(hdr->d_id);
2023                 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
2024                 if (hw->callback.bounce != NULL) {
2025                         (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, seq, s_id, d_id, ox_id);
2026                 }
2027         } else {
2028                 hw->callback.unsolicited(hw->args.unsolicited, seq);
2029         }
2030
2031 exit_ocs_hw_rqpair_process_auto_xfr_rdy_data:
2032         ocs_unlock(&io->axr_lock);
2033         return 0;
2034 }
2035
2036 /**
2037  * @brief Return pointer to RQ buffer entry.
2038  *
2039  * @par Description
2040  * Returns a pointer to the RQ buffer entry given by @c rqindex and @c bufindex.
2041  *
2042  * @param hw Hardware context.
2043  * @param rqindex Index of the RQ that is being processed.
2044  * @param bufindex Index into the RQ that is being processed.
2045  *
2046  * @return Pointer to the sequence structure, or NULL otherwise.
2047  */
2048 static ocs_hw_sequence_t *
2049 ocs_hw_rqpair_get(ocs_hw_t *hw, uint16_t rqindex, uint16_t bufindex)
2050 {
2051         sli4_queue_t *rq_hdr = &hw->rq[rqindex];
2052         sli4_queue_t *rq_payload = &hw->rq[rqindex+1];
2053         ocs_hw_sequence_t *seq = NULL;
2054         hw_rq_t *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
2055
2056 #if defined(ENABLE_DEBUG_RQBUF)
2057         uint64_t rqbuf_debug_value = 0xdead0000 | ((rq->id & 0xf) << 12) | (bufindex & 0xfff);
2058 #endif
2059
2060         if (bufindex >= rq_hdr->length) {
2061                 ocs_log_err(hw->os, "RQ index %d bufindex %d exceeds ring length %d for id %d\n",
2062                             rqindex, bufindex, rq_hdr->length, rq_hdr->id);
2063                 return NULL;
2064         }
2065
2066         sli_queue_lock(rq_hdr);
2067         sli_queue_lock(rq_payload);
2068
2069 #if defined(ENABLE_DEBUG_RQBUF)
2070         /* Put a debug value into the rq, to track which entries are still valid */
2071         _sli_queue_poke(&hw->sli, rq_hdr, bufindex, (uint8_t *)&rqbuf_debug_value);
2072         _sli_queue_poke(&hw->sli, rq_payload, bufindex, (uint8_t *)&rqbuf_debug_value);
2073 #endif
2074
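        /* The tracker slot was populated by ocs_hw_rqpair_put() when this
         * buffer pair was posted; consume the sequence and clear the slot. */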
2075         seq = rq->rq_tracker[bufindex];
2076         rq->rq_tracker[bufindex] = NULL;
2077
2078         if (seq == NULL) {
2079                 ocs_log_err(hw->os, "RQ buffer NULL, rqindex %d, bufindex %d, current q index = %d\n",
2080                             rqindex, bufindex, rq_hdr->index);
2081         }
2082
2083         sli_queue_unlock(rq_payload);
2084         sli_queue_unlock(rq_hdr);
2085         return seq;
2086 }
2087
2088 /**
2089  * @brief Posts an RQ buffer to a queue and updates the verification structures.
2090  *
2091  * @param hw Hardware context.
2092  * @param seq Pointer to sequence object.
2093  *
2094  * @return Returns 0 on success, or a non-zero value otherwise.
2095  */
2096 static int32_t
2097 ocs_hw_rqpair_put(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2098 {
2099         sli4_queue_t *rq_hdr = &hw->rq[seq->header->rqindex];
2100         sli4_queue_t *rq_payload = &hw->rq[seq->payload->rqindex];
2101         uint32_t hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
2102         hw_rq_t *rq = hw->hw_rq[hw_rq_index];
2103         uint32_t     phys_hdr[2];
2104         uint32_t     phys_payload[2];
2105         int32_t      qindex_hdr;
2106         int32_t      qindex_payload;
2107
2108         /* Capture the header and payload buffer physical addresses for the RQEs */
2109         phys_hdr[0] = ocs_addr32_hi(seq->header->dma.phys);
2110         phys_hdr[1] = ocs_addr32_lo(seq->header->dma.phys);
2111         phys_payload[0] = ocs_addr32_hi(seq->payload->dma.phys);
2112         phys_payload[1] = ocs_addr32_lo(seq->payload->dma.phys);
2113
2114         sli_queue_lock(rq_hdr);
2115         sli_queue_lock(rq_payload);
2116
2117         /*
2118          * Note: The header must be posted last for buffer pair mode because
2119          *       posting on the header queue posts the payload queue as well.
2120          *       We do not ring the payload queue independently in RQ pair mode.
2121          */
2122         qindex_payload = _sli_queue_write(&hw->sli, rq_payload, (void *)phys_payload);
2123         qindex_hdr = _sli_queue_write(&hw->sli, rq_hdr, (void *)phys_hdr);
2124         if (qindex_hdr < 0 ||
2125             qindex_payload < 0) {
2126                 ocs_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
2127                 sli_queue_unlock(rq_payload);
2128                 sli_queue_unlock(rq_hdr);
2129                 return OCS_HW_RTN_ERROR;
2130         }
2131
2132         /* ensure the indexes are the same */
2133         ocs_hw_assert(qindex_hdr == qindex_payload);
2134
2135         /* Update the lookup table */
2136         if (rq->rq_tracker[qindex_hdr] == NULL) {
2137                 rq->rq_tracker[qindex_hdr] = seq;
2138         } else {
2139                 ocs_log_test(hw->os, "expected rq_tracker[%d][%d] buffer to be NULL\n",
2140                              hw_rq_index, qindex_hdr);
2141         }
2142
2143         sli_queue_unlock(rq_payload);
2144         sli_queue_unlock(rq_hdr);
2145         return OCS_HW_RTN_SUCCESS;
2146 }
2147
2148 /**
2149  * @brief Return RQ buffers (while in RQ pair mode).
2150  *
2151  * @par Description
2152  * The header and payload buffers are returned to the Receive Queue.
2153  *
2154  * @param hw Hardware context.
2155  * @param seq Header/payload sequence buffers.
2156  *
2157  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code value on failure.
2158  */
2159
2160 ocs_hw_rtn_e
2161 ocs_hw_rqpair_sequence_free(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2162 {
2163         ocs_hw_rtn_e   rc = OCS_HW_RTN_SUCCESS;
2164
2165         /* Check for auto xfer rdy dummy buffers and call the proper release function. */
2166         if (seq->header->rqindex == OCS_HW_RQ_INDEX_DUMMY_HDR) {
2167                 return ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(hw, seq);
2168         }
2169
2170         /*
2171          * Post the data buffer first. Because in RQ pair mode, ringing the
2172          * doorbell of the header ring will post the data buffer as well.
2173          */
2174         if (ocs_hw_rqpair_put(hw, seq)) {
2175                 ocs_log_err(hw->os, "error writing buffers\n");
2176                 return OCS_HW_RTN_ERROR;
2177         }
2178
2179         return rc;
2180 }
2181
2182 /**
2183  * @brief Find the RQ index of RQ_ID.
2184  *
2185  * @param hw Hardware context.
2186  * @param rq_id RQ ID to find.
2187  *
2188  * @return Returns the RQ index, or -1 if not found.
2189  */
2190 static inline int32_t
2191 ocs_hw_rqpair_find(ocs_hw_t *hw, uint16_t rq_id)
2192 {
2193         return ocs_hw_queue_hash_find(hw->rq_hash, rq_id);
2194 }
2195
2196 /**
2197  * @ingroup devInitShutdown
2198  * @brief Allocate auto xfer rdy buffers.
2199  *
2200  * @par Description
2201  * Allocates the auto xfer rdy buffers and places them on the free list.
2202  *
2203  * @param hw Hardware context allocated by the caller.
2204  * @param num_buffers Number of buffers to allocate.
2205  *
2206  * @return Returns 0 on success, or a non-zero value on failure.
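 *
 * @par Example
 * As invoked from ocs_hw_rqpair_init(), one spare buffer is allocated
 * beyond the XRI count (see that function for the rationale):
 * @code
 * rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
 * @endcode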
2207  */
2208 ocs_hw_rtn_e
2209 ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(ocs_hw_t *hw, uint32_t num_buffers)
2210 {
2211         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2212         uint32_t i;
2213
2214         hw->auto_xfer_rdy_buf_pool = ocs_pool_alloc(hw->os, sizeof(ocs_hw_auto_xfer_rdy_buffer_t), num_buffers, FALSE);
2215         if (hw->auto_xfer_rdy_buf_pool == NULL) {
2216                 ocs_log_err(hw->os, "Failure to allocate auto xfer ready buffer pool\n");
2217                 return OCS_HW_RTN_NO_MEMORY;
2218         }
2219
2220         for (i = 0; i < num_buffers; i++) {
2221                 /* allocate the wrapper object */
2222                 buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2223                 ocs_hw_assert(buf != NULL);
2224
2225                 /* allocate the auto xfer ready buffer */
2226                 if (ocs_dma_alloc(hw->os, &buf->payload.dma, hw->config.auto_xfer_rdy_size, OCS_MIN_DMA_ALIGNMENT)) {
2227                         ocs_log_err(hw->os, "DMA allocation failed\n");
2228                         ocs_free(hw->os, buf, sizeof(*buf));
2229                         return OCS_HW_RTN_NO_MEMORY;
2230                 }
2231
2232                 /* build a fake data header in big endian */
2233                 buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2234                 buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2235                 buf->hdr.type = FC_TYPE_FCP;
2236                 buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2237                                             FC_FCTL_FIRST_SEQUENCE |
2238                                             FC_FCTL_LAST_SEQUENCE |
2239                                             FC_FCTL_END_SEQUENCE |
2240                                             FC_FCTL_SEQUENCE_INITIATIVE);
2241
2242                 /* build the fake header DMA object */
2243                 buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2244                 buf->header.dma.virt = &buf->hdr;
2245                 buf->header.dma.alloc = buf;
2246                 buf->header.dma.size = sizeof(buf->hdr);
2247                 buf->header.dma.len = sizeof(buf->hdr);
2248
2249                 buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2250         }
2251         return OCS_HW_RTN_SUCCESS;
2252 }
2253
2254 /**
2255  * @ingroup devInitShutdown
2256  * @brief Post Auto xfer rdy buffers to the XRIs posted with DNRX.
2257  *
2258  * @par Description
2259  * When new buffers are freed, check existing XRIs waiting for buffers.
2260  *
2261  * @param hw Hardware context allocated by the caller.
2262  */
2263 static void
2264 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(ocs_hw_t *hw)
2265 {
2266         ocs_hw_io_t *io;
2267         int32_t rc;
2268
2269         ocs_lock(&hw->io_lock);
2270
2271         while (!ocs_list_empty(&hw->io_port_dnrx)) {
2272                 io = ocs_list_remove_head(&hw->io_port_dnrx);
2273                 rc = ocs_hw_reque_xri(hw, io);
2274                 if(rc) {
2275                         break;
2276                 }
2277         }
2278
2279         ocs_unlock(&hw->io_lock);
2280 }
2281
2282 /**
2283  * @brief Called when the POST_SGL_PAGE command completes.
2284  *
2285  * @par Description
2286  * Free the mailbox command buffer.
2287  *
2288  * @param hw Hardware context.
2289  * @param status Status field from the mbox completion.
2290  * @param mqe Mailbox response structure.
2291  * @param arg Pointer to a callback function that signals the caller that the command is done.
2292  *
2293  * @return Returns 0.
2294  */
2295 static int32_t
2296 ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
2297 {
2298         if (status != 0) {
2299                 ocs_log_debug(hw->os, "Status 0x%x\n", status);
2300         }
2301
2302         ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2303         return 0;
2304 }
2305
2306 /**
2307  * @brief Prepares an XRI to move to the chip.
2308  *
2309  * @par Description
2310  * Puts the data SGL into the SGL list for the IO object and possibly registers
2311  * an SGL list for the XRI. Since both the POST_XRI and POST_SGL_PAGES commands are
2312  * mailbox commands, we don't need to wait for completion before proceeding.
2313  *
2314  * @param hw Hardware context allocated by the caller.
2315  * @param io Pointer to the IO object.
2316  *
2317  * @return Returns OCS_HW_RTN_SUCCESS for success, or an error code value for failure.
2318  */
2319 ocs_hw_rtn_e
2320 ocs_hw_rqpair_auto_xfer_rdy_move_to_port(ocs_hw_t *hw, ocs_hw_io_t *io)
2321 {
2322         /* We only need to preregister the SGL if it has not yet been done. */
2323         if (!sli_get_sgl_preregister(&hw->sli)) {
2324                 uint8_t *post_sgl;
2325                 ocs_dma_t *psgls = &io->def_sgl;
2326                 ocs_dma_t **sgls = &psgls;
2327
2328                 /* non-local buffer required for mailbox queue */
2329                 post_sgl = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2330                 if (post_sgl == NULL) {
2331                         ocs_log_err(hw->os, "no buffer for command\n");
2332                         return OCS_HW_RTN_NO_MEMORY;
2333                 }
2334                 if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, post_sgl, SLI4_BMBX_SIZE,
2335                                                 io->indicator, 1, sgls, NULL, NULL)) {
2336                         if (ocs_hw_command(hw, post_sgl, OCS_CMD_NOWAIT,
2337                                             ocs_hw_rqpair_auto_xfer_rdy_move_to_port_cb, NULL)) {
2338                                 ocs_free(hw->os, post_sgl, SLI4_BMBX_SIZE);
2339                                 ocs_log_err(hw->os, "SGL post failed\n");
2340                                 return OCS_HW_RTN_ERROR;
2341                         }
2342                 }
2343         }
2344
2345         ocs_lock(&hw->io_lock);
2346         if (ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 0) != 0) { /* DNRX set - no buffer */
2347                 ocs_unlock(&hw->io_lock);
2348                 return OCS_HW_RTN_ERROR;
2349         }
2350         ocs_unlock(&hw->io_lock);
2351         return OCS_HW_RTN_SUCCESS;
2352 }
2353
2354 /**
2355  * @brief Prepares an XRI to move back to the host.
2356  *
2357  * @par Description
2358  * Releases any attached buffer back to the pool.
2359  *
2360  * @param hw Hardware context allocated by the caller.
2361  * @param io Pointer to the IO object.
2362  */
2363 void
2364 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(ocs_hw_t *hw, ocs_hw_io_t *io)
2365 {
2366         if (io->axr_buf != NULL) {
2367                 ocs_lock(&hw->io_lock);
2368                         /* check list and remove if there */
2369                         if (ocs_list_on_list(&io->dnrx_link)) {
2370                                 ocs_list_remove(&hw->io_port_dnrx, io);
2371                                 io->auto_xfer_rdy_dnrx = 0;
2372
2373                                 /* release the count for waiting for a buffer */
2374                                 ocs_hw_io_free(hw, io);
2375                         }
2376
2377                         ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
2378                         io->axr_buf = NULL;
2379                 ocs_unlock(&hw->io_lock);
2380
2381                 ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2382         }
2383         return;
2384 }
2385
2386 /**
2387  * @brief Posts an auto xfer rdy buffer to an IO.
2388  *
2389  * @par Description
2390  * Puts the data SGL into the SGL list for the IO object.
2391  * @n
2392  * @b Note: io_lock must be held.
2393  *
2394  * @param hw Hardware context allocated by the caller.
2395  * @param io Pointer to the IO object.
2396  *
2397  * @return Returns the value of DNRX bit in the TRSP and ABORT WQEs.
2398  */
2399 uint8_t
2400 ocs_hw_rqpair_auto_xfer_rdy_buffer_post(ocs_hw_t *hw, ocs_hw_io_t *io, int reuse_buf)
2401 {
2402         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2403         sli4_sge_t      *data;
2404
2405         if(!reuse_buf) {
2406                 buf = ocs_pool_get(hw->auto_xfer_rdy_buf_pool);
2407                 io->axr_buf = buf;
2408         }
2409
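        /* Resulting SGL layout: SGE[0] skip, SGE[1] skip or DISEED (when DIF
         * assists are enabled below), SGE[2] the data buffer, SGE[3] skip. */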
2410         data = io->def_sgl.virt;
2411         data[0].sge_type = SLI4_SGE_TYPE_SKIP;
2412         data[0].last = 0;
2413
2414         /*
2415          * Note: if we are doing DIF assists, then the SGE[1] must contain the
2416          * DI_SEED SGE. The host is responsible for programming:
2417          *   SGE Type (Word 2, bits 30:27)
2418          *   Replacement App Tag (Word 2 bits 15:0)
2419          *   App Tag (Word 3 bits 15:0)
2420          *   New Ref Tag (Word 3 bit 23)
2421          *   Metadata Enable (Word 3 bit 20)
2422          *   Auto-Increment RefTag (Word 3 bit 19)
2423          *   Block Size (Word 3 bits 18:16)
2424          * The following fields are managed by the SLI Port:
2425          *    Ref Tag Compare (Word 0)
2426  *    Replacement Ref Tag (Word 1) - if not the LBA
2427          *    NA (Word 2 bit 25)
2428          *    Opcode RX (Word 3 bits 27:24)
2429          *    Checksum Enable (Word 3 bit 22)
2430          *    RefTag Enable (Word 3 bit 21)
2431          *
2432  * The first two SGLs are cleared by ocs_hw_io_init_sges(), so assume everything is cleared.
2433          */
2434         if (hw->config.auto_xfer_rdy_p_type) {
2435                 sli4_diseed_sge_t *diseed = (sli4_diseed_sge_t*)&data[1];
2436
2437                 diseed->sge_type = SLI4_SGE_TYPE_DISEED;
2438                 diseed->repl_app_tag = hw->config.auto_xfer_rdy_app_tag_value;
2439                 diseed->app_tag_cmp = hw->config.auto_xfer_rdy_app_tag_value;
2440                 diseed->check_app_tag = hw->config.auto_xfer_rdy_app_tag_valid;
2441                 diseed->auto_incr_ref_tag = TRUE; /* Always the LBA */
2442                 diseed->dif_blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
2443         } else {
2444                 data[1].sge_type = SLI4_SGE_TYPE_SKIP;
2445                 data[1].last = 0;
2446         }
2447
2448         data[2].sge_type = SLI4_SGE_TYPE_DATA;
2449         data[2].buffer_address_high = ocs_addr32_hi(io->axr_buf->payload.dma.phys);
2450         data[2].buffer_address_low  = ocs_addr32_lo(io->axr_buf->payload.dma.phys);
2451         data[2].buffer_length = io->axr_buf->payload.dma.size;
2452         data[2].last = TRUE;
2453         data[3].sge_type = SLI4_SGE_TYPE_SKIP;
2454
2455         return 0;
2456 }
2457
2458 /**
2459  * @brief Return auto xfer ready buffers (while in RQ pair mode).
2460  *
2461  * @par Description
2462  * The header and payload buffers are returned to the auto xfer rdy pool.
2463  *
2464  * @param hw Hardware context.
2465  * @param seq Header/payload sequence buffers.
2466  *
2467  * @return Returns OCS_HW_RTN_SUCCESS for success, an error code value for failure.
2468  */
2469
2470 static ocs_hw_rtn_e
2471 ocs_hw_rqpair_auto_xfer_rdy_buffer_sequence_reset(ocs_hw_t *hw, ocs_hw_sequence_t *seq)
2472 {
2473         ocs_hw_auto_xfer_rdy_buffer_t *buf = seq->header->dma.alloc;
2474
2475         buf->data_cqe = 0;
2476         buf->cmd_cqe = 0;
2477         buf->fcfi = 0;
2478         buf->call_axr_cmd = 0;
2479         buf->call_axr_data = 0;
2480
2481         /* build a fake data header in big endian */
2482         buf->hdr.info = FC_RCTL_INFO_SOL_DATA;
2483         buf->hdr.r_ctl = FC_RCTL_FC4_DATA;
2484         buf->hdr.type = FC_TYPE_FCP;
2485         buf->hdr.f_ctl = fc_htobe24(FC_FCTL_EXCHANGE_RESPONDER |
2486                                         FC_FCTL_FIRST_SEQUENCE |
2487                                         FC_FCTL_LAST_SEQUENCE |
2488                                         FC_FCTL_END_SEQUENCE |
2489                                         FC_FCTL_SEQUENCE_INITIATIVE);
2490
2491         /* build the fake header DMA object */
2492         buf->header.rqindex = OCS_HW_RQ_INDEX_DUMMY_HDR;
2493         buf->header.dma.virt = &buf->hdr;
2494         buf->header.dma.alloc = buf;
2495         buf->header.dma.size = sizeof(buf->hdr);
2496         buf->header.dma.len = sizeof(buf->hdr);
2497         buf->payload.rqindex = OCS_HW_RQ_INDEX_DUMMY_DATA;
2498
2499         ocs_hw_rqpair_auto_xfer_rdy_dnrx_check(hw);
2500
2501         return OCS_HW_RTN_SUCCESS;
2502 }
2503
2504 /**
2505  * @ingroup devInitShutdown
2506  * @brief Free auto xfer rdy buffers.
2507  *
2508  * @par Description
2509  * Frees the auto xfer rdy buffers.
2510  *
2511  * @param hw Hardware context allocated by the caller.
2514  */
2515 static void
2516 ocs_hw_rqpair_auto_xfer_rdy_buffer_free(ocs_hw_t *hw)
2517 {
2518         ocs_hw_auto_xfer_rdy_buffer_t *buf;
2519         uint32_t i;
2520
2521         if (hw->auto_xfer_rdy_buf_pool != NULL) {
2522                 ocs_lock(&hw->io_lock);
2523                         for (i = 0; i < ocs_pool_get_count(hw->auto_xfer_rdy_buf_pool); i++) {
2524                                 buf = ocs_pool_get_instance(hw->auto_xfer_rdy_buf_pool, i);
2525                                 if (buf != NULL) {
2526                                         ocs_dma_free(hw->os, &buf->payload.dma);
2527                                 }
2528                         }
2529                 ocs_unlock(&hw->io_lock);
2530
2531                 ocs_pool_free(hw->auto_xfer_rdy_buf_pool);
2532                 hw->auto_xfer_rdy_buf_pool = NULL;
2533         }
2534 }
2535
2536 /**
2537  * @ingroup devInitShutdown
2538  * @brief Configure the rq_pair function from ocs_hw_init().
2539  *
2540  * @par Description
2541  * Allocates the auto xfer rdy buffers and posts the initial XRIs for this feature.
2542  *
2543  * @param hw Hardware context allocated by the caller.
2544  *
2545  * @return Returns 0 on success, or a non-zero value on failure.
2546  */
2547 ocs_hw_rtn_e
2548 ocs_hw_rqpair_init(ocs_hw_t *hw)
2549 {
2550         ocs_hw_rtn_e    rc;
2551         uint32_t xris_posted;
2552
2553         ocs_log_debug(hw->os, "RQ Pair mode\n");
2554
2555         /*
2556          * If we get this far, the auto XFR_RDY feature was enabled successfully; otherwise ocs_hw_init() would
2557          * return with an error. So allocate the buffers based on the initial XRI pool required to support this
2558          * feature.
2559          */
2560         if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
2561             hw->config.auto_xfer_rdy_size > 0) {
2562                 if (hw->auto_xfer_rdy_buf_pool == NULL) {
2563                         /*
2564                          * Allocate one more buffer than XRIs so that when all the XRIs are in use, we still have
2565                          * one to post back for the case where the response phase is started in the context of
2566                          * the data completion.
2567                          */
2568                         rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_alloc(hw, hw->config.auto_xfer_rdy_xri_cnt + 1);
2569                         if (rc != OCS_HW_RTN_SUCCESS) {
2570                                 return rc;
2571                         }
2572                 } else {
2573                         ocs_pool_reset(hw->auto_xfer_rdy_buf_pool);
2574                 }
2575
2576                 /* Post the auto XFR_RDY XRIs */
2577                 xris_posted = ocs_hw_xri_move_to_port_owned(hw, hw->config.auto_xfer_rdy_xri_cnt);
2578                 if (xris_posted != hw->config.auto_xfer_rdy_xri_cnt) {
2579                         ocs_log_err(hw->os, "post_xri failed, only posted %d XRIs\n", xris_posted);
2580                         return OCS_HW_RTN_ERROR;
2581                 }
2582         }
2583
2584         return OCS_HW_RTN_SUCCESS;
2585 }
2586
2587 /**
2588  * @ingroup devInitShutdown
2589  * @brief Tear down the rq_pair function from ocs_hw_teardown().
2590  *
2591  * @par Description
2592  * Frees the auto xfer rdy buffers.
2593  *
2594  * @param hw Hardware context allocated by the caller.
2595  */
2596 void
2597 ocs_hw_rqpair_teardown(ocs_hw_t *hw)
2598 {
2599         /* We need to free any auto xfer ready buffers */
2600         ocs_hw_rqpair_auto_xfer_rdy_buffer_free(hw);
2601 }