1 /*-
2  * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25
26 /* $FreeBSD$ */
27
28 #include "smartpqi_includes.h"
29
30 /* 5 minute timeout for quiesce, counted in 1 ms polling intervals (see pqisrc_wait_for_cmnd_complete()) */
31 #define PQI_QUIESCE_TIMEOUT     300000
32
33 /*
34  * Request the adapter's supported PQI device capabilities (REPORT PQI DEVICE CAPABILITY).
35  */
36 static int
37 pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
38 {
39         int ret = PQI_STATUS_SUCCESS;
40         
41         DBG_FUNC("IN\n");
42
43         gen_adm_req_iu_t        admin_req;
44         gen_adm_resp_iu_t       admin_resp;
45         dma_mem_t               pqi_cap_dma_buf;
46         pqi_dev_cap_t           *capability = NULL;
47         pqi_iu_layer_desc_t     *iu_layer_desc = NULL;
48
49         /* Allocate Non DMA memory */
50         capability = os_mem_alloc(softs, sizeof(*capability));
51         if (!capability) {
52                 DBG_ERR("Failed to allocate memory for capability\n");
53                 ret = PQI_STATUS_FAILURE;
54                 goto err_out;
55         }
56
57         memset(&admin_req, 0, sizeof(admin_req));
58         memset(&admin_resp, 0, sizeof(admin_resp));
59
60         memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
61         pqi_cap_dma_buf.tag = "pqi_cap_buf";
62         pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
63         pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
64
65         ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
66         if (ret) {
67                 DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
68                 goto err_dma_alloc;
69         }
70
71         admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
72         admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
73         admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
74         admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
75         admin_req.req_type.general_func.sg_desc.type =  SGL_DESCRIPTOR_CODE_DATA_BLOCK;
76
77         ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
78         if( PQI_STATUS_SUCCESS == ret) {
79                 memcpy(capability,
80                         pqi_cap_dma_buf.virt_addr,
81                         pqi_cap_dma_buf.size);
82         } else {
83                 DBG_ERR("Failed to send admin req report pqi device capability\n");
84                 goto err_admin_req;
85
86         }
87
88         softs->pqi_dev_cap.max_iqs = capability->max_iqs;
89         softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
90         softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
91         softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
92         softs->pqi_dev_cap.max_oqs = capability->max_oqs;
93         softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
94         softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
95         softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;
96
97         iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
98         softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
99         softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
100         softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;
101
102         DBG_INIT("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
103         DBG_INIT("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
104         DBG_INIT("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
105         DBG_INIT("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
106         DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
107         DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
108         DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
109         DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
110         DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
111         DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
112         DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
113
114
115         os_mem_free(softs, (void *)capability,
116                     REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
117         os_dma_mem_free(softs, &pqi_cap_dma_buf);
118
119         DBG_FUNC("OUT\n");
120         return ret;
121
122 err_admin_req:
123         os_dma_mem_free(softs, &pqi_cap_dma_buf);
124 err_dma_alloc:
125         if (capability)
126                 os_mem_free(softs, (void *)capability,
127                             REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
128 err_out:
129         DBG_FUNC("failed OUT\n");
130         return PQI_STATUS_FAILURE;
131 }
132
133 /*
134  * Function used to deallocate the used rcb.
135  */
136 void
137 pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
138 {
139         uint32_t num_req;
140         size_t size;
141         int i;
142
143         DBG_FUNC("IN\n");
144         num_req = softs->max_outstanding_io + 1;
145         size = num_req * sizeof(rcb_t);
146         for (i = 1; i < req_count; i++)
147                 os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
148         os_mem_free(softs, (void *)softs->rcb, size);
149         softs->rcb = NULL;
150         DBG_FUNC("OUT\n");
151 }
152
153
154 /*
155  * Allocate memory for rcb and SG descriptors.
156  */
157 static int
158 pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
159 {
160         int ret = PQI_STATUS_SUCCESS;
161         int i = 0;
162         uint32_t num_req = 0;
163         uint32_t sg_buf_size = 0;
164         uint64_t alloc_size = 0;
165         rcb_t *rcb = NULL;
166         rcb_t *prcb = NULL;
167         DBG_FUNC("IN\n");
168
169         /* Set maximum outstanding requests */
170         /* Valid tag values run from 1 to softs->max_outstanding_io and the
171          * rcb array is indexed directly by tag. Since tag 0 is never used,
172          * one extra element is allocated.
173          */
174         softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
175         num_req = softs->max_outstanding_io + 1;
176         DBG_INIT("Max Outstanding IO reset to %d\n", num_req);
177
178         alloc_size = num_req * sizeof(rcb_t);
179
180         /* Allocate Non DMA memory */
181         rcb = os_mem_alloc(softs, alloc_size);
182         if (!rcb) {
183                 DBG_ERR("Failed to allocate memory for rcb\n");
184                 ret = PQI_STATUS_FAILURE;
185                 goto err_out;
186         }
187         softs->rcb = rcb;
188
189         /* Allocate sg dma memory for sg chain  */
190         sg_buf_size = softs->pqi_cap.max_sg_elem *
191                         sizeof(sgt_t);
192
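        /*
         * Each rcb (indexed by tag below) gets its own DMA buffer, large enough
         * to hold a chain of up to max_sg_elem SG descriptors.
         */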
193         prcb = &softs->rcb[1];
194         /* Initialize rcb */
195         for(i=1; i < num_req; i++) {
196                 char tag[15];
197                 sprintf(tag, "sg_dma_buf%d", i);
198                 softs->sg_dma_desc[i].tag = tag;
199                 softs->sg_dma_desc[i].size = sg_buf_size;
200                 softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
201
202                 ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
203                 if (ret) {
204                         DBG_ERR("Failed to Allocate sg desc %d\n", ret);
205                         ret = PQI_STATUS_FAILURE;
206                         goto error;
207                 }
208                 prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
209                 prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
210                 prcb ++;
211         }
212
213         DBG_FUNC("OUT\n");
214         return ret;
215 error:
216         pqisrc_free_rcb(softs, i);
217 err_out:
218         DBG_FUNC("failed OUT\n");
219         return ret;
220 }
221
222 /*
223  * Decide the operational queue configuration parameters:
224  * number of IB/OB queues, shared vs. dedicated event queue interrupt, and IU spanning support.
225  */
226 void
227 pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
228 {
229         uint16_t total_iq_elements;
230
231         DBG_FUNC("IN\n");
232
233         DBG_INIT("softs->intr_count : %d  softs->num_cpus_online : %d\n",
234                 softs->intr_count, softs->num_cpus_online);
235         
236         if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
237                 /* Share the event and Operational queue. */
238                 softs->num_op_obq = 1;
239                 softs->share_opq_and_eventq = true;
240         }
241         else {
242                 /* Note :  One OBQ (OBQ0) reserved for event queue */
243                 softs->num_op_obq = MIN(softs->num_cpus_online,
244                                         softs->intr_count) - 1;
245                 softs->share_opq_and_eventq = false;
246         }
247         /* If more than one interrupt is available, the event queue does
248         not need to share an interrupt with the IO queues */
249         if (softs->intr_count > 1)
250                 softs->share_opq_and_eventq = false;
251
252         DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);
253
254         softs->num_op_raid_ibq = softs->num_op_obq;
255         softs->num_op_aio_ibq = softs->num_op_raid_ibq;
256         softs->ibq_elem_size =  softs->pqi_dev_cap.max_iq_elem_len * 16;
257         softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
258         if (softs->max_ib_iu_length_per_fw == 256 &&
259             softs->ob_spanning_supported) {
260                 /* older f/w that doesn't actually support spanning. */
261                 softs->max_ib_iu_length = softs->ibq_elem_size;
262         } else {
263                 /* max. inbound IU length is a multiple of our inbound element size. */
264                 softs->max_ib_iu_length =
265                         (softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
266                          softs->ibq_elem_size;
267
268         }
269         /* The total number of IB queue elements needed is the maximum
270                 number of outstanding IOs multiplied by the number of
271                 elements each (possibly spanned) IU occupies */
272         total_iq_elements = (softs->max_outstanding_io *
273                 (softs->max_ib_iu_length / softs->ibq_elem_size));
274
275         softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
276         softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
277                 softs->pqi_dev_cap.max_iq_elements);
278
279         softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
280         softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
281                 softs->pqi_dev_cap.max_oq_elements);
282
283         softs->max_sg_per_iu = ((softs->max_ib_iu_length -
284                                 softs->ibq_elem_size) /
285                                 sizeof(sgt_t)) +
286                                 MAX_EMBEDDED_SG_IN_FIRST_IU;
287
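        /*
         * Worked example with purely illustrative numbers (no particular
         * controller is assumed): max_iq_elem_len = 8 gives ibq_elem_size =
         * 8 * 16 = 128 bytes; a firmware max_ib_iu_length_per_fw of 1136 then
         * rounds down to max_ib_iu_length = (1136 / 128) * 128 = 1024, i.e.
         * 8 elements per spanned IU.  With max_outstanding_io = 1024 and
         * 4 RAID IB queues, total_iq_elements = 1024 * 8 = 8192 and
         * num_elem_per_op_ibq = 8192 / 4 = 2048 before the MIN() caps above;
         * assuming a 16-byte sgt_t, max_sg_per_iu = (1024 - 128) / 16 +
         * MAX_EMBEDDED_SG_IN_FIRST_IU = 56 + MAX_EMBEDDED_SG_IN_FIRST_IU.
         */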
288         DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
289         DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
290         DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
291         DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);
292
293         DBG_FUNC("OUT\n");
294 }
295
296 /*
297  * Configure the operational queue parameters.
298  */
299 int
300 pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
301 {
302         int ret = PQI_STATUS_SUCCESS;
303
304         /* Get the PQI capabilities with a
305                 REPORT PQI DEVICE CAPABILITY request */
306         ret = pqisrc_report_pqi_capability(softs);
307         if (ret) {
308                 DBG_ERR("Failed to send report pqi dev capability request : %d\n",
309                                 ret);
310                 goto err_out;
311         }
312
313         /* Reserve the required number of slots for internal requests */
314         softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
315
316         /* Decide the Op queue configuration */
317         pqisrc_decide_opq_config(softs);
318
319         DBG_FUNC("OUT\n");
320         return ret;
321
322 err_out:
323         DBG_FUNC("OUT failed\n");
324         return ret;
325 }
326
327 /*
328  * Validate that the adapter is in PQI mode.
329  */
330 int
331 pqisrc_check_pqimode(pqisrc_softstate_t *softs)
332 {
333         int ret = PQI_STATUS_FAILURE;
334         int tmo = 0;
335         uint64_t signature = 0;
336
337         DBG_FUNC("IN\n");
338
339         /* Check the PQI device signature */
340         tmo = PQISRC_PQIMODE_READY_TIMEOUT;
341         do {
342                 signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
343
344                 if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
345                                 sizeof(uint64_t)) == 0) {
346                         ret = PQI_STATUS_SUCCESS;
347                         break;
348                 }
349                 OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
350         } while (tmo--);
351
352         PRINT_PQI_SIGNATURE(signature);
353
354         if (tmo <= 0) {
355                 DBG_ERR("PQI Signature is invalid\n");
356                 ret = PQI_STATUS_TIMEOUT;
357                 goto err_out;
358         }
359
360         tmo = PQISRC_PQIMODE_READY_TIMEOUT;
361         /* Check function and status code for the device */
362         COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
363                 PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
364         if (!tmo) {
365                 DBG_ERR("PQI device is not in IDLE state\n");
366                 ret = PQI_STATUS_TIMEOUT;
367                 goto err_out;
368         }
369
370
371         tmo = PQISRC_PQIMODE_READY_TIMEOUT;
372         /* Check the PQI device status register */
373         COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
374                                 PQI_DEV_STATE_AT_INIT, tmo);
375         if (!tmo) {
376                 DBG_ERR("PQI Registers are not ready\n");
377                 ret = PQI_STATUS_TIMEOUT;
378                 goto err_out;
379         }
380
381         DBG_FUNC("OUT\n");
382         return ret;
383 err_out:
384         DBG_FUNC("OUT failed\n");
385         return ret;
386 }
387
388 /* PQI Feature processing */
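/*
 * Layout assumed by the index arithmetic in the helpers below: the firmware
 * features section header is followed by three byte arrays of num_elements
 * bytes each - the features the firmware supports, the features the host
 * requests, and the features the firmware has enabled.
 */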
389 static int
390 pqisrc_config_table_update(struct pqisrc_softstate *softs,
391         uint16_t first_section, uint16_t last_section)
392 {
393         pqi_vendor_general_request_t request;
394         int ret = PQI_STATUS_FAILURE;
395
396         memset(&request, 0, sizeof(request));
397
398         request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
399         request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
400         request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
401         request.data.config_table_update.first_section = first_section;
402         request.data.config_table_update.last_section = last_section;
403
404         ret = pqisrc_build_send_vendor_request(softs, &request, NULL);
405
406         if (ret != PQI_STATUS_SUCCESS) {
407                 DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
408                 return PQI_STATUS_FAILURE;
409         }
410
411         return PQI_STATUS_SUCCESS;
412 }
413
414 static inline
415 boolean_t pqi_is_firmware_feature_supported(
416         struct pqi_conf_table_firmware_features *firmware_feature_list,
417         unsigned int bit_position)
418 {
419         unsigned int byte_index;
420
421         byte_index = bit_position / BITS_PER_BYTE;
422
423         if (byte_index >= firmware_feature_list->num_elements)
424                 return false;
425
426         return firmware_feature_list->features_supported[byte_index] &
427                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
428 }
429
430 static inline
431 boolean_t pqi_is_firmware_feature_enabled(
432         struct pqi_conf_table_firmware_features *firmware_feature_list,
433         uint8_t *firmware_features_addr, unsigned int bit_position)
434 {
435         unsigned int byte_index;
436         uint8_t *feature_enabled_addr;
437
438         byte_index = (bit_position / BITS_PER_BYTE) +
439                 (firmware_feature_list->num_elements * 2);
440
441         feature_enabled_addr = firmware_features_addr +
442                 offsetof(struct pqi_conf_table_firmware_features,
443                         features_supported) + byte_index;
444
445         return *feature_enabled_addr &
446                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
447 }
448
449 static inline void
450 pqi_request_firmware_feature(
451         struct pqi_conf_table_firmware_features *firmware_feature_list,
452         unsigned int bit_position)
453 {
454         unsigned int byte_index;
455
456         byte_index = (bit_position / BITS_PER_BYTE) +
457                 firmware_feature_list->num_elements;
458
459         firmware_feature_list->features_supported[byte_index] |=
460                 (1 << (bit_position % BITS_PER_BYTE));
461 }
462
463 /* Update PQI config table firmware features section and inform the firmware */
464 static int
465 pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs,
466         struct pqi_conf_table_firmware_features *firmware_feature_list)
467 {
468         uint8_t *request_feature_addr;
469         void *request_feature_abs_addr;
470
471         request_feature_addr = firmware_feature_list->features_supported +
472                 firmware_feature_list->num_elements;
473         request_feature_abs_addr = softs->fw_features_section_abs_addr +
474                 (request_feature_addr - (uint8_t*)firmware_feature_list);
475
476         os_io_memcpy(request_feature_abs_addr, request_feature_addr,
477                         firmware_feature_list->num_elements);
478
479         return pqisrc_config_table_update(softs,
480                 PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
481                 PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
482 }
483
484 /* Check whether the firmware has enabled the feature at the specified bit position. */
485 inline boolean_t
486 pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs,
487                 struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position)
488 {
489         uint16_t byte_index;
490         uint8_t *features_enabled_abs_addr;
491
492         byte_index = (bit_position / BITS_PER_BYTE) +
493                 (firmware_feature_list->num_elements * 2);
494
495         features_enabled_abs_addr = softs->fw_features_section_abs_addr +
496         offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index;
497
498         return *features_enabled_abs_addr &
499                 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
500 }
501
502 static void
503 pqi_firmware_feature_status(struct pqisrc_softstate     *softs,
504         struct pqi_firmware_feature *firmware_feature)
505 {
506         switch(firmware_feature->feature_bit) {
507         case PQI_FIRMWARE_FEATURE_OFA:
508                 break;
509         case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT:
510                 softs->timeout_in_passthrough = true;
511                 break;
512         case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT:
513                 softs->timeout_in_tmf = true;
514                 break;
515         default:
516                 DBG_NOTE("Nothing to do \n");
517         }
518 }
519
520 /* Firmware features supported by the driver */
521 static struct
522 pqi_firmware_feature pqi_firmware_features[] = {
523         {
524                 .feature_name = "Support timeout for pass-through commands",
525                 .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT,
526                 .feature_status = pqi_firmware_feature_status,
527         },
528         {
529                 .feature_name = "Support timeout for LUN Reset TMF",
530                 .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT,
531                 .feature_status = pqi_firmware_feature_status,
532         }
533 };
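/*
 * Additional features supported by the driver would be added here as further
 * entries, each naming its PQI_FIRMWARE_FEATURE_* bit and, optionally, a
 * feature_status callback (guidance only; the list above is what this driver
 * currently requests).
 */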
534
535 static void
536 pqisrc_process_firmware_features(pqisrc_softstate_t *softs)
537 {
538         int rc;
539         struct pqi_conf_table_firmware_features *firmware_feature_list;
540         unsigned int i;
541         unsigned int num_features_requested;
542
543         firmware_feature_list = (struct pqi_conf_table_firmware_features*)
544                 softs->fw_features_section_abs_addr;
545
546         /* Check each feature and request those supported by both the firmware and the driver. */
547         for (i = 0, num_features_requested = 0;
548                 i < ARRAY_SIZE(pqi_firmware_features); i++) {
549                 /* Does the firmware support it? */
550                 if (pqi_is_firmware_feature_supported(firmware_feature_list,
551                                 pqi_firmware_features[i].feature_bit)) {
552                         pqi_request_firmware_feature(firmware_feature_list,
553                                 pqi_firmware_features[i].feature_bit);
554                         pqi_firmware_features[i].supported = true;
555                         num_features_requested++;
556                         DBG_NOTE("%s supported by driver, requesting firmware to enable it\n",
557                                         pqi_firmware_features[i].feature_name);
558                 } else {
559                         DBG_NOTE("%s supported by driver, but not by current firmware\n",
560                                         pqi_firmware_features[i].feature_name);
561                 }
562         }
563         if (num_features_requested == 0)
564                 return;
565
566         rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list);
567         if (rc) {
568                 DBG_ERR("Failed to update pqi config table\n");
569                 return;
570         }
571
572         for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
573                 if (pqi_is_firmware_feature_enabled(firmware_feature_list,
574                         softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) {
575                         pqi_firmware_features[i].enabled = true;
576                         DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name);
577                         if(pqi_firmware_features[i].feature_status)
578                                 pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i]));
579                 }
580         }
581 }
582
583 /*
584  * Get the PQI configuration table parameters.
585  * Currently used to locate the firmware features section and the heart-beat counter scratch-pad register.
586  */
587 int
588 pqisrc_process_config_table(pqisrc_softstate_t *softs)
589 {
590         int ret = PQI_STATUS_FAILURE;
591         uint32_t config_table_size;
592         uint32_t section_off;
593         uint8_t *config_table_abs_addr;
594         struct pqi_conf_table *conf_table;
595         struct pqi_conf_table_section_header *section_hdr;
596
597         config_table_size = softs->pqi_cap.conf_tab_sz;
598
599         if (config_table_size < sizeof(*conf_table) ||
600                 config_table_size > PQI_CONF_TABLE_MAX_LEN) {
601                 DBG_ERR("Invalid PQI conf table length of %u\n",
602                         config_table_size);
603                 return ret;
604         }
605
606         conf_table = os_mem_alloc(softs, config_table_size);
607         if (!conf_table) {
608                 DBG_ERR("Failed to allocate memory for PQI conf table\n");
609                 return ret;
610         }
611
612         if (config_table_size < sizeof(*conf_table) ||
613                 config_table_size > PQI_CONF_TABLE_MAX_LEN) {
614                 DBG_ERR("Invalid PQI conf table length of %u\n",
615                         config_table_size);
616                 goto out;
617         }
618
619         config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
620                                         softs->pqi_cap.conf_tab_off);
621
622         PCI_MEM_GET_BUF(softs, config_table_abs_addr,
623                         softs->pqi_cap.conf_tab_off,
624                         (uint8_t*)conf_table, config_table_size);
625
626
627         if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
628                         sizeof(conf_table->sign)) != 0) {
629                 DBG_ERR("Invalid PQI config signature\n");
630                 goto out;
631         }
632
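        /*
         * Walk the chain of section headers: each header carries a section_id
         * and the offset of the next section; a next offset of 0 (or one that
         * would run past the end of the table) ends the walk.
         */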
633         section_off = LE_32(conf_table->first_section_off);
634
635         while (section_off) {
636
637                 if (section_off + sizeof(*section_hdr) >= config_table_size) {
638                         DBG_INFO("Reached end of PQI config table. Breaking off.\n");
639                         break;
640                 }
641
642                 section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
643
644                 switch (LE_16(section_hdr->section_id)) {
645                 case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
646                 case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
647                 case PQI_CONF_TABLE_SECTION_DEBUG:
648                         break;
649                 case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
650                         softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + section_off;
651                         softs->fw_features_section_abs_addr = softs->pci_mem_base_vaddr + softs->fw_features_section_off;
652                         pqisrc_process_firmware_features(softs);
653                         break;
654                 case PQI_CONF_TABLE_SECTION_HEARTBEAT:
655                         softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
656                                 section_off +
657                                 offsetof(struct pqi_conf_table_heartbeat,
658                                         heartbeat_counter);
659                         softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
660                                 softs->heartbeat_counter_off);
661                         ret = PQI_STATUS_SUCCESS;
662                         break;
663                 default:
664                         DBG_INFO("unrecognized PQI config table section ID: 0x%x\n",
665                                 LE_16(section_hdr->section_id));
666                         break;
667                 }
668                 section_off = LE_16(section_hdr->next_section_off);
669         }
670 out:
671         os_mem_free(softs, (void *)conf_table,config_table_size);
672         return ret;
673 }
674
675 /* Wait for PQI reset completion for the adapter*/
676 int
677 pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
678 {
679         int ret = PQI_STATUS_SUCCESS;
680         pqi_reset_reg_t reset_reg;
681         int pqi_reset_timeout = 0;
682         uint64_t val = 0;
683         uint32_t max_timeout = 0;
684
685         val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);
686
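        /*
         * Per the mask and shift below, bits 47:32 of the PQI admin queue
         * capability register hold the maximum time to wait for reset
         * completion, in the same 100 ms units as the poll interval used in
         * the loop that follows.
         */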
687         max_timeout = (val & 0xFFFF00000000) >> 32;
688
689         DBG_INIT("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);
690
691         while(1) {
692                 if (pqi_reset_timeout++ == max_timeout) {
693                         return PQI_STATUS_TIMEOUT;
694                 }
695                 OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
696                 reset_reg.all_bits = PCI_MEM_GET32(softs,
697                         &softs->pqi_reg->dev_reset, PQI_DEV_RESET);
698                 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
699                         break;
700         }
701
702         return ret;
703 }
704
705 /*
706  * Function used to perform PQI hard reset.
707  */
708 int
709 pqi_reset(pqisrc_softstate_t *softs)
710 {
711         int ret = PQI_STATUS_SUCCESS;
712         uint32_t val = 0;
713         pqi_reset_reg_t pqi_reset_reg;
714
715         DBG_FUNC("IN\n");
716
717         if (true == softs->ctrl_in_pqi_mode) {
718
719                 if (softs->pqi_reset_quiesce_allowed) {
720                         val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
721                                         LEGACY_SIS_IDBR);
722                         val |= SIS_PQI_RESET_QUIESCE;
723                         PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
724                                         LEGACY_SIS_IDBR, LE_32(val));
725                         ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
726                         if (ret) {
727                                 DBG_ERR("failed with error %d during quiesce\n", ret);
728                                 return ret;
729                         }
730                 }
731
732                 pqi_reset_reg.all_bits = 0;
733                 pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
734                 pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
735
736                 PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
737                         LE_32(pqi_reset_reg.all_bits));
738
739                 ret = pqisrc_wait_for_pqi_reset_completion(softs);
740                 if (ret) {
741                         DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
742                         return ret;
743                 }
744         }
745         softs->ctrl_in_pqi_mode = false;
746         DBG_FUNC("OUT\n");
747         return ret;
748 }
749
750 /*
751  * Initialize the adapter with supported PQI configuration.
752  */
753 int
754 pqisrc_pqi_init(pqisrc_softstate_t *softs)
755 {
756         int ret = PQI_STATUS_SUCCESS;
757
758         DBG_FUNC("IN\n");
759
760         /* Check the PQI signature */
761         ret = pqisrc_check_pqimode(softs);
762         if(ret) {
763                 DBG_ERR("failed to switch to pqi\n");
764                 goto err_out;
765         }
766
767         PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
768         softs->ctrl_in_pqi_mode = true;
769
770         /* Get the number of online CPUs and NUMA/processor config from the OS */
771         ret = os_get_processor_config(softs);
772         if (ret) {
773                 DBG_ERR("Failed to get processor config from OS %d\n",
774                         ret);
775                 goto err_out;
776         }
777
778         softs->intr_type = INTR_TYPE_NONE;
779
780         /* Get the interrupt count, type, priority available from OS */
781         ret = os_get_intr_config(softs);
782         if (ret) {
783                 DBG_ERR("Failed to get interrupt config from OS %d\n",
784                         ret);
785                 goto err_out;
786         }
787
788         /* If the allocated interrupt is legacy (INTx), enable it by setting
789          * the PQI legacy INTx interrupt mask clear register.
790          */
791         if (INTR_TYPE_FIXED == softs->intr_type) {
792                 pqisrc_configure_legacy_intx(softs, true);
793                 sis_enable_intx(softs);
794         }
795
796         /* Create Admin Queue pair*/
797         ret = pqisrc_create_admin_queue(softs);
798         if(ret) {
799                 DBG_ERR("Failed to configure admin queue\n");
800                 goto err_admin_queue;
801         }
802
803         /* Creating the event and IO operational queues requires submitting
804            admin IU requests, so allocate the resources needed to submit them */
805
806         /* Allocate the request container block (rcb) */
807         ret = pqisrc_allocate_rcb(softs);
808         if (ret == PQI_STATUS_FAILURE) {
809                 DBG_ERR("Failed to allocate rcb \n");
810                 goto err_rcb;
811         }
812
813         /* Allocate & initialize request id queue */
814         ret = pqisrc_init_taglist(softs,&softs->taglist,
815                                 softs->max_outstanding_io);
816         if (ret) {
817                 DBG_ERR("Failed to allocate memory for request id q : %d\n",
818                         ret);
819                 goto err_taglist;
820         }
821
822         ret = pqisrc_configure_op_queues(softs);
823         if (ret) {
824                 DBG_ERR("Failed to configure op queue\n");
825                 goto err_config_opq;
826         }
827
828         /* Create Operational queues */
829         ret = pqisrc_create_op_queues(softs);
830         if(ret) {
831                 DBG_ERR("Failed to create op queue\n");
832                 ret = PQI_STATUS_FAILURE;
833                 goto err_create_opq;
834         }
835
836         softs->ctrl_online = true;
837
838         DBG_FUNC("OUT\n");
839         return ret;
840
841 err_create_opq:
842 err_config_opq:
843         pqisrc_destroy_taglist(softs,&softs->taglist);
844 err_taglist:
845         pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
846 err_rcb:
847         pqisrc_destroy_admin_queue(softs);
848 err_admin_queue:
849         os_free_intr_config(softs);
850 err_out:
851         DBG_FUNC("OUT failed\n");
852         return PQI_STATUS_FAILURE;
853 }
854
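/*
 * Force the controller back to SIS mode: fail if the controller firmware has
 * panicked, return immediately if SIS mode is already recorded, record SIS
 * mode directly if the firmware kernel is up, and otherwise disable
 * interrupts, hard-reset PQI and re-enable SIS.
 */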
855 int
856 pqisrc_force_sis(pqisrc_softstate_t *softs)
857 {
858         int ret = PQI_STATUS_SUCCESS;
859
860         if (SIS_IS_KERNEL_PANIC(softs)) {
861                 DBG_INIT("Controller FW is not running\n");
862                 return PQI_STATUS_FAILURE;
863         }
864
865         if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
866                 return ret;
867         }
868
869         if (SIS_IS_KERNEL_UP(softs)) {
870                 PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
871                 return ret;
872         }
873         /* Disable interrupts */
874         sis_disable_interrupt(softs);
875
876         /* reset pqi, this will delete queues */
877         ret = pqi_reset(softs);
878         if (ret) {
879                 return ret;
880         }
881         /* Re-enable SIS */
882         ret = pqisrc_reenable_sis(softs);
883         if (ret) {
884                 return ret;
885         }
886
887         PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
888
889         return ret;
890 }
891
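/*
 * Wait for outstanding commands to complete: poll once per millisecond until
 * every tag has been returned to the tag list, giving up if the controller
 * firmware panics or PQI_QUIESCE_TIMEOUT polls elapse.
 */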
892 static int
893 pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
894 {
895         int count = 0;
896         int ret = PQI_STATUS_SUCCESS;
897
898         DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem);
899
900         if (softs->taglist.num_elem == softs->max_outstanding_io)
901                 return ret;
902         else {
903                 DBG_WARN("%d commands pending\n",
904                 softs->max_outstanding_io - softs->taglist.num_elem);
905
906                 while(1) {
907
908                         /* Since the heartbeat timer is stopped, check the firmware status */
909                         if (SIS_IS_KERNEL_PANIC(softs)) {
910                                 DBG_ERR("Controller FW is not running\n");
911                                 return PQI_STATUS_FAILURE;
912                         }
913
914                         if (softs->taglist.num_elem != softs->max_outstanding_io) {
915                                 /* Sleep for 1 msec */
916                                 OS_SLEEP(1000);
917                                 count++;
918                                 if(count % 1000 == 0) {
919                                         DBG_WARN("Waited for %d seconds", count/1000);
920                                 }
921                                 if (count >= PQI_QUIESCE_TIMEOUT) {
922                                         return PQI_STATUS_FAILURE;
923                                 }
924                                 continue;
925                         }
926                         break;
927                 }
928         }
929         return ret;
930 }
931
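/*
 * Fail any internal (driver-issued) requests that are still marked pending so
 * that their waiters are released during teardown.
 */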
932 static void
933 pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
934 {
935
936         int tag = 0;
937         rcb_t *rcb;
938
939         for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
940                 rcb = &softs->rcb[tag];
941                 if(rcb->req_pending && is_internal_req(rcb)) {
942                         rcb->status = REQUEST_FAILED;
943                         rcb->req_pending = false;
944                 }
945         }
946 }
947
948
949 /*
950  * Uninitialize the resources used during PQI initialization.
951  */
952 void
953 pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
954 {
955         int i, ret;
956
957         DBG_FUNC("IN\n");
958
959         /* Wait for any rescan to finish */
960         pqisrc_wait_for_rescan_complete(softs);
961
962         /* Wait for commands to complete */
963         ret = pqisrc_wait_for_cmnd_complete(softs);
964
965         /* disable and free the interrupt resources */
966         os_destroy_intr(softs);
967
968         /* Complete all pending commands. */
969         if(ret != PQI_STATUS_SUCCESS) {
970                 pqisrc_complete_internal_cmds(softs);
971                 os_complete_outstanding_cmds_nodevice(softs);
972         }
973
974         if(softs->devlist_lockcreated==true){
975                 os_uninit_spinlock(&softs->devlist_lock);
976                 softs->devlist_lockcreated = false;
977         }
978
979         for (i = 0; i <  softs->num_op_raid_ibq; i++) {
980                 /* OP RAID IB Q */
981                 if(softs->op_raid_ib_q[i].lockcreated==true){
982                         OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
983                         softs->op_raid_ib_q[i].lockcreated = false;
984                 }
985                 /* OP AIO IB Q */
986                 if(softs->op_aio_ib_q[i].lockcreated==true){
987                         OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
988                         softs->op_aio_ib_q[i].lockcreated = false;
989                 }
990         }
991
992         /* Free Op queues */
993         os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
994         os_dma_mem_free(softs, &softs->op_obq_dma_mem);
995         os_dma_mem_free(softs, &softs->event_q_dma_mem);
996
997
998
999         /* Free  rcb */
1000         pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
1001
1002         /* Free request id lists */
1003         pqisrc_destroy_taglist(softs,&softs->taglist);
1004
1005         if(softs->admin_ib_queue.lockcreated==true) {
1006                 OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
1007                 softs->admin_ib_queue.lockcreated = false;
1008         }
1009
1010         /* Free Admin Queue */
1011         os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
1012
1013         /* Switch back to SIS mode */
1014         if (pqisrc_force_sis(softs)) {
1015                 DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
1016         }
1017
1018         DBG_FUNC("OUT\n");
1019 }
1020
1021 /*
1022  * Function to initialize the adapter settings.
1023  */
1024 int
1025 pqisrc_init(pqisrc_softstate_t *softs)
1026 {
1027         int ret = 0;
1028         int i = 0, j = 0;
1029
1030         DBG_FUNC("IN\n");
1031
1032         check_struct_sizes();
1033
1034         /* Init the Sync interface */
1035         ret = pqisrc_sis_init(softs);
1036         if (ret) {
1037                 DBG_ERR("SIS Init failed with error %d\n", ret);
1038                 goto err_out;
1039         }
1040
1041         ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
1042         if(ret != PQI_STATUS_SUCCESS){
1043                 DBG_ERR(" Failed to initialize scan lock\n");
1044                 goto err_scan_lock;
1045         }
1046
1047         /* Init the PQI interface */
1048         ret = pqisrc_pqi_init(softs);
1049         if (ret) {
1050                 DBG_ERR("PQI Init failed with error %d\n", ret);
1051                 goto err_pqi;
1052         }
1053
1054         /* Setup interrupt */
1055         ret = os_setup_intr(softs);
1056         if (ret) {
1057                 DBG_ERR("Interrupt setup failed with error %d\n", ret);
1058                 goto err_intr;
1059         }
1060
1061         /* Report event configuration */
1062         ret = pqisrc_report_event_config(softs);
1063         if(ret){
1064                 DBG_ERR(" Failed to configure Report events\n");
1065                 goto err_event;
1066         }
1067
1068         /* Set event configuration*/
1069         ret = pqisrc_set_event_config(softs);
1070         if(ret){
1071                 DBG_ERR(" Failed to configure Set events\n");
1072                 goto err_event;
1073         }
1074
1075         /* Check for PQI spanning */
1076         ret = pqisrc_get_ctrl_fw_version(softs);
1077         if(ret){
1078                 DBG_ERR(" Failed to get ctrl fw version\n");
1079                 goto err_fw_version;
1080         }
1081
1082         /* Update driver version into FW */
1083         ret = pqisrc_write_driver_version_to_host_wellness(softs);
1084         if (ret) {
1085                 DBG_ERR(" Failed to update driver version into FW\n");
1086                 goto err_host_wellness;
1087         }
1088
1089
1090         os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
1091         ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
1092         if(ret){
1093                 DBG_ERR(" Failed to initialize devlist_lock\n");
1094                 softs->devlist_lockcreated=false;
1095                 goto err_lock;
1096         }
1097         softs->devlist_lockcreated = true;
1098
1099         /* Get the PQI configuration table to read heart-beat counter*/
1100         ret = pqisrc_process_config_table(softs);
1101         if (ret) {
1102                 DBG_ERR("Failed to process PQI configuration table %d\n", ret);
1103                 goto err_config_tab;
1104         }
1105
1106         softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
1107
1108         /* Init device list */
1109         for(i = 0; i < PQI_MAX_DEVICES; i++)
1110                 for(j = 0; j < PQI_MAX_MULTILUN; j++)
1111                         softs->device_list[i][j] = NULL;
1112
1113         pqisrc_init_targetid_pool(softs);
1114
1115         DBG_FUNC("OUT\n");
1116         return ret;
1117
1118 err_config_tab:
1119         if(softs->devlist_lockcreated==true){
1120                 os_uninit_spinlock(&softs->devlist_lock);
1121                 softs->devlist_lockcreated = false;
1122         }
1123 err_lock:
1124 err_fw_version:
1125 err_event:
1126 err_host_wellness:
1127 err_intr:
1128         pqisrc_pqi_uninit(softs);
1129 err_pqi:
1130         os_destroy_semaphore(&softs->scan_lock);
1131 err_scan_lock:
1132         pqisrc_sis_uninit(softs);
1133 err_out:
1134         DBG_FUNC("OUT failed\n");
1135         return ret;
1136 }
1137
1138 /*
1139  * Write all data in the adapter's battery-backed cache to
1140  * storage.
1141  */
1142 int
1143 pqisrc_flush_cache( pqisrc_softstate_t *softs,
1144                         enum pqisrc_flush_cache_event_type event_type)
1145 {
1146         int rval = PQI_STATUS_SUCCESS;
1147         pqisrc_raid_req_t request;
1148         pqisrc_bmic_flush_cache_t *flush_buff = NULL;
1149
1150         DBG_FUNC("IN\n");
1151
1152         if (pqisrc_ctrl_offline(softs))
1153                 return PQI_STATUS_FAILURE;
1154
1155         flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
1156         if (!flush_buff) {
1157                 DBG_ERR("Failed to allocate memory for flush cache params\n");
1158                 rval = PQI_STATUS_FAILURE;
1159                 return rval;
1160         }
1161
1162         flush_buff->halt_event = event_type;
1163
1164         memset(&request, 0, sizeof(request));
1165
1166         rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
1167                         sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
1168                         (uint8_t *)RAID_CTLR_LUNID, NULL);
1169         if (rval) {
1170                 DBG_ERR("error in build send raid req ret=%d\n", rval);
1171         }
1172
1173         if (flush_buff)
1174                 os_mem_free(softs, (void *)flush_buff,
1175                         sizeof(pqisrc_bmic_flush_cache_t));
1176
1177         DBG_FUNC("OUT\n");
1178
1179         return rval;
1180 }
1181
1182 /*
1183  * Uninitialize the adapter.
1184  */
1185 void
1186 pqisrc_uninit(pqisrc_softstate_t *softs)
1187 {
1188         DBG_FUNC("IN\n");
1189
1190         pqisrc_pqi_uninit(softs);
1191
1192         pqisrc_sis_uninit(softs);
1193
1194         os_destroy_semaphore(&softs->scan_lock);
1195
1196         pqisrc_cleanup_devices(softs);
1197
1198         DBG_FUNC("OUT\n");
1199 }