2 * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include "smartpqi_includes.h"
30 /* 5 mins timeout for quiesce */
31 #define PQI_QUIESCE_TIMEOUT 300000
34 * Request the adapter to get PQI capabilities supported.
/*
 * pqisrc_report_pqi_capability:
 * Issue a REPORT PQI DEVICE CAPABILITY admin request to the adapter and
 * cache the returned queue limits (IQ/OQ counts, element sizes) and the
 * SOP IU-layer descriptor (max inbound IU length, spanning support) in softs.
 * Returns PQI_STATUS_SUCCESS on success, PQI_STATUS_FAILURE/timeout otherwise.
 * NOTE(review): this excerpt is missing lines (if-conditions, braces, labels);
 * error paths below are only partially visible.
 */
37 pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
39 int ret = PQI_STATUS_SUCCESS;
43 gen_adm_req_iu_t admin_req;
44 gen_adm_resp_iu_t admin_resp;
45 dma_mem_t pqi_cap_dma_buf;
46 pqi_dev_cap_t *capability = NULL;
47 pqi_iu_layer_desc_t *iu_layer_desc = NULL;
49 /* Allocate Non DMA memory */
50 capability = os_mem_alloc(softs, sizeof(*capability));
52 DBG_ERR("Failed to allocate memory for capability\n");
53 ret = PQI_STATUS_FAILURE;
57 memset(&admin_req, 0, sizeof(admin_req));
58 memset(&admin_resp, 0, sizeof(admin_resp));
60 memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
61 pqi_cap_dma_buf.tag = "pqi_cap_buf";
62 pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
63 pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
65 ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
67 DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
/* Build the admin IU: single data-block SG descriptor pointing at the DMA buffer. */
71 admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
72 admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
73 admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
74 admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
75 admin_req.req_type.general_func.sg_desc.type = SGL_DESCRIPTOR_CODE_DATA_BLOCK;
77 ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
78 if( PQI_STATUS_SUCCESS == ret) {
/* On success the DMA buffer contents are copied into `capability`
 * (the memcpy destination line is not visible in this excerpt). */
80 pqi_cap_dma_buf.virt_addr,
81 pqi_cap_dma_buf.size);
83 DBG_ERR("Failed to send admin req report pqi device capability\n");
/* Cache the reported device capabilities in the softstate. */
88 softs->pqi_dev_cap.max_iqs = capability->max_iqs;
89 softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
90 softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
91 softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
92 softs->pqi_dev_cap.max_oqs = capability->max_oqs;
93 softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
94 softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
95 softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;
/* SOP protocol IU-layer descriptor drives IU spanning decisions later
 * (see pqisrc_decide_opq_config). */
97 iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
98 softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
99 softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
100 softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;
102 DBG_INIT("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
103 DBG_INIT("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
104 DBG_INIT("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
105 DBG_INIT("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
106 DBG_INIT("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
107 DBG_INIT("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
108 DBG_INIT("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
109 DBG_INIT("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
110 DBG_INIT("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
111 DBG_INIT("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
112 DBG_INIT("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
/* NOTE(review): `capability` was allocated with sizeof(*capability) but is
 * freed with REPORT_PQI_DEV_CAP_DATA_BUF_SIZE — size mismatch; confirm the
 * os_mem_free size argument against the allocation above. */
115 os_mem_free(softs, (void *)capability,
116 REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
117 os_dma_mem_free(softs, &pqi_cap_dma_buf);
/* Error-path cleanup (labels not visible in excerpt). */
123 os_dma_mem_free(softs, &pqi_cap_dma_buf);
126 os_mem_free(softs, (void *)capability,
127 REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
129 DBG_FUNC("failed OUT\n");
130 return PQI_STATUS_FAILURE;
134 * Function used to deallocate the used rcb.
/*
 * pqisrc_free_rcb:
 * Free the sg-chain DMA buffers for tags [1, req_count) and then release
 * the rcb array itself. req_count lets callers free only the descriptors
 * that were successfully allocated (partial-failure unwind).
 * NOTE(review): `size` is computed from max_outstanding_io + 1, not from
 * req_count — intentional, since the rcb array is always full-sized.
 */
137 pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
144 num_req = softs->max_outstanding_io + 1;
145 size = num_req * sizeof(rcb_t);
146 for (i = 1; i < req_count; i++)
147 os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
148 os_mem_free(softs, (void *)softs->rcb, size);
155 * Allocate memory for rcb and SG descriptors.
/*
 * pqisrc_allocate_rcb:
 * Allocate the request-container-block (rcb) array plus one DMA-able
 * sg-chain buffer per rcb. Tags are 1-based (tag 0 unused), hence the
 * extra element. On partial failure, frees what was allocated so far
 * via pqisrc_free_rcb(softs, i).
 * NOTE(review): excerpt is missing lines (declarations, braces, labels).
 */
158 pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
160 int ret = PQI_STATUS_SUCCESS;
162 uint32_t num_req = 0;
163 uint32_t sg_buf_size = 0;
164 uint64_t alloc_size = 0;
169 /* Set maximum outstanding requests */
170 /* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
171 * The rcb will be accessed by using the tag as index
172 * As 0 tag index is not used, we need to allocate one extra.
174 softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
175 num_req = softs->max_outstanding_io + 1;
176 DBG_INIT("Max Outstanding IO reset to %d\n", num_req);
178 alloc_size = num_req * sizeof(rcb_t);
180 /* Allocate Non DMA memory */
181 rcb = os_mem_alloc(softs, alloc_size);
183 DBG_ERR("Failed to allocate memory for rcb\n");
184 ret = PQI_STATUS_FAILURE;
189 /* Allocate sg dma memory for sg chain */
190 sg_buf_size = softs->pqi_cap.max_sg_elem *
193 prcb = &softs->rcb[1];
195 for(i=1; i < num_req; i++) {
/* NOTE(review): `tag`'s declaration is not visible here; if it is a
 * stack buffer, every sg_dma_desc[i].tag aliases the same storage and
 * ends up holding only the last formatted name — verify upstream. */
197 sprintf(tag, "sg_dma_buf%d", i);
198 softs->sg_dma_desc[i].tag = tag;
199 softs->sg_dma_desc[i].size = sg_buf_size;
200 softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
202 ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
204 DBG_ERR("Failed to Allocate sg desc %d\n", ret);
205 ret = PQI_STATUS_FAILURE;
/* Hook this rcb up to its freshly allocated sg-chain buffer. */
208 prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
209 prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
/* Unwind: free the i descriptors allocated before the failure. */
216 pqisrc_free_rcb(softs, i);
218 DBG_FUNC("failed OUT\n");
223 * Function used to decide the operational queue configuration params
224 * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
/*
 * pqisrc_decide_opq_config:
 * Choose the operational-queue layout from the interrupt count, online
 * CPU count and the capabilities cached by pqisrc_report_pqi_capability:
 * number of IB/OB queues, whether the event queue shares OBQ0 with I/O,
 * element sizes, max spanned IU length and max embedded SG count.
 * NOTE(review): excerpt is missing lines (braces, else branches).
 */
227 pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
229 uint16_t total_iq_elements;
233 DBG_INIT("softs->intr_count : %d softs->num_cpus_online : %d",
234 softs->intr_count, softs->num_cpus_online);
236 if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
237 /* Share the event and Operational queue. */
238 softs->num_op_obq = 1;
239 softs->share_opq_and_eventq = true;
242 /* Note : One OBQ (OBQ0) reserved for event queue */
243 softs->num_op_obq = MIN(softs->num_cpus_online,
244 softs->intr_count) - 1;
245 softs->share_opq_and_eventq = false;
247 /* If the available interrupt count is more than one,
248 we dont need to share the interrupt for IO and event queue */
249 if (softs->intr_count > 1)
250 softs->share_opq_and_eventq = false;
252 DBG_INIT("softs->num_op_obq : %d\n",softs->num_op_obq);
254 softs->num_op_raid_ibq = softs->num_op_obq;
255 softs->num_op_aio_ibq = softs->num_op_raid_ibq;
/* Capability element lengths are reported in 16-byte units. */
256 softs->ibq_elem_size = softs->pqi_dev_cap.max_iq_elem_len * 16;
257 softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
258 if (softs->max_ib_iu_length_per_fw == 256 &&
259 softs->ob_spanning_supported) {
260 /* older f/w that doesn't actually support spanning. */
261 softs->max_ib_iu_length = softs->ibq_elem_size;
263 /* max. inbound IU length is an multiple of our inbound element size. */
264 softs->max_ib_iu_length =
265 (softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
266 softs->ibq_elem_size;
269 /* If Max. Outstanding IO came with Max. Spanning element count then,
270 needed elements per IO are multiplication of
271 Max.Outstanding IO and Max.Spanning element */
272 total_iq_elements = (softs->max_outstanding_io *
273 (softs->max_ib_iu_length / softs->ibq_elem_size));
275 softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
276 softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
277 softs->pqi_dev_cap.max_iq_elements);
279 softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
280 softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
281 softs->pqi_dev_cap.max_oq_elements);
/* SGs that spill past the first IU element, plus those embedded in it. */
283 softs->max_sg_per_iu = ((softs->max_ib_iu_length -
284 softs->ibq_elem_size) /
286 MAX_EMBEDDED_SG_IN_FIRST_IU;
288 DBG_INIT("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
289 DBG_INIT("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
290 DBG_INIT("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
291 DBG_INIT("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);
297 * Configure the operational queue parameters.
/*
 * pqisrc_configure_op_queues:
 * Fetch PQI device capabilities, reserve internal I/O slots, then derive
 * the operational queue configuration. Returns the status of the
 * capability request (later steps are void).
 */
300 pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
302 int ret = PQI_STATUS_SUCCESS;
304 /* Get the PQI capability,
305 REPORT PQI DEVICE CAPABILITY request */
306 ret = pqisrc_report_pqi_capability(softs);
308 DBG_ERR("Failed to send report pqi dev capability request : %d\n",
313 /* Reserve required no of slots for internal requests */
314 softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
316 /* Decide the Op queue configuration */
317 pqisrc_decide_opq_config(softs);
323 DBG_FUNC("OUT failed\n");
328 * Validate the PQI mode of adapter.
/*
 * pqisrc_check_pqimode:
 * Validate the adapter is in PQI mode by polling three register
 * conditions, each bounded by PQISRC_PQIMODE_READY_TIMEOUT:
 *   1) the PQI device signature matches,
 *   2) admin queue function/status reports IDLE,
 *   3) the device status register reaches the AT_INIT state.
 * Returns PQI_STATUS_SUCCESS, or PQI_STATUS_TIMEOUT on any expiry.
 * NOTE(review): excerpt is missing lines (loop constructs, braces).
 */
331 pqisrc_check_pqimode(pqisrc_softstate_t *softs)
333 int ret = PQI_STATUS_FAILURE;
335 uint64_t signature = 0;
339 /* Check the PQI device signature */
340 tmo = PQISRC_PQIMODE_READY_TIMEOUT;
342 signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
344 if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
345 sizeof(uint64_t)) == 0) {
346 ret = PQI_STATUS_SUCCESS;
349 OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
352 PRINT_PQI_SIGNATURE(signature);
355 DBG_ERR("PQI Signature is invalid\n");
356 ret = PQI_STATUS_TIMEOUT;
360 tmo = PQISRC_PQIMODE_READY_TIMEOUT;
361 /* Check function and status code for the device */
362 COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
363 PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
365 DBG_ERR("PQI device is not in IDLE state\n");
366 ret = PQI_STATUS_TIMEOUT;
371 tmo = PQISRC_PQIMODE_READY_TIMEOUT;
372 /* Check the PQI device status register */
373 COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
374 PQI_DEV_STATE_AT_INIT, tmo);
376 DBG_ERR("PQI Registers are not ready\n");
377 ret = PQI_STATUS_TIMEOUT;
384 DBG_FUNC("OUT failed\n");
388 /* PQI Feature processing */
/*
 * pqisrc_config_table_update:
 * Send a vendor-general CONFIG TABLE UPDATE IU telling firmware that the
 * host modified config-table sections [first_section, last_section]
 * (used after writing the firmware-features request bytes).
 * Returns PQI_STATUS_SUCCESS or PQI_STATUS_FAILURE.
 */
390 pqisrc_config_table_update(struct pqisrc_softstate *softs,
391 uint16_t first_section, uint16_t last_section)
393 pqi_vendor_general_request_t request;
394 int ret = PQI_STATUS_FAILURE;
396 memset(&request, 0, sizeof(request));
398 request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
399 request.header.iu_length = sizeof(request) - PQI_REQUEST_HEADER_LENGTH;
400 request.function_code = PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE;
401 request.data.config_table_update.first_section = first_section;
402 request.data.config_table_update.last_section = last_section;
404 ret = pqisrc_build_send_vendor_request(softs, &request, NULL);
406 if (ret != PQI_STATUS_SUCCESS) {
407 DBG_ERR("Failed to submit vendor general request IU, Ret status: %d\n", ret);
408 return PQI_STATUS_FAILURE;
411 return PQI_STATUS_SUCCESS;
/*
 * pqi_is_firmware_feature_supported:
 * Test bit `bit_position` in the firmware-features "supported" bitmap.
 * Bits past the bitmap length are treated as unsupported (the early-out
 * `return false;` after the bounds check is not visible in this excerpt).
 */
415 boolean_t pqi_is_firmware_feature_supported(
416 struct pqi_conf_table_firmware_features *firmware_feature_list,
417 unsigned int bit_position)
419 unsigned int byte_index;
421 byte_index = bit_position / BITS_PER_BYTE;
423 if (byte_index >= firmware_feature_list->num_elements)
426 return firmware_feature_list->features_supported[byte_index] &
427 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
/*
 * pqi_is_firmware_feature_enabled:
 * Test bit `bit_position` in the "enabled" bitmap, which lives after the
 * supported and host-requested bitmaps (hence the num_elements * 2 offset
 * from the start of features_supported).
 * firmware_features_addr is the mapped config-table section base.
 */
431 boolean_t pqi_is_firmware_feature_enabled(
432 struct pqi_conf_table_firmware_features *firmware_feature_list,
433 uint8_t *firmware_features_addr, unsigned int bit_position)
435 unsigned int byte_index;
436 uint8_t *feature_enabled_addr;
438 byte_index = (bit_position / BITS_PER_BYTE) +
439 (firmware_feature_list->num_elements * 2);
441 feature_enabled_addr = firmware_features_addr +
442 offsetof(struct pqi_conf_table_firmware_features,
443 features_supported) + byte_index;
445 return *feature_enabled_addr &
446 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
/*
 * pqi_request_firmware_feature:
 * Set bit `bit_position` in the host-requested bitmap, which immediately
 * follows the supported bitmap (offset of num_elements bytes).
 * Firmware is informed later via pqisrc_config_table_update.
 */
450 pqi_request_firmware_feature(
451 struct pqi_conf_table_firmware_features *firmware_feature_list,
452 unsigned int bit_position)
454 unsigned int byte_index;
456 byte_index = (bit_position / BITS_PER_BYTE) +
457 firmware_feature_list->num_elements;
459 firmware_feature_list->features_supported[byte_index] |=
460 (1 << (bit_position % BITS_PER_BYTE));
463 /* Update PQI config table firmware features section and inform the firmware */
/* Update PQI config table firmware features section and inform the firmware */
/*
 * pqisrc_set_host_requested_firmware_feature:
 * Copy the host-requested feature bitmap from the local (RAM) copy into
 * the adapter's mapped config-table section, then send a config-table
 * update IU for the firmware-features section. Returns the update status.
 */
465 pqisrc_set_host_requested_firmware_feature(pqisrc_softstate_t *softs,
466 struct pqi_conf_table_firmware_features *firmware_feature_list)
468 uint8_t *request_feature_addr;
469 void *request_feature_abs_addr;
/* Requested bitmap starts num_elements bytes into features_supported. */
471 request_feature_addr = firmware_feature_list->features_supported +
472 firmware_feature_list->num_elements;
/* Translate the RAM-copy offset into the mapped BAR address. */
473 request_feature_abs_addr = softs->fw_features_section_abs_addr +
474 (request_feature_addr - (uint8_t*)firmware_feature_list);
476 os_io_memcpy(request_feature_abs_addr, request_feature_addr,
477 firmware_feature_list->num_elements);
479 return pqisrc_config_table_update(softs,
480 PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES,
481 PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES);
484 /* Check firmware has enabled the feature specified in the respective bit position. */
/* Check firmware has enabled the feature specified in the respective bit position. */
/*
 * pqisrc_is_firmware_feature_enabled:
 * Like pqi_is_firmware_feature_enabled, but reads the "enabled" bitmap
 * directly from the mapped config-table section in softs rather than
 * from a caller-supplied base address.
 */
486 pqisrc_is_firmware_feature_enabled(pqisrc_softstate_t *softs,
487 struct pqi_conf_table_firmware_features *firmware_feature_list, uint16_t bit_position)
490 uint8_t *features_enabled_abs_addr;
/* Enabled bitmap follows the supported and requested bitmaps. */
492 byte_index = (bit_position / BITS_PER_BYTE) +
493 (firmware_feature_list->num_elements * 2);
495 features_enabled_abs_addr = softs->fw_features_section_abs_addr +
496 offsetof(struct pqi_conf_table_firmware_features,features_supported) + byte_index;
498 return *features_enabled_abs_addr &
499 (1 << (bit_position % BITS_PER_BYTE)) ? true : false;
/*
 * pqi_firmware_feature_status:
 * Per-feature callback run when firmware reports a feature enabled;
 * records the relevant capability flag in the softstate.
 */
503 pqi_firmware_feature_status(struct pqisrc_softstate *softs,
504 struct pqi_firmware_feature *firmware_feature)
506 switch(firmware_feature->feature_bit) {
507 case PQI_FIRMWARE_FEATURE_OFA:
509 case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT:
510 softs->timeout_in_passthrough = true;
512 case PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT:
513 softs->timeout_in_tmf = true;
516 DBG_NOTE("Nothing to do \n");
520 /* Firmware features supported by the driver */
/* Firmware features supported by the driver */
/* Table of features this driver negotiates with firmware; each entry
 * names the feature, its bit position, and the enable callback above. */
522 pqi_firmware_feature pqi_firmware_features[] = {
524 .feature_name = "Support timeout for pass-through commands",
525 .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_RAID_IU_SUPPORT,
526 .feature_status = pqi_firmware_feature_status,
529 .feature_name = "Support timeout for LUN Reset TMF",
530 .feature_bit = PQI_FIRMWARE_FEATURE_TIMEOUT_IN_TMF_IU_SUPPORT,
531 .feature_status = pqi_firmware_feature_status,
/*
 * pqisrc_process_firmware_features:
 * Negotiate driver/firmware features: for each entry in
 * pqi_firmware_features that firmware reports supported, set the
 * host-request bit; push the requests to firmware; then re-read the
 * enabled bitmap and invoke each enabled feature's status callback.
 * NOTE(review): excerpt is missing lines (declarations, braces).
 */
536 pqisrc_process_firmware_features(pqisrc_softstate_t *softs)
539 struct pqi_conf_table_firmware_features *firmware_feature_list;
541 unsigned int num_features_requested;
543 firmware_feature_list = (struct pqi_conf_table_firmware_features*)
544 softs->fw_features_section_abs_addr;
546 /* Check features and request those supported by firmware and driver.*/
547 for (i = 0, num_features_requested = 0;
548 i < ARRAY_SIZE(pqi_firmware_features); i++) {
549 /* Firmware support it ? */
550 if (pqi_is_firmware_feature_supported(firmware_feature_list,
551 pqi_firmware_features[i].feature_bit)) {
552 pqi_request_firmware_feature(firmware_feature_list,
553 pqi_firmware_features[i].feature_bit);
554 pqi_firmware_features[i].supported = true;
555 num_features_requested++;
556 DBG_NOTE("%s supported by driver, requesting firmware to enable it\n",
557 pqi_firmware_features[i].feature_name);
559 DBG_NOTE("%s supported by driver, but not by current firmware\n",
560 pqi_firmware_features[i].feature_name);
/* Nothing to negotiate — skip the config-table round-trip. */
563 if (num_features_requested == 0)
566 rc = pqisrc_set_host_requested_firmware_feature(softs, firmware_feature_list);
568 DBG_ERR("Failed to update pqi config table\n");
/* Second pass: see which requested features firmware actually enabled. */
572 for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
573 if (pqi_is_firmware_feature_enabled(firmware_feature_list,
574 softs->fw_features_section_abs_addr, pqi_firmware_features[i].feature_bit)) {
575 pqi_firmware_features[i].enabled = true;
576 DBG_NOTE("Firmware feature %s enabled \n",pqi_firmware_features[i].feature_name);
577 if(pqi_firmware_features[i].feature_status)
578 pqi_firmware_features[i].feature_status(softs, &(pqi_firmware_features[i]));
584 * Get the PQI configuration table parameters.
585 * Currently using for heart-beat counter scratch-pad register.
/*
 * pqisrc_process_config_table:
 * Copy the adapter's PQI configuration table from BAR space, validate
 * its signature, then walk the section list. Records the firmware-
 * features section (and runs feature negotiation) and the heartbeat
 * counter offset/address. Success requires finding the heartbeat section.
 * NOTE(review): excerpt is missing lines (if-conditions, labels, breaks).
 */
588 pqisrc_process_config_table(pqisrc_softstate_t *softs)
590 int ret = PQI_STATUS_FAILURE;
591 uint32_t config_table_size;
592 uint32_t section_off;
593 uint8_t *config_table_abs_addr;
594 struct pqi_conf_table *conf_table;
595 struct pqi_conf_table_section_header *section_hdr;
597 config_table_size = softs->pqi_cap.conf_tab_sz;
599 if (config_table_size < sizeof(*conf_table) ||
600 config_table_size > PQI_CONF_TABLE_MAX_LEN) {
601 DBG_ERR("Invalid PQI conf table length of %u\n",
606 conf_table = os_mem_alloc(softs, config_table_size);
608 DBG_ERR("Failed to allocate memory for PQI conf table\n");
/* NOTE(review): duplicate of the size check above, but using
 * sizeof(conf_table) — the POINTER size — instead of sizeof(*conf_table).
 * Redundant and wrong; should be removed or corrected upstream. */
612 if (config_table_size < sizeof(conf_table) ||
613 config_table_size > PQI_CONF_TABLE_MAX_LEN) {
614 DBG_ERR("Invalid PQI conf table length of %u\n",
619 config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
620 softs->pqi_cap.conf_tab_off);
622 PCI_MEM_GET_BUF(softs, config_table_abs_addr,
623 softs->pqi_cap.conf_tab_off,
624 (uint8_t*)conf_table, config_table_size);
627 if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
628 sizeof(conf_table->sign)) != 0) {
629 DBG_ERR("Invalid PQI config signature\n");
633 section_off = LE_32(conf_table->first_section_off);
635 while (section_off) {
/* Bounds-check before dereferencing the next section header. */
637 if (section_off+ sizeof(*section_hdr) >= config_table_size) {
638 DBG_INFO("Reached end of PQI config table. Breaking off.\n");
642 section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
644 switch (LE_16(section_hdr->section_id)) {
645 case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
646 case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
647 case PQI_CONF_TABLE_SECTION_DEBUG:
649 case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
650 softs->fw_features_section_off = softs->pqi_cap.conf_tab_off + section_off;
651 softs->fw_features_section_abs_addr = softs->pci_mem_base_vaddr + softs->fw_features_section_off;
652 pqisrc_process_firmware_features(softs);
654 case PQI_CONF_TABLE_SECTION_HEARTBEAT:
655 softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
657 offsetof(struct pqi_conf_table_heartbeat,
659 softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
660 softs->heartbeat_counter_off);
/* Heartbeat section found — overall success for this routine. */
661 ret = PQI_STATUS_SUCCESS;
664 DBG_INFO("unrecognized PQI config table section ID: 0x%x\n",
665 LE_16(section_hdr->section_id));
/* NOTE(review): next_section_off read with LE_16 while first_section_off
 * used LE_32 — confirm the field widths against the struct definition. */
668 section_off = LE_16(section_hdr->next_section_off);
671 os_mem_free(softs, (void *)conf_table,config_table_size);
675 /* Wait for PQI reset completion for the adapter*/
/* Wait for PQI reset completion for the adapter*/
/*
 * pqisrc_wait_for_pqi_reset_completion:
 * Poll the device-reset register every 100 ms until reset_action reads
 * PQI_RESET_ACTION_COMPLETED. The poll budget comes from bits 47:32 of
 * the admin-queue capability register (units of 100 ms per the comment).
 * Returns PQI_STATUS_SUCCESS or PQI_STATUS_TIMEOUT.
 */
677 pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
679 int ret = PQI_STATUS_SUCCESS;
680 pqi_reset_reg_t reset_reg;
681 int pqi_reset_timeout = 0;
683 uint32_t max_timeout = 0;
685 val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);
/* Extract the firmware-advertised reset timeout from bits 47:32. */
687 max_timeout = (val & 0xFFFF00000000) >> 32;
689 DBG_INIT("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);
692 if (pqi_reset_timeout++ == max_timeout) {
693 return PQI_STATUS_TIMEOUT;
695 OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
696 reset_reg.all_bits = PCI_MEM_GET32(softs,
697 &softs->pqi_reg->dev_reset, PQI_DEV_RESET);
698 if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
706 * Function used to perform PQI hard reset.
/*
 * pqi_reset:
 * Perform a PQI hard reset. If the controller is in PQI mode and quiesce
 * is allowed, first request SIS quiesce via the host-to-IOA doorbell and
 * wait for the bit to clear. Then write RESET_TYPE_HARD_RESET/
 * RESET_ACTION_RESET and wait for completion. On success marks the
 * controller as no longer in PQI mode.
 * NOTE(review): excerpt is missing lines (braces, error paths).
 */
709 pqi_reset(pqisrc_softstate_t *softs)
711 int ret = PQI_STATUS_SUCCESS;
713 pqi_reset_reg_t pqi_reset_reg;
717 if (true == softs->ctrl_in_pqi_mode) {
719 if (softs->pqi_reset_quiesce_allowed) {
720 val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
722 val |= SIS_PQI_RESET_QUIESCE;
723 PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
724 LEGACY_SIS_IDBR, LE_32(val));
725 ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
727 DBG_ERR("failed with error %d during quiesce\n", ret);
/* Issue the hard reset. */
732 pqi_reset_reg.all_bits = 0;
733 pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
734 pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
736 PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
737 LE_32(pqi_reset_reg.all_bits));
739 ret = pqisrc_wait_for_pqi_reset_completion(softs);
741 DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
745 softs->ctrl_in_pqi_mode = false;
751 * Initialize the adapter with supported PQI configuration.
/*
 * pqisrc_pqi_init:
 * Bring the adapter up in PQI mode: verify PQI signature/state, record
 * the mode, gather CPU and interrupt configuration from the OS, set up
 * legacy INTx if needed, create the admin queue, allocate rcbs and the
 * tag list, then configure and create the operational queues. On any
 * failure unwinds in reverse order and returns PQI_STATUS_FAILURE.
 * NOTE(review): excerpt is missing lines (if-checks, goto labels).
 */
754 pqisrc_pqi_init(pqisrc_softstate_t *softs)
756 int ret = PQI_STATUS_SUCCESS;
760 /* Check the PQI signature */
761 ret = pqisrc_check_pqimode(softs);
763 DBG_ERR("failed to switch to pqi\n");
767 PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
768 softs->ctrl_in_pqi_mode = true;
770 /* Get the No. of Online CPUs,NUMA/Processor config from OS */
771 ret = os_get_processor_config(softs);
773 DBG_ERR("Failed to get processor config from OS %d\n",
778 softs->intr_type = INTR_TYPE_NONE;
780 /* Get the interrupt count, type, priority available from OS */
781 ret = os_get_intr_config(softs);
783 DBG_ERR("Failed to get interrupt config from OS %d\n",
788 /*Enable/Set Legacy INTx Interrupt mask clear pqi register,
789 *if allocated interrupt is legacy type.
791 if (INTR_TYPE_FIXED == softs->intr_type) {
792 pqisrc_configure_legacy_intx(softs, true);
793 sis_enable_intx(softs);
796 /* Create Admin Queue pair*/
797 ret = pqisrc_create_admin_queue(softs);
799 DBG_ERR("Failed to configure admin queue\n");
800 goto err_admin_queue;
803 /* For creating event and IO operational queues we have to submit
804 admin IU requests.So Allocate resources for submitting IUs */
806 /* Allocate the request container block (rcb) */
807 ret = pqisrc_allocate_rcb(softs);
808 if (ret == PQI_STATUS_FAILURE) {
809 DBG_ERR("Failed to allocate rcb \n");
813 /* Allocate & initialize request id queue */
814 ret = pqisrc_init_taglist(softs,&softs->taglist,
815 softs->max_outstanding_io);
817 DBG_ERR("Failed to allocate memory for request id q : %d\n",
822 ret = pqisrc_configure_op_queues(softs);
824 DBG_ERR("Failed to configure op queue\n");
828 /* Create Operational queues */
829 ret = pqisrc_create_op_queues(softs);
831 DBG_ERR("Failed to create op queue\n");
832 ret = PQI_STATUS_FAILURE;
836 softs->ctrl_online = true;
/* Error unwind: release resources in reverse acquisition order
 * (labels are not visible in this excerpt). */
843 pqisrc_destroy_taglist(softs,&softs->taglist);
845 pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
847 pqisrc_destroy_admin_queue(softs);
849 os_free_intr_config(softs);
851 DBG_FUNC("OUT failed\n");
852 return PQI_STATUS_FAILURE;
/*
 * pqisrc_force_sis:
 * Force the adapter back into SIS mode. Fails outright on firmware
 * kernel panic; no-ops if already in SIS mode with the kernel up.
 * Otherwise disables interrupts, performs a PQI reset (deleting queues),
 * re-enables SIS, and records SIS mode.
 * NOTE(review): log string typo "runnning" (kept — doc-only change) and
 * missing trailing newline; compare the message at the similar check in
 * pqisrc_wait_for_cmnd_complete.
 */
856 pqisrc_force_sis(pqisrc_softstate_t *softs)
858 int ret = PQI_STATUS_SUCCESS;
860 if (SIS_IS_KERNEL_PANIC(softs)) {
861 DBG_INIT("Controller FW is not runnning");
862 return PQI_STATUS_FAILURE;
865 if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
869 if (SIS_IS_KERNEL_UP(softs)) {
870 PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
873 /* Disable interrupts ? */
874 sis_disable_interrupt(softs);
876 /* reset pqi, this will delete queues */
877 ret = pqi_reset(softs);
882 ret = pqisrc_reenable_sis(softs);
887 PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
/*
 * pqisrc_wait_for_cmnd_complete:
 * Quiesce helper: wait (polling, 1 ms steps) until every tag has been
 * returned to the free list (taglist.num_elem == max_outstanding_io),
 * i.e. no commands are outstanding. Bails out with failure if the
 * firmware kernel panics or PQI_QUIESCE_TIMEOUT (5 min) elapses.
 * NOTE(review): excerpt is missing lines (loop construct, count update).
 */
893 pqisrc_wait_for_cmnd_complete(pqisrc_softstate_t *softs)
896 int ret = PQI_STATUS_SUCCESS;
898 DBG_NOTE("softs->taglist.num_elem : %d",softs->taglist.num_elem);
/* All tags free means nothing is outstanding — nothing to wait for. */
900 if (softs->taglist.num_elem == softs->max_outstanding_io)
903 DBG_WARN("%d commands pending\n",
904 softs->max_outstanding_io - softs->taglist.num_elem)
908 /* Since heartbeat timer stopped ,check for firmware status*/
909 if (SIS_IS_KERNEL_PANIC(softs)) {
910 DBG_ERR("Controller FW is not running\n");
911 return PQI_STATUS_FAILURE;
914 if (softs->taglist.num_elem != softs->max_outstanding_io) {
915 /* Sleep for 1 msec */
/* Periodic progress log every 1000 iterations (~1 s). */
918 if(count % 1000 == 0) {
919 DBG_WARN("Waited for %d seconds", count/1000);
921 if (count >= PQI_QUIESCE_TIMEOUT) {
922 return PQI_STATUS_FAILURE;
/*
 * pqisrc_complete_internal_cmds:
 * Force-complete every pending driver-internal request: walk all valid
 * tags and mark matching rcbs failed and no longer pending, so waiters
 * blocked on req_pending can make progress during teardown.
 */
933 pqisrc_complete_internal_cmds(pqisrc_softstate_t *softs)
939 for (tag = 1; tag <= softs->max_outstanding_io; tag++) {
940 rcb = &softs->rcb[tag];
941 if(rcb->req_pending && is_internal_req(rcb)) {
942 rcb->status = REQUEST_FAILED;
943 rcb->req_pending = false;
950 * Uninitialize the resources used during PQI initialization.
/*
 * pqisrc_pqi_uninit:
 * Tear down everything pqisrc_pqi_init set up: wait for rescans and
 * outstanding commands, destroy interrupts, force-complete anything
 * still pending, release locks, free operational/admin queue DMA
 * memory, rcbs and the tag list, then switch the adapter back to SIS.
 * NOTE(review): excerpt is missing lines (declarations, braces).
 */
953 pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
959 /* Wait for any rescan to finish */
960 pqisrc_wait_for_rescan_complete(softs);
962 /* Wait for commands to complete */
963 ret = pqisrc_wait_for_cmnd_complete(softs);
965 /* disable and free the interrupt resources */
966 os_destroy_intr(softs);
968 /* Complete all pending commands. */
969 if(ret != PQI_STATUS_SUCCESS) {
970 pqisrc_complete_internal_cmds(softs);
971 os_complete_outstanding_cmds_nodevice(softs);
974 if(softs->devlist_lockcreated==true){
975 os_uninit_spinlock(&softs->devlist_lock);
976 softs->devlist_lockcreated = false;
/* Release per-queue locks for RAID and AIO inbound queues. */
979 for (i = 0; i < softs->num_op_raid_ibq; i++) {
981 if(softs->op_raid_ib_q[i].lockcreated==true){
982 OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
983 softs->op_raid_ib_q[i].lockcreated = false;
986 if(softs->op_aio_ib_q[i].lockcreated==true){
987 OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
988 softs->op_aio_ib_q[i].lockcreated = false;
/* Free operational/event queue DMA backing memory. */
993 os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
994 os_dma_mem_free(softs, &softs->op_obq_dma_mem);
995 os_dma_mem_free(softs, &softs->event_q_dma_mem);
1000 pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
1002 /* Free request id lists */
1003 pqisrc_destroy_taglist(softs,&softs->taglist);
1005 if(softs->admin_ib_queue.lockcreated==true) {
1006 OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
1007 softs->admin_ib_queue.lockcreated = false;
1010 /* Free Admin Queue */
1011 os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
1013 /* Switch back to SIS mode */
1014 if (pqisrc_force_sis(softs)) {
1015 DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
1022 * Function to initialize the adapter settings.
/*
 * pqisrc_init:
 * Top-level adapter initialization: SIS init, scan-lock semaphore, PQI
 * init, interrupt setup, event configuration (report + set), firmware
 * version query, driver-version wellness update, devlist spinlock,
 * config-table processing (heartbeat counter), then device-list and
 * target-id pool initialization. Unwinds each step in reverse on error.
 * NOTE(review): excerpt is missing lines (if-checks, goto labels).
 */
1025 pqisrc_init(pqisrc_softstate_t *softs)
1032 check_struct_sizes();
1034 /* Init the Sync interface */
1035 ret = pqisrc_sis_init(softs);
1037 DBG_ERR("SIS Init failed with error %d\n", ret);
1041 ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
1042 if(ret != PQI_STATUS_SUCCESS){
1043 DBG_ERR(" Failed to initialize scan lock\n");
1047 /* Init the PQI interface */
1048 ret = pqisrc_pqi_init(softs);
1050 DBG_ERR("PQI Init failed with error %d\n", ret);
1054 /* Setup interrupt */
1055 ret = os_setup_intr(softs);
1057 DBG_ERR("Interrupt setup failed with error %d\n", ret);
1061 /* Report event configuration */
1062 ret = pqisrc_report_event_config(softs);
1064 DBG_ERR(" Failed to configure Report events\n");
1068 /* Set event configuration*/
1069 ret = pqisrc_set_event_config(softs);
1071 DBG_ERR(" Failed to configure Set events\n");
1075 /* Check for For PQI spanning */
1076 ret = pqisrc_get_ctrl_fw_version(softs);
1078 DBG_ERR(" Failed to get ctrl fw version\n");
1079 goto err_fw_version;
1082 /* update driver version in to FW */
1083 ret = pqisrc_write_driver_version_to_host_wellness(softs);
1085 DBG_ERR(" Failed to update driver version in to FW");
1086 goto err_host_wellness;
1090 os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
1091 ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
1093 DBG_ERR(" Failed to initialize devlist_lock\n");
1094 softs->devlist_lockcreated=false;
1097 softs->devlist_lockcreated = true;
1099 /* Get the PQI configuration table to read heart-beat counter*/
1100 ret = pqisrc_process_config_table(softs);
1102 DBG_ERR("Failed to process PQI configuration table %d\n", ret);
1103 goto err_config_tab;
/* Seed the previous heartbeat so the first watchdog tick sees progress. */
1106 softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
1108 /* Init device list */
1109 for(i = 0; i < PQI_MAX_DEVICES; i++)
1110 for(j = 0; j < PQI_MAX_MULTILUN; j++)
1111 softs->device_list[i][j] = NULL;
1113 pqisrc_init_targetid_pool(softs);
/* Error unwind (labels not visible in this excerpt). */
1119 if(softs->devlist_lockcreated==true){
1120 os_uninit_spinlock(&softs->devlist_lock);
1121 softs->devlist_lockcreated = false;
1128 pqisrc_pqi_uninit(softs);
1130 os_destroy_semaphore(&softs->scan_lock);
1132 pqisrc_sis_uninit(softs);
1134 DBG_FUNC("OUT failed\n");
1139 * Write all data in the adapter's battery-backed cache to
/*
 * pqisrc_flush_cache:
 * Ask the controller to commit its battery-backed write cache to media
 * via a BMIC SA_CACHE_FLUSH RAID request addressed to the controller
 * LUN. `event_type` tells firmware why the flush is happening (shutdown,
 * hibernate, etc.). No-op failure if the controller is already offline.
 * NOTE(review): excerpt is missing lines (null-check, braces, label).
 */
1143 pqisrc_flush_cache( pqisrc_softstate_t *softs,
1144 enum pqisrc_flush_cache_event_type event_type)
1146 int rval = PQI_STATUS_SUCCESS;
1147 pqisrc_raid_req_t request;
1148 pqisrc_bmic_flush_cache_t *flush_buff = NULL;
1152 if (pqisrc_ctrl_offline(softs))
1153 return PQI_STATUS_FAILURE;
1155 flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
1157 DBG_ERR("Failed to allocate memory for flush cache params\n");
1158 rval = PQI_STATUS_FAILURE;
1162 flush_buff->halt_event = event_type;
1164 memset(&request, 0, sizeof(request));
1166 rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
1167 sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
1168 (uint8_t *)RAID_CTLR_LUNID, NULL);
1170 DBG_ERR("error in build send raid req ret=%d\n", rval);
1174 os_mem_free(softs, (void *)flush_buff,
1175 sizeof(pqisrc_bmic_flush_cache_t));
1183 * Uninitialize the adapter.
1186 pqisrc_uninit(pqisrc_softstate_t *softs)
1190 pqisrc_pqi_uninit(softs);
1192 pqisrc_sis_uninit(softs);
1194 os_destroy_semaphore(&softs->scan_lock);
1196 pqisrc_cleanup_devices(softs);