2 * Copyright (c) 2018 Microsemi Corporation.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include "smartpqi_includes.h"
32 * Request the adapter to get PQI capabilities supported.
/*
 * pqisrc_report_pqi_capability:
 * Issue a REPORT PQI DEVICE CAPABILITY admin request to the adapter and
 * cache the returned queue limits (max IQ/OQ counts, element lengths,
 * interrupt-coalescing granularity) and the SOP IU-layer spanning
 * capabilities into the driver soft state.
 *
 * Returns PQI_STATUS_SUCCESS on success; the error path (bottom of the
 * function) frees both buffers and returns PQI_STATUS_FAILURE.
 *
 * NOTE(review): this view of the file has elided lines (error-branch
 * conditionals, goto labels); comments describe the visible logic only.
 */
34 static int pqisrc_report_pqi_capability(pqisrc_softstate_t *softs)
36 	int ret = PQI_STATUS_SUCCESS;
40 	gen_adm_req_iu_t admin_req;
41 	gen_adm_resp_iu_t admin_resp;
42 	dma_mem_t pqi_cap_dma_buf;
43 	pqi_dev_cap_t *capability = NULL;
44 	pqi_iu_layer_desc_t *iu_layer_desc = NULL;
/* Non-DMA scratch copy of the capability data; freed on both exit paths. */
46 	/* Allocate Non DMA memory */
47 	capability = os_mem_alloc(softs, sizeof(*capability));
49 		DBG_ERR("Failed to allocate memory for capability\n");
50 		ret = PQI_STATUS_FAILURE;
54 	memset(&admin_req, 0, sizeof(admin_req));
55 	memset(&admin_resp, 0, sizeof(admin_resp));
/* DMA-able buffer the controller writes the capability data into. */
57 	memset(&pqi_cap_dma_buf, 0, sizeof(struct dma_mem));
58 	pqi_cap_dma_buf.tag = "pqi_cap_buf";
59 	pqi_cap_dma_buf.size = REPORT_PQI_DEV_CAP_DATA_BUF_SIZE;
60 	pqi_cap_dma_buf.align = PQISRC_DEFAULT_DMA_ALIGN;
62 	ret = os_dma_mem_alloc(softs, &pqi_cap_dma_buf);
64 		DBG_ERR("Failed to allocate capability DMA buffer : %d\n", ret);
/* Build the admin IU: a single SGL descriptor pointing at the DMA buffer. */
68 	admin_req.fn_code = PQI_FUNCTION_REPORT_DEV_CAP;
69 	admin_req.req_type.general_func.buf_size = pqi_cap_dma_buf.size;
70 	admin_req.req_type.general_func.sg_desc.length = pqi_cap_dma_buf.size;
71 	admin_req.req_type.general_func.sg_desc.addr = pqi_cap_dma_buf.dma_addr;
72 	admin_req.req_type.general_func.sg_desc.type =	SGL_DESCRIPTOR_CODE_DATA_BLOCK;
74 	ret = pqisrc_submit_admin_req(softs, &admin_req, &admin_resp);
/* On success the DMA buffer contents are copied into 'capability'. */
75 	if( PQI_STATUS_SUCCESS == ret) {
77 			pqi_cap_dma_buf.virt_addr,
78 			pqi_cap_dma_buf.size);
80 		DBG_ERR("Failed to send admin req report pqi device capability\n");
/* Cache the controller-reported queue limits in the soft state. */
85 	softs->pqi_dev_cap.max_iqs = capability->max_iqs;
86 	softs->pqi_dev_cap.max_iq_elements = capability->max_iq_elements;
87 	softs->pqi_dev_cap.max_iq_elem_len = capability->max_iq_elem_len;
88 	softs->pqi_dev_cap.min_iq_elem_len = capability->min_iq_elem_len;
89 	softs->pqi_dev_cap.max_oqs = capability->max_oqs;
90 	softs->pqi_dev_cap.max_oq_elements = capability->max_oq_elements;
91 	softs->pqi_dev_cap.max_oq_elem_len = capability->max_oq_elem_len;
92 	softs->pqi_dev_cap.intr_coales_time_granularity = capability->intr_coales_time_granularity;
/* SOP protocol IU-layer descriptor: inbound IU length and spanning support. */
94 	iu_layer_desc = &capability->iu_layer_desc[PQI_PROTOCOL_SOP];
95 	softs->max_ib_iu_length_per_fw = iu_layer_desc->max_ib_iu_len;
96 	softs->ib_spanning_supported = iu_layer_desc->ib_spanning_supported;
97 	softs->ob_spanning_supported = iu_layer_desc->ob_spanning_supported;
99 	DBG_INFO("softs->pqi_dev_cap.max_iqs: %d\n", softs->pqi_dev_cap.max_iqs);
100 	DBG_INFO("softs->pqi_dev_cap.max_iq_elements: %d\n", softs->pqi_dev_cap.max_iq_elements);
101 	DBG_INFO("softs->pqi_dev_cap.max_iq_elem_len: %d\n", softs->pqi_dev_cap.max_iq_elem_len);
102 	DBG_INFO("softs->pqi_dev_cap.min_iq_elem_len: %d\n", softs->pqi_dev_cap.min_iq_elem_len);
103 	DBG_INFO("softs->pqi_dev_cap.max_oqs: %d\n", softs->pqi_dev_cap.max_oqs);
104 	DBG_INFO("softs->pqi_dev_cap.max_oq_elements: %d\n", softs->pqi_dev_cap.max_oq_elements);
105 	DBG_INFO("softs->pqi_dev_cap.max_oq_elem_len: %d\n", softs->pqi_dev_cap.max_oq_elem_len);
106 	DBG_INFO("softs->pqi_dev_cap.intr_coales_time_granularity: %d\n", softs->pqi_dev_cap.intr_coales_time_granularity);
107 	DBG_INFO("softs->max_ib_iu_length_per_fw: %d\n", softs->max_ib_iu_length_per_fw);
108 	DBG_INFO("softs->ib_spanning_supported: %d\n", softs->ib_spanning_supported);
109 	DBG_INFO("softs->ob_spanning_supported: %d\n", softs->ob_spanning_supported);
/* Success path cleanup: both the heap copy and the DMA buffer are released. */
112 	os_mem_free(softs, (void *)capability,
113 		    REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
114 	os_dma_mem_free(softs, &pqi_cap_dma_buf);
/* Error path cleanup (reached via labels elided from this view). */
120 	os_dma_mem_free(softs, &pqi_cap_dma_buf);
123 		os_mem_free(softs, (void *)capability,
124 			REPORT_PQI_DEV_CAP_DATA_BUF_SIZE);
126 	DBG_FUNC("failed OUT\n");
127 	return PQI_STATUS_FAILURE;
131 * Function used to deallocate the used rcb.
/*
 * pqisrc_free_rcb:
 * Release the per-request SG-chain DMA buffers for tags 1..req_count-1
 * and then free the rcb array itself.  The rcb array size is recomputed
 * from max_outstanding_io + 1 (tag 0 is unused, hence the extra slot).
 */
133 void pqisrc_free_rcb(pqisrc_softstate_t *softs, int req_count)
141 	num_req = softs->max_outstanding_io + 1;
142 	size = num_req * sizeof(rcb_t);
/* Tag 0 is never allocated, so DMA teardown starts at index 1. */
143 	for (i = 1; i < req_count; i++)
144 		os_dma_mem_free(softs, &softs->sg_dma_desc[i]);
145 	os_mem_free(softs, (void *)softs->rcb, size);
152 * Allocate memory for rcb and SG descriptors.
/*
 * pqisrc_allocate_rcb:
 * Allocate the request container block (rcb) array and, for each valid
 * tag (1..max_outstanding_io), a DMA buffer used for SG chaining when a
 * request needs more SG elements than fit in the IU.
 *
 * Returns PQI_STATUS_SUCCESS, or PQI_STATUS_FAILURE after unwinding the
 * partially-allocated DMA buffers via pqisrc_free_rcb().
 */
154 static int pqisrc_allocate_rcb(pqisrc_softstate_t *softs)
156 	int ret = PQI_STATUS_SUCCESS;
158 	uint32_t num_req = 0;
159 	uint32_t sg_buf_size = 0;
160 	uint64_t alloc_size = 0;
165 	/* Set maximum outstanding requests */
166 	/* The valid tag values are from 1, 2, ..., softs->max_outstanding_io
167 	 * The rcb will be accessed by using the tag as index
168 	 * As 0 tag index is not used, we need to allocate one extra.
170 	softs->max_outstanding_io = softs->pqi_cap.max_outstanding_io;
171 	num_req = softs->max_outstanding_io + 1;
172 	DBG_INFO("Max Outstanding IO reset to %d\n", num_req);
174 	alloc_size = num_req * sizeof(rcb_t);
176 	/* Allocate Non DMA memory */
177 	rcb = os_mem_alloc(softs, alloc_size);
179 		DBG_ERR("Failed to allocate memory for rcb\n");
180 		ret = PQI_STATUS_FAILURE;
185 	/* Allocate sg dma memory for sg chain  */
186 	sg_buf_size = softs->pqi_cap.max_sg_elem *
/* prcb walks the rcb array in step with the tag index i below. */
189 	prcb = &softs->rcb[1];
191 	for(i=1; i < num_req; i++) {
/* NOTE(review): 'tag' is assigned into each descriptor by pointer; if it
 * is a single stack/heap buffer (declaration not visible here), every
 * descriptor would alias the same, repeatedly overwritten string —
 * confirm against the full source. */
193 		sprintf(tag, "sg_dma_buf%d", i);
194 		softs->sg_dma_desc[i].tag = tag;
195 		softs->sg_dma_desc[i].size = sg_buf_size;
196 		softs->sg_dma_desc[i].align = PQISRC_DEFAULT_DMA_ALIGN;
198 		ret = os_dma_mem_alloc(softs, &softs->sg_dma_desc[i]);
200 			DBG_ERR("Failed to Allocate sg desc %d\n", ret);
201 			ret = PQI_STATUS_FAILURE;
/* Publish the chain buffer's CPU and bus addresses into the rcb. */
204 		prcb->sg_chain_virt = (sgt_t *)(softs->sg_dma_desc[i].virt_addr);
205 		prcb->sg_chain_dma = (dma_addr_t)(softs->sg_dma_desc[i].dma_addr);
/* Error unwind: free the i-1 DMA buffers allocated so far plus the rcb. */
212 	pqisrc_free_rcb(softs, i);
214 	DBG_FUNC("failed OUT\n");
219 * Function used to decide the operational queue configuration params
220 * - no of ibq/obq, shared/non-shared interrupt resource, IU spanning support
/*
 * pqisrc_decide_opq_config:
 * Derive the operational queue layout from the interrupt count, online
 * CPU count and the capabilities cached by
 * pqisrc_report_pqi_capability(): number of inbound (RAID/AIO) and
 * outbound queues, element sizes, elements per queue, max inbound IU
 * length and max SG entries per IU.
 */
222 void pqisrc_decide_opq_config(pqisrc_softstate_t *softs)
224 	uint16_t total_iq_elements;
228 	DBG_INFO("softs->intr_count : %d  softs->num_cpus_online : %d",
229 		softs->intr_count, softs->num_cpus_online);
/* With a single vector or single CPU, the event queue shares OBQ0. */
231 	if (softs->intr_count == 1 || softs->num_cpus_online == 1) {
232 		/* Share the event and Operational queue. */
233 		softs->num_op_obq = 1;
234 		softs->share_opq_and_eventq = true;
237 		/* Note : One OBQ (OBQ0) reserved for event queue */
238 		softs->num_op_obq = MIN(softs->num_cpus_online,
239 					softs->intr_count) - 1;
/* NOTE(review): this assignment immediately overwrites the MIN() result
 * computed on the two lines above — the first store is dead. Confirm
 * which formula is intended against the upstream driver. */
240 		softs->num_op_obq = softs->intr_count - 1;
241 		softs->share_opq_and_eventq = false;
246 	 * softs->num_cpus_online is set as number of physical CPUs,
247 	 * So we can have more queues/interrupts .
249 	if (softs->intr_count > 1)
250 		softs->share_opq_and_eventq = false;
253 	DBG_INFO("softs->num_op_obq : %d\n",softs->num_op_obq);
/* One inbound RAID queue and one inbound AIO queue per outbound queue. */
255 	softs->num_op_raid_ibq = softs->num_op_obq;
256 	softs->num_op_aio_ibq = softs->num_op_raid_ibq;
/* Capability element lengths are reported in 16-byte units. */
257 	softs->ibq_elem_size =  softs->pqi_dev_cap.max_iq_elem_len * 16;
258 	softs->obq_elem_size = softs->pqi_dev_cap.max_oq_elem_len * 16;
259 	if (softs->max_ib_iu_length_per_fw == 256 &&
260 	    softs->ob_spanning_supported) {
261 		/* older f/w that doesn't actually support spanning. */
262 		softs->max_ib_iu_length = softs->ibq_elem_size;
264 		/* max. inbound IU length is an multiple of our inbound element size. */
265 		softs->max_ib_iu_length =
266 			(softs->max_ib_iu_length_per_fw / softs->ibq_elem_size) *
267 			softs->ibq_elem_size;
270 	/* If Max. Outstanding IO came with Max. Spanning element count then,
271 	   needed elements per IO are multiplication of
272 	   Max.Outstanding IO and Max.Spanning element */
273 	total_iq_elements = (softs->max_outstanding_io *
274 			     (softs->max_ib_iu_length / softs->ibq_elem_size));
/* Spread inbound elements across the RAID IBQs, capped by the device. */
276 	softs->num_elem_per_op_ibq = total_iq_elements / softs->num_op_raid_ibq;
277 	softs->num_elem_per_op_ibq = MIN(softs->num_elem_per_op_ibq,
278 					 softs->pqi_dev_cap.max_iq_elements);
280 	softs->num_elem_per_op_obq = softs->max_outstanding_io / softs->num_op_obq;
281 	softs->num_elem_per_op_obq = MIN(softs->num_elem_per_op_obq,
282 					 softs->pqi_dev_cap.max_oq_elements);
/* SG entries that fit in spanned IU space beyond the first element,
 * plus the SGs embedded in the first IU. */
284 	softs->max_sg_per_iu = ((softs->max_ib_iu_length -
285 				 softs->ibq_elem_size) /
287 				 MAX_EMBEDDED_SG_IN_FIRST_IU;
289 	DBG_INFO("softs->max_ib_iu_length: %d\n", softs->max_ib_iu_length);
290 	DBG_INFO("softs->num_elem_per_op_ibq: %d\n", softs->num_elem_per_op_ibq);
291 	DBG_INFO("softs->num_elem_per_op_obq: %d\n", softs->num_elem_per_op_obq);
292 	DBG_INFO("softs->max_sg_per_iu: %d\n", softs->max_sg_per_iu);
298 * Configure the operational queue parameters.
/*
 * pqisrc_configure_op_queues:
 * Top-level operational queue configuration: query device capabilities,
 * reserve slots for internal requests, then compute the queue layout.
 * Returns PQI_STATUS_SUCCESS or propagates the capability-query failure.
 */
300 int pqisrc_configure_op_queues(pqisrc_softstate_t *softs)
302 	int ret = PQI_STATUS_SUCCESS;
304 	/*  Get the PQI capability,
305 		REPORT PQI DEVICE CAPABILITY request */
306 	ret = pqisrc_report_pqi_capability(softs);
308 		DBG_ERR("Failed to send report pqi dev capability request : %d\n",
313 	/* Reserve required no of slots for internal requests */
314 	softs->max_io_for_scsi_ml = softs->max_outstanding_io - PQI_RESERVED_IO_SLOTS_CNT;
316 	/* Decide the Op queue configuration */
317 	pqisrc_decide_opq_config(softs);
323 	DBG_FUNC("OUT failed\n");
328 * Validate the PQI mode of adapter.
/*
 * pqisrc_check_pqimode:
 * Verify the adapter is operating in PQI mode by polling three register
 * conditions, each under PQISRC_PQIMODE_READY_TIMEOUT:
 *   1. the PQI device signature matches PQISRC_PQI_DEVICE_SIGNATURE,
 *   2. the admin queue function/status reports IDLE,
 *   3. the device status register reaches the AT_INIT state.
 * Returns PQI_STATUS_SUCCESS, or PQI_STATUS_TIMEOUT on any expiry.
 */
330 int pqisrc_check_pqimode(pqisrc_softstate_t *softs)
332 	int ret = PQI_STATUS_FAILURE;
334 	uint64_t signature = 0;
338 	/* Check the PQI device signature */
339 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
341 		signature = LE_64(PCI_MEM_GET64(softs, &softs->pqi_reg->signature, PQI_SIGNATURE));
343 		if (memcmp(&signature, PQISRC_PQI_DEVICE_SIGNATURE,
344 				sizeof(uint64_t)) == 0) {
345 			ret = PQI_STATUS_SUCCESS;
/* Poll interval between signature reads while the timeout counts down. */
348 		OS_SLEEP(PQISRC_MODE_READY_POLL_INTERVAL);
351 	PRINT_PQI_SIGNATURE(signature);
354 		DBG_ERR("PQI Signature is invalid\n");
355 		ret = PQI_STATUS_TIMEOUT;
359 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
360 	/* Check function and status code for the device */
361 	COND_WAIT((PCI_MEM_GET64(softs, &softs->pqi_reg->admin_q_config,
362 		PQI_ADMINQ_CONFIG) == PQI_ADMIN_QUEUE_CONF_FUNC_STATUS_IDLE), tmo);
364 		DBG_ERR("PQI device is not in IDLE state\n");
365 		ret = PQI_STATUS_TIMEOUT;
370 	tmo = PQISRC_PQIMODE_READY_TIMEOUT;
371 	/* Check the PQI device status register */
372 	COND_WAIT(LE_32(PCI_MEM_GET32(softs, &softs->pqi_reg->pqi_dev_status, PQI_DEV_STATUS)) &
373 		PQI_DEV_STATE_AT_INIT, tmo);
375 		DBG_ERR("PQI Registers are not ready\n");
376 		ret = PQI_STATUS_TIMEOUT;
383 	DBG_FUNC("OUT failed\n");
388 * Get the PQI configuration table parameters.
389 * Currently using for heart-beat counter scratch-pad register.
/*
 * pqisrc_process_config_table:
 * Copy the PQI configuration table out of BAR memory and walk its
 * section list.  Currently only the HEARTBEAT section is consumed: its
 * counter offset/absolute address is recorded in the soft state so the
 * watchdog can read the controller heart-beat.
 * Returns PQI_STATUS_SUCCESS only if a heartbeat section was found.
 */
391 int pqisrc_process_config_table(pqisrc_softstate_t *softs)
393 	int ret = PQI_STATUS_FAILURE;
394 	uint32_t config_table_size;
395 	uint32_t section_off;
396 	uint8_t *config_table_abs_addr;
397 	struct pqi_conf_table *conf_table;
398 	struct pqi_conf_table_section_header *section_hdr;
400 	config_table_size = softs->pqi_cap.conf_tab_sz;
/* Sanity-check the table length before allocating/copying it. */
402 	if (config_table_size < sizeof(*conf_table) ||
403 		config_table_size > PQI_CONF_TABLE_MAX_LEN) {
404 		DBG_ERR("Invalid PQI conf table length of %u\n",
409 	conf_table = os_mem_alloc(softs, config_table_size);
411 		DBG_ERR("Failed to allocate memory for PQI conf table\n");
/* Snapshot the whole table from BAR space into host memory. */
415 	config_table_abs_addr = (uint8_t *)(softs->pci_mem_base_vaddr +
416 					softs->pqi_cap.conf_tab_off);
418 	PCI_MEM_GET_BUF(softs, config_table_abs_addr,
419 			softs->pqi_cap.conf_tab_off,
420 			(uint8_t*)conf_table, config_table_size);
423 	if (memcmp(conf_table->sign, PQI_CONF_TABLE_SIGNATURE,
424 			sizeof(conf_table->sign)) != 0) {
425 		DBG_ERR("Invalid PQI config signature\n");
/* Walk the singly-linked section list; offset 0 terminates the walk. */
429 	section_off = LE_32(conf_table->first_section_off);
431 	while (section_off) {
/* Reject offsets whose header would run off the end of the snapshot. */
433 		if (section_off+ sizeof(*section_hdr) >= config_table_size) {
434 			DBG_ERR("PQI config table section offset (%u) beyond \
435 				end of config table (config table length: %u)\n",
436 				section_off, config_table_size);
440 		section_hdr = (struct pqi_conf_table_section_header *)((uint8_t *)conf_table + section_off);
442 		switch (LE_16(section_hdr->section_id)) {
443 		case PQI_CONF_TABLE_SECTION_GENERAL_INFO:
444 		case PQI_CONF_TABLE_SECTION_FIRMWARE_FEATURES:
445 		case PQI_CONF_TABLE_SECTION_FIRMWARE_ERRATA:
446 		case PQI_CONF_TABLE_SECTION_DEBUG:
448 		case PQI_CONF_TABLE_SECTION_HEARTBEAT:
/* Record where the heart-beat counter lives, both as a BAR offset and
 * as an absolute mapped address. */
449 		softs->heartbeat_counter_off = softs->pqi_cap.conf_tab_off +
451 						offsetof(struct pqi_conf_table_heartbeat,
453 		softs->heartbeat_counter_abs_addr = (uint64_t *)(softs->pci_mem_base_vaddr +
454 							softs->heartbeat_counter_off);
455 		ret = PQI_STATUS_SUCCESS;
458 			DBG_ERR("unrecognized PQI config table section ID: 0x%x\n",
459 				LE_16(section_hdr->section_id));
/* NOTE(review): next_section_off is read via LE_16 while
 * first_section_off used LE_32 — presumably the header field really is
 * 16-bit; verify against the struct definition. */
462 		section_off = LE_16(section_hdr->next_section_off);
465 	os_mem_free(softs, (void *)conf_table,config_table_size);
469 /* Wait for PQI reset completion for the adapter*/
/*
 * pqisrc_wait_for_pqi_reset_completion:
 * Poll the device-reset register until the reset action reports
 * COMPLETED, sleeping PQI_RESET_POLL_INTERVAL (100 ms) between reads.
 * The maximum number of polls is taken from bits 47:32 of the admin
 * queue capability register (units of 100 ms, per the DBG message).
 * Returns PQI_STATUS_SUCCESS or PQI_STATUS_TIMEOUT.
 */
470 int pqisrc_wait_for_pqi_reset_completion(pqisrc_softstate_t *softs)
472 	int ret = PQI_STATUS_SUCCESS;
473 	pqi_reset_reg_t reset_reg;
474 	int pqi_reset_timeout = 0;
476 	uint32_t max_timeout = 0;
478 	val = PCI_MEM_GET64(softs, &softs->pqi_reg->pqi_dev_adminq_cap, PQI_ADMINQ_CAP);
/* Extract the firmware-advertised reset timeout from bits 47:32. */
480 	max_timeout = (val & 0xFFFF00000000) >> 32;
482 	DBG_INFO("max_timeout for PQI reset completion in 100 msec units = %u\n", max_timeout);
485 		if (pqi_reset_timeout++ == max_timeout) {
486 			return PQI_STATUS_TIMEOUT;
488 		OS_SLEEP(PQI_RESET_POLL_INTERVAL);/* 100 msec */
489 		reset_reg.all_bits = PCI_MEM_GET32(softs,
490 			&softs->pqi_reg->dev_reset, PQI_DEV_RESET);
491 		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
499 * Function used to perform PQI hard reset.
/*
 * pqi_reset:
 * Perform a PQI hard reset of the adapter.  If the controller is in PQI
 * mode and supports reset-quiesce, first request quiesce via the
 * host-to-IOA doorbell and wait for the bit to clear; then write the
 * HARD_RESET/RESET action to the device-reset register and wait for
 * completion.  On success the soft state is marked out of PQI mode.
 */
501 int pqi_reset(pqisrc_softstate_t *softs)
503 	int ret = PQI_STATUS_SUCCESS;
505 	pqi_reset_reg_t pqi_reset_reg;
509 	if (true == softs->ctrl_in_pqi_mode) {
511 		if (softs->pqi_reset_quiesce_allowed) {
/* Set the quiesce bit in the legacy SIS doorbell and wait for the
 * firmware to acknowledge by clearing it. */
512 			val = PCI_MEM_GET32(softs, &softs->ioa_reg->host_to_ioa_db,
514 			val |= SIS_PQI_RESET_QUIESCE;
515 			PCI_MEM_PUT32(softs, &softs->ioa_reg->host_to_ioa_db,
516 					LEGACY_SIS_IDBR, LE_32(val));
517 			ret = pqisrc_sis_wait_for_db_bit_to_clear(softs, SIS_PQI_RESET_QUIESCE);
519 				DBG_ERR("failed with error %d during quiesce\n", ret);
/* Program a hard reset with the RESET action and kick it off. */
524 		pqi_reset_reg.all_bits = 0;
525 		pqi_reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
526 		pqi_reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;
528 		PCI_MEM_PUT32(softs, &softs->pqi_reg->dev_reset, PQI_DEV_RESET,
529 			LE_32(pqi_reset_reg.all_bits));
531 		ret = pqisrc_wait_for_pqi_reset_completion(softs);
533 			DBG_ERR("PQI reset timed out: ret = %d!\n", ret);
537 	softs->ctrl_in_pqi_mode = false;
543 * Initialize the adapter with supported PQI configuration.
/*
 * pqisrc_pqi_init:
 * Bring the adapter up in PQI mode: verify PQI signature/state, gather
 * CPU and interrupt configuration from the OS, create the admin queue,
 * allocate rcbs and the tag list, then configure and create the
 * operational queues.  On success the controller is marked online.
 * The error path (labels elided from this view) tears down in reverse
 * order and returns PQI_STATUS_FAILURE.
 */
545 int pqisrc_pqi_init(pqisrc_softstate_t *softs)
547 	int ret = PQI_STATUS_SUCCESS;
551 	/* Check the PQI signature */
552 	ret = pqisrc_check_pqimode(softs);
554 		DBG_ERR("failed to switch to pqi\n");
/* Persist the mode so a crash/reboot knows the controller is in PQI. */
558 	PQI_SAVE_CTRL_MODE(softs, CTRL_PQI_MODE);
559 	softs->ctrl_in_pqi_mode = true;
561 	/* Get the No. of Online CPUs,NUMA/Processor config from OS */
562 	ret = os_get_processor_config(softs);
564 		DBG_ERR("Failed to get processor config from OS %d\n",
569 	/* Get the interrupt count, type, priority available from OS */
570 	ret = os_get_intr_config(softs);
572 		DBG_ERR("Failed to get interrupt config from OS %d\n",
577 	/* Create Admin Queue pair*/
578 	ret = pqisrc_create_admin_queue(softs);
580 		DBG_ERR("Failed to configure admin queue\n");
581 		goto err_admin_queue;
584 	/* For creating event and IO operational queues we have to submit
585 	admin IU requests.So Allocate resources for submitting IUs */
587 	/* Allocate the request container block (rcb) */
588 	ret = pqisrc_allocate_rcb(softs);
589 	if (ret == PQI_STATUS_FAILURE) {
590 		DBG_ERR("Failed to allocate rcb \n");
594 	/* Allocate & initialize request id queue */
595 	ret = pqisrc_init_taglist(softs,&softs->taglist,
596 				softs->max_outstanding_io);
598 		DBG_ERR("Failed to allocate memory for request id q : %d\n",
603 	ret = pqisrc_configure_op_queues(softs);
605 		DBG_ERR("Failed to configure op queue\n");
609 	/* Create Operational queues */
610 	ret = pqisrc_create_op_queues(softs);
612 		DBG_ERR("Failed to create op queue\n");
613 		ret = PQI_STATUS_FAILURE;
617 	softs->ctrl_online = true;
/* Error unwind (reached through labels elided from this view):
 * taglist -> rcbs -> admin queue -> interrupt config. */
624 	pqisrc_destroy_taglist(softs,&softs->taglist);
626 	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
628 	pqisrc_destroy_admin_queue(softs);
630 	os_free_intr_config(softs);
632 	DBG_FUNC("OUT failed\n");
633 	return PQI_STATUS_FAILURE;
/*
 * pqisrc_force_sis:
 * Force the controller back into SIS (legacy) mode.  Fails fast if the
 * firmware has kernel-panicked; no-ops if already in SIS mode; otherwise
 * disables MSI-X, performs a PQI reset (deleting all queues), re-enables
 * SIS and records the new mode.
 */
637 int pqisrc_force_sis(pqisrc_softstate_t *softs)
639 	int ret = PQI_STATUS_SUCCESS;
641 	if (SIS_IS_KERNEL_PANIC(softs)) {
/* NOTE(review): "runnning" typo in this log string — runtime text, left
 * unchanged here. */
642 		DBG_INFO("Controller FW is not runnning");
643 		return PQI_STATUS_FAILURE;
646 	if (PQI_GET_CTRL_MODE(softs) == CTRL_SIS_MODE) {
650 	if (SIS_IS_KERNEL_UP(softs)) {
651 		PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
654 	/* Disable interrupts ? */
655 	sis_disable_msix(softs);
657 	/* reset pqi, this will delete queues */
658 	ret = pqi_reset(softs);
663 	ret = pqisrc_reenable_sis(softs);
668 	PQI_SAVE_CTRL_MODE(softs, CTRL_SIS_MODE);
674 * Uninitialize the resources used during PQI initialization.
/*
 * pqisrc_pqi_uninit:
 * Tear down everything pqisrc_pqi_init() set up: per-queue locks, the
 * operational/event queue DMA memory, outstanding commands, rcbs, the
 * tag list, the admin queue (lock + DMA), and finally switch the
 * controller back to SIS mode.
 */
676 void pqisrc_pqi_uninit(pqisrc_softstate_t *softs)
681 	if(softs->devlist_lockcreated==true){
682 		os_uninit_spinlock(&softs->devlist_lock);
683 		softs->devlist_lockcreated = false;
/* RAID and AIO inbound queues are paired, so one loop covers both. */
686 	for (i = 0; i <  softs->num_op_raid_ibq; i++) {
688 		if(softs->op_raid_ib_q[i].lockcreated==true){
689 			OS_UNINIT_PQILOCK(&softs->op_raid_ib_q[i].lock);
690 			softs->op_raid_ib_q[i].lockcreated = false;
694 		if(softs->op_aio_ib_q[i].lockcreated==true){
695 			OS_UNINIT_PQILOCK(&softs->op_aio_ib_q[i].lock);
696 			softs->op_aio_ib_q[i].lockcreated = false;
/* Release the operational IB/OB and event queue DMA backings. */
701 	os_dma_mem_free(softs, &softs->op_ibq_dma_mem);
702 	os_dma_mem_free(softs, &softs->op_obq_dma_mem);
703 	os_dma_mem_free(softs, &softs->event_q_dma_mem);
705 	/* Complete all pending commands. */
706 	os_complete_outstanding_cmds_nodevice(softs);
709 	pqisrc_free_rcb(softs, softs->max_outstanding_io + 1);
711 	/* Free request id lists */
712 	pqisrc_destroy_taglist(softs,&softs->taglist);
714 	if(softs->admin_ib_queue.lockcreated==true){
715 		OS_UNINIT_PQILOCK(&softs->admin_ib_queue.lock);
716 		softs->admin_ib_queue.lockcreated = false;
719 	/* Free Admin Queue */
720 	os_dma_mem_free(softs, &softs->admin_queue_dma_mem);
722 	/* Switch back to SIS mode */
723 	if (pqisrc_force_sis(softs)) {
724 		DBG_ERR("Failed to switch back the adapter to SIS mode!\n");
731 * Function to initialize the adapter settings.
/*
 * pqisrc_init:
 * Full adapter bring-up sequence: SIS init, PQI init, interrupt setup,
 * event configuration (report + set), firmware version query, driver
 * version write to host wellness, locks/semaphores, heart-beat counter
 * discovery via the PQI config table, and device-list initialization.
 * The error path (labels elided from this view) unwinds in reverse.
 */
733 int pqisrc_init(pqisrc_softstate_t *softs)
/* Compile/boot-time sanity check of on-wire structure layouts. */
740 	check_struct_sizes();
742 	/* Init the Sync interface */
743 	ret = pqisrc_sis_init(softs);
745 		DBG_ERR("SIS Init failed with error %d\n", ret);
749 	/* Init the PQI interface */
750 	ret = pqisrc_pqi_init(softs);
752 		DBG_ERR("PQI Init failed with error %d\n", ret);
756 	/* Setup interrupt */
757 	ret = os_setup_intr(softs);
759 		DBG_ERR("Interrupt setup failed with error %d\n", ret);
763 	/* Report event configuration */
764 	ret = pqisrc_report_event_config(softs);
766 		DBG_ERR(" Failed to configure Report events\n");
770 	/* Set event configuration*/
771 	ret = pqisrc_set_event_config(softs);
773 		DBG_ERR(" Failed to configure Set events\n");
777 	/* Check for For PQI spanning */
778 	ret = pqisrc_get_ctrl_fw_version(softs);
780 		DBG_ERR(" Failed to get ctrl fw version\n");
784 	/* update driver version in to FW */
785 	ret = pqisrc_write_driver_version_to_host_wellness(softs);
787 		DBG_ERR(" Failed to update driver version in to FW");
788 		goto err_host_wellness;
792 	os_strlcpy(softs->devlist_lock_name, "devlist_lock", LOCKNAME_SIZE);
793 	ret = os_init_spinlock(softs, &softs->devlist_lock, softs->devlist_lock_name);
795 		DBG_ERR(" Failed to initialize devlist_lock\n");
796 		softs->devlist_lockcreated=false;
799 	softs->devlist_lockcreated = true;
801 	ret = os_create_semaphore("scan_lock", 1, &softs->scan_lock);
802 	if(ret != PQI_STATUS_SUCCESS){
803 		DBG_ERR(" Failed to initialize scan lock\n");
807 	OS_ATOMIC64_SET(softs, num_intrs, 0);
808 	softs->prev_num_intrs = softs->num_intrs;
811 	/* Get the PQI configuration table to read heart-beat counter*/
812 	if (PQI_NEW_HEARTBEAT_MECHANISM(softs)) {
813 		ret = pqisrc_process_config_table(softs);
815 			DBG_ERR("Failed to process PQI configuration table %d\n", ret);
/* Seed the previous count one timer interval back so the first watchdog
 * comparison does not falsely report a stalled heart-beat. */
820 	if (PQI_NEW_HEARTBEAT_MECHANISM(softs))
821 		softs->prev_heartbeat_count = CTRLR_HEARTBEAT_CNT(softs) - OS_FW_HEARTBEAT_TIMER_INTERVAL;
823 	/* Init device list */
824 	for(i = 0; i < PQI_MAX_DEVICES; i++)
825 		for(j = 0; j < PQI_MAX_MULTILUN; j++)
826 			softs->device_list[i][j] = NULL;
/* Error unwind (labels elided): scan lock -> devlist lock -> interrupts
 * -> PQI -> SIS. */
832 	os_destroy_semaphore(&softs->scan_lock);
834 	if(softs->devlist_lockcreated==true){
835 		os_uninit_spinlock(&softs->devlist_lock);
836 		softs->devlist_lockcreated = false;
842 	os_destroy_intr(softs);
844 	pqisrc_pqi_uninit(softs);
846 	pqisrc_sis_uninit(softs);
848 	DBG_FUNC("OUT failed\n");
853 * Write all data in the adapter's battery-backed cache to
/*
 * pqisrc_flush_cache:
 * Ask the controller to write out its battery-backed cache by sending a
 * SA_CACHE_FLUSH BMIC request to the RAID controller LUN, tagging the
 * request with the halt event that triggered the flush.
 * Fails immediately if the controller is offline.
 */
856 int pqisrc_flush_cache( pqisrc_softstate_t *softs,
857 	enum pqisrc_flush_cache_event_type event_type)
859 	int rval = PQI_STATUS_SUCCESS;
860 	pqisrc_raid_req_t request;
861 	pqisrc_bmic_flush_cache_t *flush_buff = NULL;
865 	if (pqisrc_ctrl_offline(softs))
866 		return PQI_STATUS_FAILURE;
868 	flush_buff = os_mem_alloc(softs, sizeof(pqisrc_bmic_flush_cache_t));
870 		DBG_ERR("Failed to allocate memory for flush cache params\n");
871 		rval = PQI_STATUS_FAILURE;
/* Tell firmware why we are flushing (shutdown, hibernate, etc.). */
875 	flush_buff->halt_event = event_type;
877 	memset(&request, 0, sizeof(request));
879 	rval = pqisrc_build_send_raid_request(softs, &request, flush_buff,
880 			sizeof(*flush_buff), SA_CACHE_FLUSH, 0,
881 			(uint8_t *)RAID_CTLR_LUNID, NULL);
883 		DBG_ERR("error in build send raid req ret=%d\n", rval);
887 	os_mem_free(softs, (void *)flush_buff,
888 		sizeof(pqisrc_bmic_flush_cache_t));
896 * Uninitialize the adapter.
898 void pqisrc_uninit(pqisrc_softstate_t *softs)
902 os_destroy_intr(softs);
904 os_destroy_semaphore(&softs->scan_lock);
906 pqisrc_pqi_uninit(softs);
908 pqisrc_sis_uninit(softs);
910 pqisrc_cleanup_devices(softs);