2 * Copyright (c) 2018 Microsemi Corporation.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include "smartpqi_includes.h"
31 /* Validate the scsi sense response code */
32 static inline boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
41 return (sshdr->response_code & 0x70) == 0x70;
44 /* Initialize target ID pool for HBA/PDs */
45 void pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
47 int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1;
49 for(i = 0; i < PQI_MAX_PHYSICALS; i++) {
50 softs->tid_pool.tid[i] = tid--;
52 softs->tid_pool.index = i - 1;
55 int pqisrc_alloc_tid(pqisrc_softstate_t *softs)
57 if(softs->tid_pool.index <= -1) {
58 DBG_ERR("Target ID exhausted\n");
62 return softs->tid_pool.tid[softs->tid_pool.index--];
65 void pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
67 if(softs->tid_pool.index >= PQI_MAX_PHYSICALS) {
68 DBG_ERR("Target ID queue is full\n");
72 softs->tid_pool.index++;
73 softs->tid_pool.tid[softs->tid_pool.index] = tid;
76 /* Update scsi sense info to a local buffer*/
/*
 * Decode raw SCSI sense bytes into the driver's sense_header_scsi,
 * handling both fixed (0x70/0x71) and descriptor (0x72/0x73) formats.
 * Returns false when the response code is not a valid SPC sense code.
 * NOTE(review): per-byte bounds checks on `len` are not visible in this
 * view — confirm each buff[] access below is guarded by a length test.
 */
77 boolean_t pqisrc_update_scsi_sense(const uint8_t *buff, int len,
78 struct sense_header_scsi *header)
/* Start from a zeroed header so any unset field reads as 0. */
86 memset(header, 0, sizeof(struct sense_header_scsi));
/* Low 7 bits of byte 0 carry the sense response code. */
88 header->response_code = (buff[0] & 0x7f);
90 if (!pqisrc_scsi_sense_valid(header))
/* Codes 0x72/0x73 use the descriptor sense format. */
93 if (header->response_code >= 0x72) {
94 /* descriptor format */
96 header->sense_key = (buff[1] & 0xf);
98 header->asc = buff[2];
100 header->ascq = buff[3];
102 header->additional_length = buff[7];
/* Fixed format: sense key in byte 2, ASC/ASCQ at bytes 12/13. */
106 header->sense_key = (buff[2] & 0xf);
/* Clamp len to the additional-length field plus the 8-byte header. */
108 len = (len < (buff[7] + 8)) ?
111 header->asc = buff[12];
113 header->ascq = buff[13];
123 * Function used to build the internal raid request and analyze the response
/*
 * Builds a RAID-path IU for an internal command (INQUIRY, REPORT LUNS,
 * TUR, RAID map read, BMIC read/write), allocates a bounce DMA buffer,
 * submits the request, waits for completion, and maps any error info
 * back to a driver status. On success, data read from the device is
 * copied back into the caller's `buff`.
 */
125 int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs, pqisrc_raid_req_t *request,
126 void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
127 raid_path_error_info_elem_t *error_info)
131 int ret = PQI_STATUS_SUCCESS;
133 struct dma_mem device_mem;
136 ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
/*
 * NOTE(review): the outbound queue is indexed with PQI_DEFAULT_IB_QUEUE —
 * presumably the default IB and OB indices coincide; verify there is no
 * separate PQI_DEFAULT_OB_QUEUE constant that should be used here.
 */
137 ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
143 memset(&device_mem, 0, sizeof(struct dma_mem));
145 /* for TUR datasize: 0 buff: NULL */
/* Allocate a DMA-able bounce buffer for the data phase. */
147 device_mem.tag = "device_mem";
148 device_mem.size = datasize;
149 device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;
151 ret = os_dma_mem_alloc(softs, &device_mem);
154 DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
/* Single SG element covering the whole bounce buffer. */
158 sgd = (sgt_t *)&request->sg_descriptors[0];
160 sgd->addr = device_mem.dma_addr;
162 sgd->flags = SG_FLAG_LAST;
166 /* Build raid path request */
167 request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
/* IU length excludes the common request header. */
169 request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t,
170 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH);
171 request->buffer_length = LE_32(datasize);
172 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
173 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
174 request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
/* Per-command CDB construction and data direction. */
180 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
182 if (vpd_page & VPD_PAGE) {
184 cdb[2] = (uint8_t)vpd_page;
186 cdb[4] = (uint8_t)datasize;
190 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
192 if (cmd == SA_REPORT_PHYS)
193 cdb[1] = SA_REPORT_PHYS_EXTENDED;
195 cdb[1] = SA_REPORT_LOG_EXTENDED;
/* Allocation length, big-endian, bytes 8-9 of the REPORT LUNS CDB. */
196 cdb[8] = (uint8_t)((datasize) >> 8);
197 cdb[9] = (uint8_t)datasize;
199 case TEST_UNIT_READY:
200 request->data_direction = SOP_DATA_DIR_NONE;
202 case SA_GET_RAID_MAP:
203 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
204 cdb[0] = SA_CISS_READ;
206 cdb[8] = (uint8_t)((datasize) >> 8);
207 cdb[9] = (uint8_t)datasize;
210 request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
/* Host-to-device command: stage the caller's data into the bounce buffer. */
211 memcpy(device_mem.virt_addr, buff, datasize);
213 cdb[6] = BMIC_CACHE_FLUSH;
/*
 * NOTE(review): (uint8_t)((datasize) << 8) always evaluates to 0 —
 * the low 8 bits of a value left-shifted by 8 are zero. This looks
 * like it was meant to be the low byte (or a >>8/low-byte split) of
 * the BMIC transfer length; verify against the controller spec.
 * The same pattern repeats for the BMIC cases below.
 */
214 cdb[7] = (uint8_t)((datasize) << 8);
215 cdb[8] = (uint8_t)((datasize) >> 8);
217 case BMIC_IDENTIFY_CONTROLLER:
218 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
219 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
222 cdb[7] = (uint8_t)((datasize) << 8);
223 cdb[8] = (uint8_t)((datasize) >> 8);
225 case BMIC_WRITE_HOST_WELLNESS:
226 request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
227 memcpy(device_mem.virt_addr, buff, datasize);
230 cdb[7] = (uint8_t)((datasize) << 8);
231 cdb[8] = (uint8_t)((datasize) >> 8);
233 case BMIC_SENSE_SUBSYSTEM_INFORMATION:
234 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
237 cdb[7] = (uint8_t)((datasize) << 8);
238 cdb[8] = (uint8_t)((datasize) >> 8);
241 DBG_ERR("unknown command 0x%x", cmd);
/* Reserve a request tag; it doubles as the rcb index and error index. */
245 tag = pqisrc_get_tag(&softs->taglist);
246 if (INVALID_ELEM == tag) {
247 DBG_ERR("Tag not available\n");
248 ret = PQI_STATUS_FAILURE;
252 ((pqisrc_raid_req_t *)request)->request_id = tag;
253 ((pqisrc_raid_req_t *)request)->error_index = ((pqisrc_raid_req_t *)request)->request_id;
254 ((pqisrc_raid_req_t *)request)->response_queue_id = ob_q->q_id;
255 rcb = &softs->rcb[tag];
256 rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
257 rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
259 rcb->req_pending = true;
/* Post the IU and block until the completion callback fires or times out. */
262 ret = pqisrc_submit_cmnd(softs, ib_q, request);
264 if (ret != PQI_STATUS_SUCCESS) {
265 DBG_ERR("Unable to submit command\n");
269 ret = pqisrc_wait_on_condition(softs, rcb);
270 if (ret != PQI_STATUS_SUCCESS) {
/* NOTE(review): %c prints the command byte as a character; %x was likely intended. */
271 DBG_ERR("Internal RAID request timed out: cmd : 0x%c\n", cmd);
/* Device-to-host command completed: copy data back to the caller. */
277 memcpy(buff, device_mem.virt_addr, datasize);
279 os_dma_mem_free(softs, &device_mem);
/* Error completion: hand the controller's error info back to the caller. */
287 sizeof(*error_info));
/* Underflow is treated as success (short read/write is acceptable). */
289 if (error_info->data_out_result ==
290 PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
291 ret = PQI_STATUS_SUCCESS;
294 DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x,"
295 "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
296 BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
298 ret = PQI_STATUS_FAILURE;
/* Success path: clear any stale error info for the caller. */
303 ret = PQI_STATUS_SUCCESS;
304 memset(error_info, 0, sizeof(*error_info));
309 pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
314 DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
315 BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
318 pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
321 os_dma_mem_free(softs, &device_mem);
322 DBG_FUNC("FAILED \n");
326 /* common function used to send report physical and logical luns cmnds*/
327 static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
328 void *buff, size_t buf_len)
331 pqisrc_raid_req_t request;
335 memset(&request, 0, sizeof(request));
336 ret = pqisrc_build_send_raid_request(softs, &request, buff,
337 buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
344 /* subroutine used to get physical and logical luns of the device */
/*
 * Two-pass REPORT LUNS: first read only the header to learn the list
 * length, then allocate a buffer of exactly that size and re-issue the
 * command. The allocated buffer is returned via *buff; its size via
 * *data_length — the caller owns and must free it.
 * NOTE(review): the retry-on-grown-list loop is only partially visible
 * here; confirm the re-read path frees and reallocates correctly.
 */
345 static int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
346 reportlun_data_ext_t **buff, size_t *data_length)
351 size_t new_lun_list_length;
352 reportlun_data_ext_t *lun_data;
353 reportlun_header_t report_lun_header;
/* Pass 1: header-only read to discover the LUN list length. */
357 ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
358 sizeof(report_lun_header));
361 DBG_ERR("failed return code: %d\n", ret);
/* list_length is big-endian on the wire. */
364 list_len = BE_32(report_lun_header.list_length);
367 data_len = sizeof(reportlun_header_t) + list_len;
368 *data_length = data_len;
370 lun_data = os_mem_alloc(softs, data_len);
373 DBG_ERR("failed to allocate memory for lun_data\n");
374 return PQI_STATUS_FAILURE;
/* Empty list: return just the header the controller gave us. */
378 DBG_DISC("list_len is 0\n");
379 memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
/* Pass 2: full read into the sized buffer. */
383 ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
390 new_lun_list_length = BE_32(lun_data->header.list_length);
/* The list can grow between the two reads; retry with the larger size. */
392 if (new_lun_list_length > list_len) {
393 list_len = new_lun_list_length;
394 os_mem_free(softs, (void *)lun_data, data_len);
404 os_mem_free(softs, (void *)lun_data, data_len);
410 * Function used to get physical and logical device list
/*
 * Fetches both the physical and logical LUN lists, then appends one
 * extra (zeroed) logical entry representing the controller itself to
 * the logical list. On success *logical_dev_list is replaced with the
 * enlarged copy and *log_data_length updated; the old list is freed.
 */
412 static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
413 reportlun_data_ext_t **physical_dev_list,
414 reportlun_data_ext_t **logical_dev_list,
415 size_t *phys_data_length,
416 size_t *log_data_length)
418 int ret = PQI_STATUS_SUCCESS;
419 size_t logical_list_length;
420 size_t logdev_data_length;
422 reportlun_data_ext_t *local_logdev_list;
423 reportlun_data_ext_t *logdev_data;
424 reportlun_header_t report_lun_header;
429 ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
431 DBG_ERR("report physical LUNs failed");
435 ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
437 DBG_ERR("report logical LUNs failed");
442 logdev_data = *logical_dev_list;
445 logical_list_length =
446 BE_32(logdev_data->header.list_length);
/* No logical list returned: substitute an empty, zeroed header. */
448 memset(&report_lun_header, 0, sizeof(report_lun_header));
450 (reportlun_data_ext_t *)&report_lun_header;
451 logical_list_length = 0;
454 logdev_data_length = sizeof(reportlun_header_t) +
457 /* Adding LOGICAL device entry for controller */
458 local_logdev_list = os_mem_alloc(softs,
459 logdev_data_length + sizeof(reportlun_ext_entry_t));
460 if (!local_logdev_list) {
/* Allocation failed: release the original list and report failure. */
461 data_length = *log_data_length;
462 os_mem_free(softs, (char *)*logical_dev_list, data_length);
463 *logical_dev_list = NULL;
464 return PQI_STATUS_FAILURE;
/* Copy the existing list, then zero the appended controller entry. */
467 memcpy(local_logdev_list, logdev_data, logdev_data_length);
468 memset((uint8_t *)local_logdev_list + logdev_data_length, 0,
469 sizeof(reportlun_ext_entry_t));
/* Account for the extra entry in the (big-endian) list length. */
470 local_logdev_list->header.list_length = BE_32(logical_list_length +
471 sizeof(reportlun_ext_entry_t));
472 data_length = *log_data_length;
473 os_mem_free(softs, (char *)*logical_dev_list, data_length);
474 *log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t);
475 *logical_dev_list = local_logdev_list;
482 /* Subroutine used to set Bus-Target-Lun for the requested device */
483 static inline void pqisrc_set_btl(pqi_scsi_dev_t *device,
484 int bus, int target, int lun)
489 device->target = target;
495 inline boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
497 return device->is_external_raid_device;
500 static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
502 return scsi3addr[2] != 0;
505 /* Function used to assign Bus-Target-Lun for the requested device */
/*
 * Derive the bus/target/lun triple from the device's SCSI3 address:
 * the controller LUN maps to the HBA bus, logical volumes map to the
 * internal or external RAID volume bus. target_lun_valid is set once
 * an assignment has been made.
 */
506 static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
515 scsi3addr = device->scsi3addr;
/* LUN id is the little-endian 32-bit value at the start of the address. */
516 lunid = GET_LE32(scsi3addr);
518 if (pqisrc_is_hba_lunid(scsi3addr)) {
519 /* The specified device is the controller. */
520 pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, lunid & 0x3fff);
521 device->target_lun_valid = true;
525 if (pqisrc_is_logical_device(device)) {
526 if (pqisrc_is_external_raid_device(device)) {
527 DBG_DISC("External Raid Device!!!");
528 bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
/* External volumes encode the target in bits 16-29 of the LUN id. */
529 target = (lunid >> 16) & 0x3fff;
532 bus = PQI_RAID_VOLUME_BUS;
534 target = lunid & 0x3fff;
536 pqisrc_set_btl(device, bus, target, lun);
537 device->target_lun_valid = true;
544 /* Build and send the internal INQUIRY command to particular device */
545 static int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
546 uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
548 int ret = PQI_STATUS_SUCCESS;
549 pqisrc_raid_req_t request;
550 raid_path_error_info_elem_t error_info;
554 memset(&request, 0, sizeof(request));
555 ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
556 SA_INQUIRY, vpd_page, scsi3addr, &error_info);
562 /* Function used to parse the sense information from response */
/*
 * Extract sense key / ASC / ASCQ from raw sense bytes via
 * pqisrc_update_scsi_sense(). Outputs are written through the pointer
 * parameters; their initialization for the invalid-sense case is not
 * visible in this view — presumably they are zeroed first (verify).
 */
563 static void pqisrc_fetch_sense_info(const uint8_t *sense_data,
564 unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
566 struct sense_header_scsi header;
/* Only trust the decoded fields when the sense buffer was valid. */
574 if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) {
575 *sense_key = header.sense_key;
580 DBG_DISC("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq);
585 /* Function used to validate volume offline status */
/*
 * Read the SA_VPD_LV_STATUS VPD page for a volume in two steps (header
 * first to learn the size, then the whole page) and return the LV
 * status byte. Defaults to SA_LV_STATUS_VPD_UNSUPPORTED.
 */
586 static uint8_t pqisrc_get_volume_offline_status(pqisrc_softstate_t *softs,
589 int ret = PQI_STATUS_SUCCESS;
590 uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
592 uint8_t *buff = NULL;
596 buff = os_mem_alloc(softs, 64);
/*
 * NOTE(review): PQI_STATUS_FAILURE is returned from a function whose
 * return type is uint8_t (an SA_LV_* status) — confirm callers treat
 * this value correctly.
 */
598 return PQI_STATUS_FAILURE;
600 /* Get the size of the VPD return buff. */
601 ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
602 buff, SCSI_VPD_HEADER_LENGTH);
609 /* Now get the whole VPD buff. */
610 ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
611 buff, size + SCSI_VPD_HEADER_LENGTH);
618 os_mem_free(softs, (char *)buff, 64);
625 /* Determine offline status of a volume. Returns appropriate SA_LV_* status.*/
/*
 * Issue TEST UNIT READY to the volume; if it reports CHECK CONDITION /
 * NOT READY / LUN-not-ready, query the LV-status VPD page to find out
 * why, and keep the volume offline for the states listed below.
 */
626 static uint8_t pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
629 int ret = PQI_STATUS_SUCCESS;
631 unsigned sense_data_len;
637 pqisrc_raid_req_t request;
638 raid_path_error_info_elem_t error_info;
642 memset(&request, 0, sizeof(request));
643 ret = pqisrc_build_send_raid_request(softs, &request, NULL, 0,
644 TEST_UNIT_READY, 0, scsi3addr, &error_info);
/* Decode any autosense returned with the error info. */
648 sense_data = error_info.data;
649 sense_data_len = LE_16(error_info.sense_data_len);
/* Never read past the error-info sense buffer. */
651 if (sense_data_len > sizeof(error_info.data))
652 sense_data_len = sizeof(error_info.data);
654 pqisrc_fetch_sense_info(sense_data, sense_data_len, &sense_key, &asc,
657 scsi_status = error_info.status;
659 /* scsi status: "CHECK CONDN" / SK: "not ready" ? */
660 if (scsi_status != 2 ||
662 asc != ASC_LUN_NOT_READY) {
666 /* Determine the reason for not ready state. */
667 off_status = pqisrc_get_volume_offline_status(softs, scsi3addr);
669 DBG_DISC("offline_status 0x%x\n", off_status);
671 /* Keep volume offline in certain cases. */
672 switch (off_status) {
673 case SA_LV_UNDERGOING_ERASE:
674 case SA_LV_NOT_AVAILABLE:
675 case SA_LV_UNDERGOING_RPI:
676 case SA_LV_PENDING_RPI:
677 case SA_LV_ENCRYPTED_NO_KEY:
678 case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
679 case SA_LV_UNDERGOING_ENCRYPTION:
680 case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
681 case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
683 case SA_LV_STATUS_VPD_UNSUPPORTED:
685 * If the VPD status page isn't available,
686 * use ASC/ASCQ to determine state.
688 if (ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS ||
689 ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)
699 return SA_LV_STATUS_VPD_UNSUPPORTED;
702 /* Validate the RAID map parameters */
/*
 * Sanity-check a RAID map returned by the controller: overall size,
 * entry count, per-RAID-level layout-map-count invariants, and (for
 * striped R5/R6) a non-zero blocks-per-row. Returns PQI_STATUS_FAILURE
 * with a logged message on any violation.
 */
703 static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
704 pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
707 uint32_t raidmap_size;
708 uint32_t r5or6_blocks_per_row;
709 unsigned phys_dev_num;
710 unsigned num_raidmap_entries;
/* Structure size must at least reach the per-device entry array... */
714 raidmap_size = LE_32(raid_map->structure_size);
715 if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) {
716 error_msg = "RAID map too small\n";
/* ...and must not exceed the buffer we allocated for it. */
720 if (raidmap_size > sizeof(*raid_map)) {
721 error_msg = "RAID map too large\n";
/* Total physical devices = layout maps x (data + metadata disks per row). */
725 phys_dev_num = LE_16(raid_map->layout_map_count) *
726 (LE_16(raid_map->data_disks_per_row) +
727 LE_16(raid_map->metadata_disks_per_row));
728 num_raidmap_entries = phys_dev_num *
729 LE_16(raid_map->row_cnt);
731 if (num_raidmap_entries > RAID_MAP_MAX_ENTRIES) {
732 error_msg = "invalid number of map entries in RAID map\n";
/* RAID-1 requires exactly 2 layout maps, RAID-1 ADM exactly 3. */
736 if (device->raid_level == SA_RAID_1) {
737 if (LE_16(raid_map->layout_map_count) != 2) {
738 error_msg = "invalid RAID-1 map\n";
741 } else if (device->raid_level == SA_RAID_ADM) {
742 if (LE_16(raid_map->layout_map_count) != 3) {
743 error_msg = "invalid RAID-1(ADM) map\n";
746 } else if ((device->raid_level == SA_RAID_5 ||
747 device->raid_level == SA_RAID_6) &&
748 LE_16(raid_map->layout_map_count) > 1) {
/* Striped R5/R6: blocks per row must be non-zero (used as a divisor later). */
750 r5or6_blocks_per_row =
751 LE_16(raid_map->strip_size) *
752 LE_16(raid_map->data_disks_per_row);
753 if (r5or6_blocks_per_row == 0) {
754 error_msg = "invalid RAID-5 or RAID-6 map\n";
764 DBG_ERR("%s\n", error_msg);
765 return PQI_STATUS_FAILURE;
768 /* Get device raidmap for the requested device */
/*
 * Allocate a RAID map buffer, fetch it from the controller with
 * SA_GET_RAID_MAP, validate it, and attach it to the device on
 * success. On any failure the buffer is freed before returning.
 */
769 static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
770 pqi_scsi_dev_t *device)
772 int ret = PQI_STATUS_SUCCESS;
773 pqisrc_raid_req_t request;
774 pqisrc_raid_map_t *raid_map;
778 raid_map = os_mem_alloc(softs, sizeof(*raid_map));
780 return PQI_STATUS_FAILURE;
782 memset(&request, 0, sizeof(request));
783 ret = pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
784 SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
787 DBG_ERR("error in build send raid req ret=%d\n", ret);
791 ret = pqisrc_raid_map_validation(softs, device, raid_map);
793 DBG_ERR("error in raid map validation ret=%d\n", ret);
/* Ownership of raid_map transfers to the device on success. */
797 device->raid_map = raid_map;
/* Error path: release the map so it cannot leak. */
802 os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
803 DBG_FUNC("FAILED \n");
807 /* Get device ioaccel_status to validate the type of device */
/*
 * Read the LV I/O-accelerator VPD page and record whether offload is
 * configured/enabled on the device. When offload is configured, also
 * fetch the RAID map; a failed fetch cancels the pending-enable flag.
 */
808 static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
809 pqi_scsi_dev_t *device)
811 int ret = PQI_STATUS_SUCCESS;
813 uint8_t ioaccel_status;
817 buff = os_mem_alloc(softs, 64);
821 ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
822 VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64);
824 DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
828 ioaccel_status = buff[IOACCEL_STATUS_BYTE];
/* !! normalizes the bit test to 0/1. */
829 device->offload_config =
830 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
832 if (device->offload_config) {
833 device->offload_enabled_pending =
834 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
/* Offload is only usable when the RAID map can be retrieved. */
835 if (pqisrc_get_device_raidmap(softs, device))
836 device->offload_enabled_pending = false;
839 DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n",
840 device->offload_config, device->offload_enabled_pending);
843 os_mem_free(softs, (char*)buff, 64);
847 /* Get RAID level of requested device */
/*
 * Read the device-geometry VPD page and take the RAID level from byte
 * 8 of the response; anything out of range (or any inquiry failure)
 * degrades to SA_RAID_UNKNOWN.
 */
848 static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs,
849 pqi_scsi_dev_t *device)
856 raid_level = SA_RAID_UNKNOWN;
858 buff = os_mem_alloc(softs, 64);
861 ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
862 VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64);
/* Byte 8 of the geometry page holds the RAID level. */
864 raid_level = buff[8];
865 if (raid_level > SA_RAID_MAX)
866 raid_level = SA_RAID_UNKNOWN;
868 os_mem_free(softs, (char*)buff, 64);
871 device->raid_level = raid_level;
872 DBG_DISC("RAID LEVEL: %x \n", raid_level);
876 /* Parse the inquiry response and determine the type of device */
/*
 * Standard INQUIRY of the device: extracts device type, vendor and
 * model, then (for logical disks) fills in RAID level, ioaccel status
 * and volume status; external RAID volumes are marked online with an
 * unknown RAID level. Also detects OBDR tape devices by signature.
 */
877 static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
878 pqi_scsi_dev_t *device)
880 int ret = PQI_STATUS_SUCCESS;
885 inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE);
887 return PQI_STATUS_FAILURE;
889 /* Send an inquiry to the device to see what it is. */
890 ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
/* Vendor (8 bytes @8) and model (16 bytes @16) per standard INQUIRY layout. */
894 pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
895 pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);
897 device->devtype = inq_buff[0] & 0x1f;
898 memcpy(device->vendor, &inq_buff[8],
899 sizeof(device->vendor));
900 memcpy(device->model, &inq_buff[16],
901 sizeof(device->model));
902 DBG_DISC("DEV_TYPE: %x VENDOR: %s MODEL: %s\n", device->devtype, device->vendor, device->model);
904 if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
905 if (pqisrc_is_external_raid_device(device)) {
/* External volumes: assume online; RAID level is not queryable. */
906 device->raid_level = SA_RAID_UNKNOWN;
907 device->volume_status = SA_LV_OK;
908 device->volume_offline = false;
911 pqisrc_get_dev_raid_level(softs, device);
912 pqisrc_get_dev_ioaccel_status(softs, device);
913 device->volume_status = pqisrc_get_dev_vol_status(softs,
915 device->volume_offline = device->volume_status != SA_LV_OK;
920 * Check if this is a One-Button-Disaster-Recovery device
921 * by looking for "$DR-10" at offset 43 in the inquiry data.
923 device->is_obdr_device = (device->devtype == ROM_DEVICE &&
924 memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
927 os_mem_free(softs, (char*)inq_buff, OBDR_TAPE_INQ_SIZE);
934 * BMIC (Basic Management And Interface Commands) command
935 * to get the controller identify params
937 static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs,
938 bmic_ident_ctrl_t *buff)
940 int ret = PQI_STATUS_SUCCESS;
941 pqisrc_raid_req_t request;
945 memset(&request, 0, sizeof(request));
946 ret = pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
947 BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
953 /* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
954 int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
956 int ret = PQI_STATUS_SUCCESS;
957 bmic_ident_ctrl_t *identify_ctrl;
961 identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl));
962 if (!identify_ctrl) {
963 DBG_ERR("failed to allocate memory for identify_ctrl\n");
964 return PQI_STATUS_FAILURE;
967 memset(identify_ctrl, 0, sizeof(*identify_ctrl));
969 ret = pqisrc_identify_ctrl(softs, identify_ctrl);
973 softs->fw_build_number = identify_ctrl->fw_build_number;
974 memcpy(softs->fw_version, identify_ctrl->fw_version,
975 sizeof(identify_ctrl->fw_version));
976 softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0';
977 snprintf(softs->fw_version +
978 strlen(softs->fw_version),
979 sizeof(softs->fw_version),
980 "-%u", identify_ctrl->fw_build_number);
982 os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
983 DBG_INIT("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
988 /* BMIC command to determine scsi device identify params */
989 static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
990 pqi_scsi_dev_t *device,
991 bmic_ident_physdev_t *buff,
994 int ret = PQI_STATUS_SUCCESS;
995 uint16_t bmic_device_index;
996 pqisrc_raid_req_t request;
1001 memset(&request, 0, sizeof(request));
1002 bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
1003 request.cdb[2] = (uint8_t)bmic_device_index;
1004 request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
1006 ret = pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
1007 BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1013 * Function used to get the scsi device information using one of BMIC
1014 * BMIC_IDENTIFY_PHYSICAL_DEVICE
/*
 * Populate the physical device's queue depth, device type, path and
 * enclosure info from a BMIC IDENTIFY PHYSICAL DEVICE response. On
 * identify failure, only a default queue depth is applied.
 */
1016 static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
1017 pqi_scsi_dev_t *device,
1018 bmic_ident_physdev_t *id_phys)
1020 int ret = PQI_STATUS_SUCCESS;
1023 memset(id_phys, 0, sizeof(*id_phys));
1025 ret= pqisrc_identify_physical_disk(softs, device,
1026 id_phys, sizeof(*id_phys));
/* Fallback when the identify command fails. */
1028 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1032 device->queue_depth =
1033 LE_16(id_phys->current_queue_depth_limit);
1034 device->device_type = id_phys->device_type;
1035 device->active_path_index = id_phys->active_path_number;
1036 device->path_map = id_phys->redundant_path_present_map;
/* Enclosure box/connector data for each redundant path. */
1037 memcpy(&device->box,
1038 &id_phys->alternate_paths_phys_box_on_port,
1039 sizeof(device->box));
1040 memcpy(&device->phys_connector,
1041 &id_phys->alternate_paths_phys_connector,
1042 sizeof(device->phys_connector));
1043 device->bay = id_phys->phys_bay_in_box;
1045 DBG_DISC("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n", device->device_type, device->queue_depth);
1050 /* Function used to find the entry of the device in a list */
/*
 * Scan the softstate device table for an entry whose SCSI3 address
 * matches device_to_find. *same_device receives the matching entry.
 * Returns DEVICE_UNCHANGED when all compared attributes match,
 * DEVICE_CHANGED when the address matches but attributes differ (or
 * the sought device is a now-offline volume), DEVICE_NOT_FOUND otherwise.
 */
1051 static device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
1052 pqi_scsi_dev_t *device_to_find,
1053 pqi_scsi_dev_t **same_device)
1055 pqi_scsi_dev_t *device;
/* Walk every (target, lun) slot in the device table. */
1058 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1059 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1060 if(softs->device_list[i][j] == NULL)
1062 device = softs->device_list[i][j];
1063 if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
1064 device->scsi3addr)) {
1065 *same_device = device;
1066 if (pqisrc_device_equal(device_to_find, device)) {
/* Same address and attributes, but going offline => treat as changed. */
1067 if (device_to_find->volume_offline)
1068 return DEVICE_CHANGED;
1069 return DEVICE_UNCHANGED;
1071 return DEVICE_CHANGED;
1077 return DEVICE_NOT_FOUND;
1081 /* Update the newly added devices as existed device */
/*
 * Refresh an already-known device entry with the attributes discovered
 * on a rescan. Offload is forced off until the pending flag is acted
 * upon, and ownership of the new RAID map is transferred to the
 * existing entry (the old map is freed).
 */
1082 static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
1083 pqi_scsi_dev_t *device_exist,
1084 pqi_scsi_dev_t *new_device)
1087 device_exist->expose_device = new_device->expose_device;
1088 memcpy(device_exist->vendor, new_device->vendor,
1089 sizeof(device_exist->vendor));
1090 memcpy(device_exist->model, new_device->model,
1091 sizeof(device_exist->model));
1092 device_exist->is_physical_device = new_device->is_physical_device;
1093 device_exist->is_external_raid_device =
1094 new_device->is_external_raid_device;
1095 device_exist->sas_address = new_device->sas_address;
1096 device_exist->raid_level = new_device->raid_level;
1097 device_exist->queue_depth = new_device->queue_depth;
1098 device_exist->ioaccel_handle = new_device->ioaccel_handle;
1099 device_exist->volume_status = new_device->volume_status;
1100 device_exist->active_path_index = new_device->active_path_index;
1101 device_exist->path_map = new_device->path_map;
1102 device_exist->bay = new_device->bay;
1103 memcpy(device_exist->box, new_device->box,
1104 sizeof(device_exist->box));
1105 memcpy(device_exist->phys_connector, new_device->phys_connector,
1106 sizeof(device_exist->phys_connector));
1107 device_exist->offload_config = new_device->offload_config;
/* Disable offload until the pending state is applied by the caller. */
1108 device_exist->offload_enabled = false;
1109 device_exist->offload_enabled_pending =
1110 new_device->offload_enabled_pending;
1111 device_exist->offload_to_mirror = 0;
/* Free the stale RAID map before adopting the new one. */
1112 if (device_exist->raid_map)
1114 (char *)device_exist->raid_map,
1115 sizeof(*device_exist->raid_map));
1116 device_exist->raid_map = new_device->raid_map;
1117 /* To prevent this from being freed later. */
1118 new_device->raid_map = NULL;
1122 /* Validate the ioaccel_handle for a newly added device */
/*
 * Look up the PHYSICAL disk in the device table that owns the given
 * ioaccel handle; logical devices and non-disk types are skipped.
 * Returns the matching device (or, presumably, NULL when none is
 * found — the final return is outside this view).
 */
1123 static pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
1124 pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
1126 pqi_scsi_dev_t *device;
1129 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1130 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1131 if(softs->device_list[i][j] == NULL)
1133 device = softs->device_list[i][j];
/* Only physical disks participate in ioaccel. */
1134 if (device->devtype != DISK_DEVICE)
1136 if (pqisrc_is_logical_device(device))
1138 if (device->ioaccel_handle == ioaccel_handle)
1147 /* Get the scsi device queue depth */
/*
 * For every internal logical disk with a RAID map, compute its queue
 * depth as the sum of the queue depths of the physical disks named in
 * the map. A missing physical handle disables offload for the volume
 * and discards its RAID map.
 */
1148 static void pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
1151 unsigned phys_dev_num;
1152 unsigned num_raidmap_entries;
1153 unsigned queue_depth;
1154 pqisrc_raid_map_t *raid_map;
1155 pqi_scsi_dev_t *device;
1156 raidmap_data_t *dev_data;
1157 pqi_scsi_dev_t *phys_disk;
1163 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1164 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1165 if(softs->device_list[i][j] == NULL)
1167 device = softs->device_list[i][j];
/* Only internal logical disks are considered. */
1168 if (device->devtype != DISK_DEVICE)
1170 if (!pqisrc_is_logical_device(device))
1172 if (pqisrc_is_external_raid_device(device))
1174 device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1175 raid_map = device->raid_map;
1178 dev_data = raid_map->dev_data;
/* Same entry-count formula used by the RAID map validator. */
1179 phys_dev_num = LE_16(raid_map->layout_map_count) *
1180 (LE_16(raid_map->data_disks_per_row) +
1181 LE_16(raid_map->metadata_disks_per_row));
1182 num_raidmap_entries = phys_dev_num *
1183 LE_16(raid_map->row_cnt);
1186 for (k = 0; k < num_raidmap_entries; k++) {
1187 phys_disk = pqisrc_identify_device_via_ioaccel(softs,
1188 dev_data[k].ioaccel_handle);
1192 "Failed to find physical disk handle for logical drive %016llx\n",
/*
 * NOTE(review): BE_64 is applied to scsi3addr[0], a single byte —
 * presumably the macro re-reads 8 bytes at that address for display;
 * confirm against the BE_64 definition.
 */
1193 (unsigned long long)BE_64(device->scsi3addr[0]));
/* Unresolvable handle: disable offload and drop the RAID map. */
1194 device->offload_enabled = false;
1195 device->offload_enabled_pending = false;
1197 os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
1198 device->raid_map = NULL;
1202 queue_depth += phys_disk->queue_depth;
1205 device->queue_depth = queue_depth;
1206 } /* end inner loop */
1207 }/* end outer loop */
1211 /* Function used to add a scsi device to OS scsi subsystem */
1212 static int pqisrc_add_device(pqisrc_softstate_t *softs,
1213 pqi_scsi_dev_t *device)
1216 DBG_WARN("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1217 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1219 device->invalid = false;
1221 if(device->expose_device) {
1222 /* TBD: Call OS upper layer function to add the device entry */
1223 os_add_device(softs,device);
1226 return PQI_STATUS_SUCCESS;
1230 /* Function used to remove a scsi device from OS scsi subsystem */
1231 void pqisrc_remove_device(pqisrc_softstate_t *softs,
1232 pqi_scsi_dev_t *device)
1235 DBG_DISC("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1236 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1238 /* TBD: Call OS upper layer function to remove the device entry */
1239 device->invalid = true;
1240 os_remove_device(softs,device);
1246 * When exposing new device to OS fails then adjst list according to the
1249 static void pqisrc_adjust_list(pqisrc_softstate_t *softs,
1250 pqi_scsi_dev_t *device)
1255 DBG_ERR("softs = %p: device is NULL !!!\n", softs);
1259 OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1260 softs->device_list[device->target][device->lun] = NULL;
1261 OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1262 pqisrc_device_mem_free(softs, device);
1267 /* Debug routine used to display the RAID volume status of the device */
/*
 * Map the device's SA_LV_* volume status to a human-readable string
 * and log it with the device's B:T:L. Purely diagnostic; no state is
 * modified. (The first case label and the break statements fall on
 * lines outside this view.)
 */
1268 static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
1269 pqi_scsi_dev_t *device)
1274 switch (device->volume_status) {
1276 status = "Volume is online.";
1278 case SA_LV_UNDERGOING_ERASE:
1279 status = "Volume is undergoing background erase process.";
1281 case SA_LV_NOT_AVAILABLE:
1282 status = "Volume is waiting for transforming volume.";
1284 case SA_LV_UNDERGOING_RPI:
1285 status = "Volume is undergoing rapid parity initialization process.";
1287 case SA_LV_PENDING_RPI:
1288 status = "Volume is queued for rapid parity initialization process.";
1290 case SA_LV_ENCRYPTED_NO_KEY:
1291 status = "Volume is encrypted and cannot be accessed because key is not present.";
1293 case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1294 status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.";
1296 case SA_LV_UNDERGOING_ENCRYPTION:
1297 status = "Volume is undergoing encryption process.";
1299 case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1300 status = "Volume is undergoing encryption re-keying process.";
1302 case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1303 status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled.";
1305 case SA_LV_PENDING_ENCRYPTION:
1306 status = "Volume is pending migration to encrypted state, but process has not started.";
1308 case SA_LV_PENDING_ENCRYPTION_REKEYING:
1309 status = "Volume is encrypted and is pending encryption rekeying.";
1311 case SA_LV_STATUS_VPD_UNSUPPORTED:
1312 status = "Volume status is not available through vital product data pages.";
1315 status = "Volume is in an unknown state.";
1319 DBG_DISC("scsi BTL %d:%d:%d %s\n",
1320 device->bus, device->target, device->lun, status);
/*
 * Release all memory owned by a scsi device object: the RAID map (if one
 * was fetched for this device) followed by the device structure itself.
 * NOTE(review): caller is responsible for having already unlinked the
 * device from softs->device_list — this routine does not touch the table.
 */
1324 void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1329 if (device->raid_map) {
1330 os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
1332 os_mem_free(softs, (char *)device,sizeof(*device));
1337 /* OS should call this function to free the scsi device */
1338 void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
/*
 * Entire teardown runs under the device-list spinlock so the target-ID
 * pool and the device memory are released atomically with respect to a
 * concurrent rescan.
 */
1341 OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
/* Physical (non-logical) devices return their target ID to the pool. */
1342 if (!pqisrc_is_logical_device(device)) {
1343 pqisrc_free_tid(softs,device->target);
1345 pqisrc_device_mem_free(softs, device);
1346 OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1351 /* Update the newly added devices to the device list */
1352 static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
1353 pqi_scsi_dev_t *new_device_list[], int num_new_devices)
1357 device_status_t dev_status;
1358 pqi_scsi_dev_t *device;
1359 pqi_scsi_dev_t *same_device;
1360 pqi_scsi_dev_t **added = NULL;
1361 pqi_scsi_dev_t **removed = NULL;
1362 int nadded = 0, nremoved = 0;
/* Scratch arrays to remember which devices to report added/removed
 * after the devlist_lock is dropped. */
1368 added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES);
1369 removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES);
1371 if (!added || !removed) {
1372 DBG_WARN("Out of memory \n");
1376 OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
/* Phase 1: assume every currently known device is gone; the scan
 * results below will clear the flag for devices still present. */
1378 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1379 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1380 if(softs->device_list[i][j] == NULL)
1382 device = softs->device_list[i][j];
1383 device->device_gone = true;
/* Phase 2: classify each freshly reported device against the
 * existing table (unchanged / not found / changed). */
1386 DBG_IO("Device list used an array\n");
1387 for (i = 0; i < num_new_devices; i++) {
1388 device = new_device_list[i];
1390 dev_status = pqisrc_scsi_find_entry(softs, device,
1393 switch (dev_status) {
1394 case DEVICE_UNCHANGED:
1395 /* New Device present in existing device list */
1396 device->new_device = false;
1397 same_device->device_gone = false;
1398 pqisrc_exist_device_update(softs, same_device, device);
1400 case DEVICE_NOT_FOUND:
1401 /* Device not found in existing list */
1402 device->new_device = true;
1404 case DEVICE_CHANGED:
1405 /* Actual device gone need to add device to list*/
1406 device->new_device = true;
1412 /* Process all devices that have gone away. */
1413 for(i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) {
1414 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1415 if(softs->device_list[i][j] == NULL)
1417 device = softs->device_list[i][j];
1418 if (device->device_gone) {
1419 softs->device_list[device->target][device->lun] = NULL;
1420 removed[nremoved] = device;
1426 /* Process all new devices. */
1427 for (i = 0, nadded = 0; i < num_new_devices; i++) {
1428 device = new_device_list[i];
1429 if (!device->new_device)
1431 if (device->volume_offline)
1434 /* physical device */
/* Physical devices get a target ID from the driver's pool. */
1435 if (!pqisrc_is_logical_device(device)) {
1436 tid = pqisrc_alloc_tid(softs);
1437 if(INVALID_ELEM != tid)
1438 pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
1441 softs->device_list[device->target][device->lun] = device;
1442 DBG_DISC("Added device %p at B : %d T : %d L : %d\n",device,
1443 device->bus,device->target,device->lun);
1444 /* To prevent this entry from being freed later. */
1445 new_device_list[i] = NULL;
1446 added[nadded] = device;
/* Recompute logical-device queue depths now that membership changed. */
1450 pqisrc_update_log_dev_qdepth(softs);
/* Commit any pending offload (accelerated I/O) state changes. */
1452 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1453 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1454 if(softs->device_list[i][j] == NULL)
1456 device = softs->device_list[i][j];
1457 device->offload_enabled = device->offload_enabled_pending;
1461 OS_RELEASE_SPINLOCK(&softs->devlist_lock);
/* Lock dropped: OS notifications below may sleep. First detach the
 * removed devices from the OS. */
1463 for(i = 0; i < nremoved; i++) {
1464 device = removed[i];
1467 pqisrc_remove_device(softs, device);
1468 pqisrc_display_device_info(softs, "removed", device);
1472 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1473 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1474 if(softs->device_list[i][j] == NULL)
1476 device = softs->device_list[i][j];
1478 * Notify the OS upper layer if the queue depth of any existing device has
1481 if (device->queue_depth !=
1482 device->advertised_queue_depth) {
1483 device->advertised_queue_depth = device->queue_depth;
1484 /* TBD: Call OS upper layer function to change device Q depth */
/* Expose newly added devices to the OS; roll back on failure. */
1488 for(i = 0; i < nadded; i++) {
1490 if (device->expose_device) {
1491 ret = pqisrc_add_device(softs, device);
1493 DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
1494 device->bus, device->target,
1496 pqisrc_adjust_list(softs, device);
1501 pqisrc_display_device_info(softs, "added", device);
1504 /* Process all volumes that are offline. */
1505 for (i = 0; i < num_new_devices; i++) {
1506 device = new_device_list[i];
1509 if (!device->new_device)
1511 if (device->volume_offline) {
1512 pqisrc_display_volume_status(softs, device);
1513 pqisrc_display_device_info(softs, "offline", device);
/* Release the scratch added/removed arrays. */
1519 os_mem_free(softs, (char *)added,
1520 sizeof(*added) * PQI_MAX_DEVICES);
1522 os_mem_free(softs, (char *)removed,
1523 sizeof(*removed) * PQI_MAX_DEVICES);
1529 * Let the Adapter know about driver version using one of BMIC
1530 * BMIC_WRITE_HOST_WELLNESS
1532 int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
1534 int rval = PQI_STATUS_SUCCESS;
1535 struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
1537 pqisrc_raid_req_t request;
1541 memset(&request, 0, sizeof(request));
1542 data_length = sizeof(*host_wellness_driver_ver);
1544 host_wellness_driver_ver = os_mem_alloc(softs, data_length);
1545 if (!host_wellness_driver_ver) {
1546 DBG_ERR("failed to allocate memory for host wellness driver_version\n");
1547 return PQI_STATUS_FAILURE;
/* Fill in the firmware-defined framing tags: "<HW>" ... "ZZ". */
1550 host_wellness_driver_ver->start_tag[0] = '<';
1551 host_wellness_driver_ver->start_tag[1] = 'H';
1552 host_wellness_driver_ver->start_tag[2] = 'W';
1553 host_wellness_driver_ver->start_tag[3] = '>';
1554 host_wellness_driver_ver->driver_version_tag[0] = 'D';
1555 host_wellness_driver_ver->driver_version_tag[1] = 'V';
1556 host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version));
/*
 * Build "<os_name><driver_version>". strncpy does not guarantee
 * NUL-termination, so the buffer is explicitly terminated below.
 */
1557 strncpy(host_wellness_driver_ver->driver_version, softs->os_name,
1558 sizeof(host_wellness_driver_ver->driver_version));
1559 if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version) ) {
1560 strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name), PQISRC_DRIVER_VERSION,
1561 sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name));
1563 DBG_DISC("OS name length(%lu) is longer than buffer of driver_version\n",
1564 strlen(softs->os_name));
/* Guarantee NUL-termination regardless of the strncpy results above. */
1566 host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
1567 host_wellness_driver_ver->end_tag[0] = 'Z';
1568 host_wellness_driver_ver->end_tag[1] = 'Z';
/* Send the buffer to the controller via BMIC write-host-wellness. */
1570 rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver,data_length,
1571 BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1573 os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
1580 * Write current RTC time from host to the adapter using
1581 * BMIC_WRITE_HOST_WELLNESS
1583 int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
1585 int rval = PQI_STATUS_SUCCESS;
1586 struct bmic_host_wellness_time *host_wellness_time;
1588 pqisrc_raid_req_t request;
1592 memset(&request, 0, sizeof(request));
1593 data_length = sizeof(*host_wellness_time);
1595 host_wellness_time = os_mem_alloc(softs, data_length);
1596 if (!host_wellness_time) {
1597 DBG_ERR("failed to allocate memory for host wellness time structure\n");
1598 return PQI_STATUS_FAILURE;
/* Firmware-defined framing tags: "<HW>" ... "TD" ... "DW" "ZZ". */
1601 host_wellness_time->start_tag[0] = '<';
1602 host_wellness_time->start_tag[1] = 'H';
1603 host_wellness_time->start_tag[2] = 'W';
1604 host_wellness_time->start_tag[3] = '>';
1605 host_wellness_time->time_tag[0] = 'T';
1606 host_wellness_time->time_tag[1] = 'D';
/* Payload length = span of the time fields (century .. time_length). */
1607 host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
1608 offsetof(struct bmic_host_wellness_time, century));
/* OS-layer hook fills in the current wall-clock time fields. */
1610 os_get_time(host_wellness_time);
1612 host_wellness_time->dont_write_tag[0] = 'D';
1613 host_wellness_time->dont_write_tag[1] = 'W';
1614 host_wellness_time->end_tag[0] = 'Z';
1615 host_wellness_time->end_tag[1] = 'Z';
/* Send the buffer to the controller via BMIC write-host-wellness. */
1617 rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time,data_length,
1618 BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1620 os_mem_free(softs, (char *)host_wellness_time, data_length);
1627 * Function used to perform a rescan of scsi devices
1628 * for any config change events
1630 int pqisrc_scan_devices(pqisrc_softstate_t *softs)
1632 boolean_t is_physical_device;
1633 int ret = PQI_STATUS_FAILURE;
1636 int phy_log_dev_cnt;
1638 uint32_t physical_cnt;
1639 uint32_t logical_cnt;
1640 uint32_t ndev_allocated = 0;
1641 size_t phys_data_length, log_data_length;
1642 reportlun_data_ext_t *physical_dev_list = NULL;
1643 reportlun_data_ext_t *logical_dev_list = NULL;
1644 reportlun_ext_entry_t *lun_ext_entry = NULL;
1645 bmic_ident_physdev_t *bmic_phy_info = NULL;
1646 pqi_scsi_dev_t **new_device_list = NULL;
1647 pqi_scsi_dev_t *device = NULL;
/* Phase 1: fetch the physical and logical LUN lists from firmware. */
1652 ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
1653 &phys_data_length, &log_data_length);
/* list_length is big-endian per the REPORT LUNS wire format. */
1658 physical_cnt = BE_32(physical_dev_list->header.list_length)
1659 / sizeof(physical_dev_list->lun_entries[0]);
1661 logical_cnt = BE_32(logical_dev_list->header.list_length)
1662 / sizeof(logical_dev_list->lun_entries[0]);
1664 DBG_DISC("physical_cnt %d logical_cnt %d\n", physical_cnt, logical_cnt);
/* Phase 2: allocate scratch structures for the scan. */
1667 bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
1668 if (bmic_phy_info == NULL) {
1669 ret = PQI_STATUS_FAILURE;
1670 DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret);
1674 phy_log_dev_cnt = physical_cnt + logical_cnt;
1675 new_device_list = os_mem_alloc(softs,
1676 sizeof(*new_device_list) * phy_log_dev_cnt);
1678 if (new_device_list == NULL) {
1679 ret = PQI_STATUS_FAILURE;
1680 DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1684 for (i = 0; i < phy_log_dev_cnt; i++) {
1685 new_device_list[i] = os_mem_alloc(softs,
1686 sizeof(*new_device_list[i]));
1687 if (new_device_list[i] == NULL) {
1688 ret = PQI_STATUS_FAILURE;
1689 DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1695 ndev_allocated = phy_log_dev_cnt;
/* Phase 3: walk physical entries first, then logical entries. */
1697 for (i = 0; i < phy_log_dev_cnt; i++) {
1699 if (i < physical_cnt) {
1700 is_physical_device = true;
1701 lun_ext_entry = &physical_dev_list->lun_entries[i];
1703 is_physical_device = false;
1705 &logical_dev_list->lun_entries[i - physical_cnt];
1708 scsi3addr = lun_ext_entry->lunid;
1709 /* Save the target sas adderess for external raid device */
1710 if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
1711 int target = lun_ext_entry->lunid[3] & 0x3f;
1712 softs->target_sas_addr[target] = BE_64(lun_ext_entry->wwid);
1715 /* Skip masked physical non-disk devices. */
1716 if (MASKED_DEVICE(scsi3addr) && is_physical_device
1717 && (lun_ext_entry->ioaccel_handle == 0))
/* Populate the next candidate device slot from the LUN entry. */
1720 device = new_device_list[new_dev_cnt];
1721 memset(device, 0, sizeof(*device));
1722 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1723 device->wwid = lun_ext_entry->wwid;
1724 device->is_physical_device = is_physical_device;
1725 if (!is_physical_device)
1726 device->is_external_raid_device =
1727 pqisrc_is_external_raid_addr(scsi3addr);
1730 /* Get device type, vendor, model, device ID. */
1731 ret = pqisrc_get_dev_data(softs, device);
1733 DBG_WARN("Inquiry failed, skipping device %016llx\n",
1734 (unsigned long long)BE_64(device->scsi3addr[0]));
1735 DBG_DISC("INQUIRY FAILED \n");
/* Assign the device's bus/target/lun address. */
1738 pqisrc_assign_btl(device);
1741 * Expose all devices except for physical devices that
1744 if (device->is_physical_device &&
1745 MASKED_DEVICE(scsi3addr))
1746 device->expose_device = false;
1748 device->expose_device = true;
/* AIO path is usable only when firmware flags it and a handle exists. */
1750 if (device->is_physical_device &&
1751 (lun_ext_entry->device_flags &
1752 REPORT_LUN_DEV_FLAG_AIO_ENABLED) &&
1753 lun_ext_entry->ioaccel_handle) {
1754 device->aio_enabled = true;
/* Per-device-type handling decides whether the entry is kept. */
1756 switch (device->devtype) {
1759 * We don't *really* support actual CD-ROM devices,
1760 * but we do support the HP "One Button Disaster
1761 * Recovery" tape drive which temporarily pretends to
1762 * be a CD-ROM drive.
1764 if (device->is_obdr_device)
1769 if (device->is_physical_device) {
1770 device->ioaccel_handle =
1771 lun_ext_entry->ioaccel_handle;
1772 device->sas_address = BE_64(lun_ext_entry->wwid);
1773 pqisrc_get_physical_device_info(softs, device,
1778 case ENCLOSURE_DEVICE:
1779 if (device->is_physical_device) {
1780 device->sas_address = BE_64(lun_ext_entry->wwid);
1785 case MEDIUM_CHANGER_DEVICE:
1790 * Only present the HBA controller itself as a RAID
1791 * controller. If it's a RAID controller other than
1792 * the HBA itself (an external RAID controller, MSA500
1793 * or similar), don't present it.
1795 if (pqisrc_is_hba_lunid(scsi3addr))
1799 case CONTROLLER_DEVICE:
1803 DBG_DISC("new_dev_cnt %d\n", new_dev_cnt);
/* Phase 4: reconcile the fresh scan with the existing device table. */
1805 pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
/* Phase 5: free any candidate devices not consumed by the update
 * (consumed slots were NULLed out by pqisrc_update_device_list). */
1808 if (new_device_list) {
1809 for (i = 0; i < ndev_allocated; i++) {
1810 if (new_device_list[i]) {
1811 if(new_device_list[i]->raid_map)
1812 os_mem_free(softs, (char *)new_device_list[i]->raid_map,
1813 sizeof(pqisrc_raid_map_t));
1814 os_mem_free(softs, (char*)new_device_list[i],
1815 sizeof(*new_device_list[i]));
1818 os_mem_free(softs, (char *)new_device_list,
1819 sizeof(*new_device_list) * ndev_allocated);
1821 if(physical_dev_list)
1822 os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
1823 if(logical_dev_list)
1824 os_mem_free(softs, (char *)logical_dev_list, log_data_length);
1826 os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
1834 * Clean up memory allocated for devices.
1836 void pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
1840 pqi_scsi_dev_t *dvp = NULL;
1843 for(i = 0; i < PQI_MAX_DEVICES; i++) {
1844 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1845 if (softs->device_list[i][j] == NULL)
1847 dvp = softs->device_list[i][j];
1848 pqisrc_device_mem_free(softs, dvp);