]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/dev/smartpqi/smartpqi_discovery.c
MFV r368464:
[FreeBSD/FreeBSD.git] / sys / dev / smartpqi / smartpqi_discovery.c
1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /* $FreeBSD$ */
28
29 #include "smartpqi_includes.h"
30
31 /* Validate the scsi sense response code */
32 static inline boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
33 {
34         DBG_FUNC("IN\n");
35
36         if (!sshdr)
37                 return false;
38
39         DBG_FUNC("OUT\n");
40
41         return (sshdr->response_code & 0x70) == 0x70;
42 }
43
44 /* Initialize target ID pool for HBA/PDs */
45 void  pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
46 {
47         int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1;
48
49         for(i = 0; i < PQI_MAX_PHYSICALS; i++) {
50                 softs->tid_pool.tid[i] = tid--;
51         }
52         softs->tid_pool.index = i - 1;
53 }
54
55 int pqisrc_alloc_tid(pqisrc_softstate_t *softs)
56 {
57         if(softs->tid_pool.index <= -1) {
58                 DBG_ERR("Target ID exhausted\n");
59                 return INVALID_ELEM;
60         }
61
62         return  softs->tid_pool.tid[softs->tid_pool.index--];
63 }
64
65 void pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
66 {
67         if(softs->tid_pool.index >= PQI_MAX_PHYSICALS) {
68                 DBG_ERR("Target ID queue is full\n");
69                 return;
70         }
71
72         softs->tid_pool.index++;
73         softs->tid_pool.tid[softs->tid_pool.index] = tid;
74 }
75
76 /* Update scsi sense info to a local buffer*/
77 boolean_t pqisrc_update_scsi_sense(const uint8_t *buff, int len,
78                               struct sense_header_scsi *header)
79 {
80
81         DBG_FUNC("IN\n");
82
83         if (!buff || !len)
84                 return false;
85
86         memset(header, 0, sizeof(struct sense_header_scsi));
87
88         header->response_code = (buff[0] & 0x7f);
89
90         if (!pqisrc_scsi_sense_valid(header))
91                 return false;
92
93         if (header->response_code >= 0x72) {
94                 /* descriptor format */
95                 if (len > 1)
96                         header->sense_key = (buff[1] & 0xf);
97                 if (len > 2)
98                         header->asc = buff[2];
99                 if (len > 3)
100                         header->ascq = buff[3];
101                 if (len > 7)
102                         header->additional_length = buff[7];
103         } else {
104                  /* fixed format */
105                 if (len > 2)
106                         header->sense_key = (buff[2] & 0xf);
107                 if (len > 7) {
108                         len = (len < (buff[7] + 8)) ?
109                                         len : (buff[7] + 8);
110                         if (len > 12)
111                                 header->asc = buff[12];
112                         if (len > 13)
113                                 header->ascq = buff[13];
114                 }
115         }
116
117         DBG_FUNC("OUT\n");
118
119         return true;
120 }
121
/*
 * Function used to build the internal raid request and analyze the response
 *
 * Builds a RAID-path IU for the given command, optionally allocating a
 * DMA buffer of 'datasize' bytes (datasize == 0 means no data phase, as
 * for TEST_UNIT_READY), submits it on the default operational IB queue
 * and waits for completion.  On success, response data (if any) is
 * copied back into 'buff'.  If 'error_info' is non-NULL it receives the
 * raid-path error element; a DATA_IN_OUT_UNDERFLOW is treated as
 * success.  Returns PQI_STATUS_SUCCESS or a failure code.
 */
int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs,  pqisrc_raid_req_t *request,
                            void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
                            raid_path_error_info_elem_t *error_info)
{

        uint8_t *cdb;
        int ret = PQI_STATUS_SUCCESS;
        uint32_t tag = 0;
        struct dma_mem device_mem;
        sgt_t *sgd;

        /* Internal requests always go through the default IB/OB queue pair */
        ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
        ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];

        rcb_t *rcb = NULL;

        DBG_FUNC("IN\n");

        memset(&device_mem, 0, sizeof(struct dma_mem));

        /* for TUR datasize: 0 buff: NULL */
        if (datasize) {
                device_mem.tag = "device_mem";
                device_mem.size = datasize;
                device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;

                ret = os_dma_mem_alloc(softs, &device_mem);

                if (ret) {
                        DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
                        return ret;
                }

                /* Single SG element describes the whole DMA buffer */
                sgd = (sgt_t *)&request->sg_descriptors[0];

                sgd->addr = device_mem.dma_addr;
                sgd->len = datasize;
                sgd->flags = SG_FLAG_LAST;
        }

        /* Build raid path request */
        request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;

        /* IU length covers everything after the header up to one SG element */
        request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t,
                                                        sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH);
        request->buffer_length = LE_32(datasize);
        memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
        request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
        request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;

        cdb = request->cdb;

        /* Fill in the CDB and data direction per command type */
        switch (cmd) {
        case SA_INQUIRY:
                request->data_direction = SOP_DATA_DIR_TO_DEVICE;
                cdb[0] = SA_INQUIRY;
                if (vpd_page & VPD_PAGE) {
                        cdb[1] = 0x1;   /* EVPD bit */
                        cdb[2] = (uint8_t)vpd_page;
                }
                cdb[4] = (uint8_t)datasize;
                break;
        case SA_REPORT_LOG:
        case SA_REPORT_PHYS:
                request->data_direction = SOP_DATA_DIR_TO_DEVICE;
                cdb[0] = cmd;
                if (cmd == SA_REPORT_PHYS)
                        cdb[1] = SA_REPORT_PHYS_EXTENDED;
                else
                cdb[1] = SA_REPORT_LOG_EXTENDED;
                /* Allocation length, big-endian in bytes 8-9 */
                cdb[8] = (uint8_t)((datasize) >> 8);
                cdb[9] = (uint8_t)datasize;
                break;
        case TEST_UNIT_READY:
                request->data_direction = SOP_DATA_DIR_NONE;
                break;
        case SA_GET_RAID_MAP:
                request->data_direction = SOP_DATA_DIR_TO_DEVICE;
                cdb[0] = SA_CISS_READ;
                cdb[1] = cmd;
                cdb[8] = (uint8_t)((datasize) >> 8);
                cdb[9] = (uint8_t)datasize;
                break;
        case SA_CACHE_FLUSH:
                request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
                /* Outbound command: stage the caller's payload into DMA memory */
                memcpy(device_mem.virt_addr, buff, datasize);
                cdb[0] = BMIC_WRITE;
                cdb[6] = BMIC_CACHE_FLUSH;
                /*
                 * NOTE(review): (datasize) << 8 truncated to uint8_t is
                 * always 0, so the low byte of the transfer length never
                 * reaches the CDB; presumably the firmware honors the IU
                 * buffer_length instead — verify against the BMIC spec.
                 */
                cdb[7] = (uint8_t)((datasize)  << 8);
                cdb[8] = (uint8_t)((datasize)  >> 8);
                break;
        case BMIC_IDENTIFY_CONTROLLER:
        case BMIC_IDENTIFY_PHYSICAL_DEVICE:
                request->data_direction = SOP_DATA_DIR_TO_DEVICE;
                cdb[0] = BMIC_READ;
                cdb[6] = cmd;
                cdb[7] = (uint8_t)((datasize)  << 8);
                cdb[8] = (uint8_t)((datasize)  >> 8);
                break;
        case BMIC_WRITE_HOST_WELLNESS:
                request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
                memcpy(device_mem.virt_addr, buff, datasize);
                cdb[0] = BMIC_WRITE;
                cdb[6] = cmd;
                cdb[7] = (uint8_t)((datasize)  << 8);
                cdb[8] = (uint8_t)((datasize)  >> 8);
                break;
        case BMIC_SENSE_SUBSYSTEM_INFORMATION:
                request->data_direction = SOP_DATA_DIR_TO_DEVICE;
                cdb[0] = BMIC_READ;
                cdb[6] = cmd;
                cdb[7] = (uint8_t)((datasize)  << 8);
                cdb[8] = (uint8_t)((datasize)  >> 8);
                break;
        default:
                DBG_ERR("unknown command 0x%x", cmd);
                break;
        }

        /* Reserve a tag; it doubles as the rcb index and error index */
        tag = pqisrc_get_tag(&softs->taglist);
        if (INVALID_ELEM == tag) {
                DBG_ERR("Tag not available\n");
                ret = PQI_STATUS_FAILURE;
                goto err_notag;
        }

        ((pqisrc_raid_req_t *)request)->request_id = tag;
        ((pqisrc_raid_req_t *)request)->error_index = ((pqisrc_raid_req_t *)request)->request_id;
        ((pqisrc_raid_req_t *)request)->response_queue_id = ob_q->q_id;
        rcb = &softs->rcb[tag];
        rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
        rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;

        /* Mark pending before submit so the completion can't race the wait */
        rcb->req_pending = true;
        rcb->tag = tag;
        /* Submit Command */
        ret = pqisrc_submit_cmnd(softs, ib_q, request);

        if (ret != PQI_STATUS_SUCCESS) {
                DBG_ERR("Unable to submit command\n");
                goto err_out;
        }

        /* Block until the completion callback clears req_pending (or timeout) */
        ret = pqisrc_wait_on_condition(softs, rcb);
        if (ret != PQI_STATUS_SUCCESS) {
                /* NOTE(review): %c prints cmd as a character; %x was likely intended */
                DBG_ERR("Internal RAID request timed out: cmd : 0x%c\n", cmd);
                goto err_out;
        }

        /* Copy any response data back to the caller and release DMA memory */
        if (datasize) {
                if (buff) {
                        memcpy(buff, device_mem.virt_addr, datasize);
                }
                os_dma_mem_free(softs, &device_mem);
        }

        ret = rcb->status;
        if (ret) {
                if(error_info) {
                        memcpy(error_info,
                               rcb->error_info,
                               sizeof(*error_info));

                        /* An underflow still returned usable data: not an error */
                        if (error_info->data_out_result ==
                            PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
                                ret = PQI_STATUS_SUCCESS;
                        }
                        else{
                                DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x,"
                                        "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
                                        BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
                                        cmd, ret);
                                ret = PQI_STATUS_FAILURE;
                        }
                }
        } else {
                /* Success: hand back a zeroed error element if one was requested */
                if(error_info) {
                        ret = PQI_STATUS_SUCCESS;
                        memset(error_info, 0, sizeof(*error_info));
                }
        }

        os_reset_rcb(rcb);
        pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
        DBG_FUNC("OUT\n");
        return ret;

err_out:
        DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
                BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
                cmd, ret);
        os_reset_rcb(rcb);
        pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
err_notag:
        /* DMA memory is only released here on the error paths */
        if (datasize)
                os_dma_mem_free(softs, &device_mem);
        DBG_FUNC("FAILED \n");
        return ret;
}
324
325 /* common function used to send report physical and logical luns cmnds*/
326 static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
327         void *buff, size_t buf_len)
328 {
329         int ret;
330         pqisrc_raid_req_t request;
331
332         DBG_FUNC("IN\n");
333
334         memset(&request, 0, sizeof(request));
335         ret =  pqisrc_build_send_raid_request(softs, &request, buff, 
336                                 buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
337
338         DBG_FUNC("OUT\n");
339
340         return ret;
341 }
342
343 /* subroutine used to get physical and logical luns of the device */
344 static int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
345                 reportlun_data_ext_t **buff, size_t *data_length)
346 {
347         int ret;
348         size_t list_len;
349         size_t data_len;
350         size_t new_lun_list_length;
351         reportlun_data_ext_t *lun_data;
352         reportlun_header_t report_lun_header;
353
354         DBG_FUNC("IN\n");
355
356         ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
357                 sizeof(report_lun_header));
358
359         if (ret) {
360                 DBG_ERR("failed return code: %d\n", ret);
361                 return ret;
362         }
363         list_len = BE_32(report_lun_header.list_length);
364
365 retry:
366         data_len = sizeof(reportlun_header_t) + list_len;
367         *data_length = data_len;
368
369         lun_data = os_mem_alloc(softs, data_len);
370
371         if (!lun_data) {
372                 DBG_ERR("failed to allocate memory for lun_data\n");
373                 return PQI_STATUS_FAILURE;
374         }
375                 
376         if (list_len == 0) {
377                 DBG_DISC("list_len is 0\n");
378                 memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
379                 goto out;
380         }
381
382         ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
383
384         if (ret) {
385                 DBG_ERR("error\n");
386                 goto error;
387         }
388
389         new_lun_list_length = BE_32(lun_data->header.list_length);
390
391         if (new_lun_list_length > list_len) {
392                 list_len = new_lun_list_length;
393                 os_mem_free(softs, (void *)lun_data, data_len);
394                 goto retry;
395         }
396
397 out:
398         *buff = lun_data;
399         DBG_FUNC("OUT\n");
400         return 0;
401
402 error:
403         os_mem_free(softs, (void *)lun_data, data_len);
404         DBG_ERR("FAILED\n");
405         return ret;
406 }
407
/*
 * Function used to get physical and logical device list
 *
 * Fetches both LUN lists, then rebuilds the logical list with one extra
 * zeroed reportlun_ext_entry_t appended to represent the controller
 * itself.  On success *logical_dev_list/*log_data_length are replaced
 * with the enlarged copy; the original logical list is freed.
 */
static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
        reportlun_data_ext_t **physical_dev_list,
        reportlun_data_ext_t **logical_dev_list,
        size_t *phys_data_length,
        size_t *log_data_length)
{
        int ret = PQI_STATUS_SUCCESS;
        size_t logical_list_length;
        size_t logdev_data_length;
        size_t data_length;
        reportlun_data_ext_t *local_logdev_list;
        reportlun_data_ext_t *logdev_data;
        reportlun_header_t report_lun_header;

        DBG_FUNC("IN\n");

        ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
        if (ret) {
                DBG_ERR("report physical LUNs failed");
                return ret;
        }

        /*
         * NOTE(review): if this second fetch fails, the physical list
         * allocated above is returned without being freed here —
         * presumably the caller releases it; confirm at the call sites.
         */
        ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
        if (ret) {
                DBG_ERR("report logical LUNs failed");
                return ret;
        }

        logdev_data = *logical_dev_list;

        if (logdev_data) {
                logical_list_length =
                        BE_32(logdev_data->header.list_length);
        } else {
                /* No logical list returned: synthesize an empty header */
                memset(&report_lun_header, 0, sizeof(report_lun_header));
                logdev_data =
                        (reportlun_data_ext_t *)&report_lun_header;
                logical_list_length = 0;
        }

        logdev_data_length = sizeof(reportlun_header_t) +
                logical_list_length;

        /* Adding LOGICAL device entry for controller */
        local_logdev_list = os_mem_alloc(softs,
                                            logdev_data_length + sizeof(reportlun_ext_entry_t));
        if (!local_logdev_list) {
                data_length = *log_data_length;
                os_mem_free(softs, (char *)*logical_dev_list, data_length);
                *logical_dev_list = NULL;
                return PQI_STATUS_FAILURE;
        }

        /* Copy the original list, append a zeroed controller entry,
         * and patch the header's list length to include it */
        memcpy(local_logdev_list, logdev_data, logdev_data_length);
        memset((uint8_t *)local_logdev_list + logdev_data_length, 0,
                sizeof(reportlun_ext_entry_t));
        local_logdev_list->header.list_length = BE_32(logical_list_length +
                                                        sizeof(reportlun_ext_entry_t));
        data_length = *log_data_length;
        os_mem_free(softs, (char *)*logical_dev_list, data_length);
        *log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t);
        *logical_dev_list = local_logdev_list;

        DBG_FUNC("OUT\n");

        return ret;
}
478
479 /* Subroutine used to set Bus-Target-Lun for the requested device */
480 static inline void pqisrc_set_btl(pqi_scsi_dev_t *device,
481         int bus, int target, int lun)
482 {
483         DBG_FUNC("IN\n");
484
485         device->bus = bus;
486         device->target = target;
487         device->lun = lun;
488
489         DBG_FUNC("OUT\n");
490 }
491
492 inline boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
493 {
494         return device->is_external_raid_device;
495 }
496
497 static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
498 {
499         return scsi3addr[2] != 0;
500 }
501
502 /* Function used to assign Bus-Target-Lun for the requested device */
503 static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
504 {
505         uint8_t *scsi3addr;
506         uint32_t lunid;
507         uint32_t bus;
508         uint32_t target;
509         uint32_t lun;
510         DBG_FUNC("IN\n");
511
512         scsi3addr = device->scsi3addr;
513         lunid = GET_LE32(scsi3addr);
514
515         if (pqisrc_is_hba_lunid(scsi3addr)) {
516                 /* The specified device is the controller. */
517                 pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, lunid & 0x3fff);
518                 device->target_lun_valid = true;
519                 return;
520         }
521
522         if (pqisrc_is_logical_device(device)) {
523                 if (pqisrc_is_external_raid_device(device)) {
524                         DBG_DISC("External Raid Device!!!");
525                         bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
526                         target = (lunid >> 16) & 0x3fff;
527                         lun = lunid & 0xff;
528                 } else {
529                         bus = PQI_RAID_VOLUME_BUS;
530                         lun = 0;
531                         target = lunid & 0x3fff;
532                 }
533                 pqisrc_set_btl(device, bus, target, lun);
534                 device->target_lun_valid = true;
535                 return;
536         }
537
538         DBG_FUNC("OUT\n");
539 }
540
541 /* Build and send the internal INQUIRY command to particular device */
542 static int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
543         uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
544 {
545         int ret = PQI_STATUS_SUCCESS;
546         pqisrc_raid_req_t request;
547         raid_path_error_info_elem_t error_info;
548
549         DBG_FUNC("IN\n");
550
551         memset(&request, 0, sizeof(request));
552         ret =  pqisrc_build_send_raid_request(softs, &request, buff, buf_len, 
553                                                                 SA_INQUIRY, vpd_page, scsi3addr, &error_info);
554
555         DBG_FUNC("OUT\n");
556         return ret;
557 }
558
559 /* Function used to parse the sense information from response */
560 static void pqisrc_fetch_sense_info(const uint8_t *sense_data,
561         unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
562 {
563         struct sense_header_scsi header;
564
565         DBG_FUNC("IN\n");
566
567         *sense_key = 0;
568         *ascq = 0;
569         *asc = 0;
570
571         if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) {
572                 *sense_key = header.sense_key;
573                 *asc = header.asc;
574                 *ascq = header.ascq;
575         }
576
577         DBG_DISC("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq);
578
579         DBG_FUNC("OUT\n");
580 }
581
582 /* Function used to validate volume offline status */
583 static uint8_t pqisrc_get_volume_offline_status(pqisrc_softstate_t *softs,
584         uint8_t *scsi3addr)
585 {
586         int ret = PQI_STATUS_SUCCESS;
587         uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
588         uint8_t size;
589         uint8_t *buff = NULL;
590
591         DBG_FUNC("IN\n");
592
593         buff = os_mem_alloc(softs, 64);
594         if (!buff)
595                 return PQI_STATUS_FAILURE;
596
597         /* Get the size of the VPD return buff. */
598         ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
599                 buff, SCSI_VPD_HEADER_LENGTH);
600
601         if (ret)
602                 goto out;
603
604         size = buff[3];
605
606         /* Now get the whole VPD buff. */
607         ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
608                 buff, size + SCSI_VPD_HEADER_LENGTH);
609         if (ret)
610                 goto out;
611
612         status = buff[4];
613
614 out:
615         os_mem_free(softs, (char *)buff, 64);
616         DBG_FUNC("OUT\n");
617
618         return status;
619 }
620
/* Determine offline status of a volume.  Returns appropriate SA_LV_* status.*/
static uint8_t pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
        uint8_t *scsi3addr)
{
        int ret = PQI_STATUS_SUCCESS;
        uint8_t *sense_data;
        unsigned sense_data_len;
        uint8_t sense_key;
        uint8_t asc;
        uint8_t ascq;
        uint8_t off_status;
        uint8_t scsi_status;
        pqisrc_raid_req_t request;
        raid_path_error_info_elem_t error_info;

        DBG_FUNC("IN\n");

        /* Probe with TEST UNIT READY; the error element carries any sense data */
        memset(&request, 0, sizeof(request));
        ret =  pqisrc_build_send_raid_request(softs, &request, NULL, 0,
                                TEST_UNIT_READY, 0, scsi3addr, &error_info);

        if (ret)
                goto error;
        sense_data = error_info.data;
        sense_data_len = LE_16(error_info.sense_data_len);

        /* Never read past the fixed-size sense buffer in the error element */
        if (sense_data_len > sizeof(error_info.data))
                sense_data_len = sizeof(error_info.data);

        pqisrc_fetch_sense_info(sense_data, sense_data_len, &sense_key, &asc,
                &ascq);

        scsi_status = error_info.status;

        /* scsi status: "CHECK CONDN" /  SK: "not ready" ? */
        if (scsi_status != 2 ||
            sense_key != 2 ||
            asc != ASC_LUN_NOT_READY) {
                /* Anything other than CHECK CONDITION + NOT READY: volume is OK
                 * (note: this early return skips the DBG_FUNC OUT trace) */
                return SA_LV_OK;
        }

        /* Determine the reason for not ready state. */
        off_status = pqisrc_get_volume_offline_status(softs, scsi3addr);

        DBG_DISC("offline_status 0x%x\n", off_status);

        /* Keep volume offline in certain cases. */
        switch (off_status) {
        case SA_LV_UNDERGOING_ERASE:
        case SA_LV_NOT_AVAILABLE:
        case SA_LV_UNDERGOING_RPI:
        case SA_LV_PENDING_RPI:
        case SA_LV_ENCRYPTED_NO_KEY:
        case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
        case SA_LV_UNDERGOING_ENCRYPTION:
        case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
        case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
                return off_status;
        case SA_LV_STATUS_VPD_UNSUPPORTED:
                /*
                 * If the VPD status page isn't available,
                 * use ASC/ASCQ to determine state.
                 */
                if (ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS ||
                    ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)
                        return off_status;
                break;
        }

        /* Any other offline status is treated as transient: report OK */
        DBG_FUNC("OUT\n");

        return SA_LV_OK;

error:
        /* TUR itself failed: report the page as unsupported/unknown */
        return SA_LV_STATUS_VPD_UNSUPPORTED;
}
697
698 /* Validate the RAID map parameters */
699 static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
700         pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
701 {
702         char *error_msg;
703         uint32_t raidmap_size;
704         uint32_t r5or6_blocks_per_row;
705         unsigned phys_dev_num;
706         unsigned num_raidmap_entries;
707
708         DBG_FUNC("IN\n");
709
710         raidmap_size = LE_32(raid_map->structure_size);
711         if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) {
712                 error_msg = "RAID map too small\n";
713                 goto error;
714         }
715
716         if (raidmap_size > sizeof(*raid_map)) {
717                 error_msg = "RAID map too large\n";
718                 goto error;
719         }
720
721         phys_dev_num = LE_16(raid_map->layout_map_count) *
722                 (LE_16(raid_map->data_disks_per_row) +
723                 LE_16(raid_map->metadata_disks_per_row));
724         num_raidmap_entries = phys_dev_num *
725                 LE_16(raid_map->row_cnt);
726
727         if (num_raidmap_entries > RAID_MAP_MAX_ENTRIES) {
728                 error_msg = "invalid number of map entries in RAID map\n";
729                 goto error;
730         }
731
732         if (device->raid_level == SA_RAID_1) {
733                 if (LE_16(raid_map->layout_map_count) != 2) {
734                         error_msg = "invalid RAID-1 map\n";
735                         goto error;
736                 }
737         } else if (device->raid_level == SA_RAID_ADM) {
738                 if (LE_16(raid_map->layout_map_count) != 3) {
739                         error_msg = "invalid RAID-1(ADM) map\n";
740                         goto error;
741                 }
742         } else if ((device->raid_level == SA_RAID_5 ||
743                 device->raid_level == SA_RAID_6) &&
744                 LE_16(raid_map->layout_map_count) > 1) {
745                 /* RAID 50/60 */
746                 r5or6_blocks_per_row =
747                         LE_16(raid_map->strip_size) *
748                         LE_16(raid_map->data_disks_per_row);
749                 if (r5or6_blocks_per_row == 0) {
750                         error_msg = "invalid RAID-5 or RAID-6 map\n";
751                         goto error;
752                 }
753         }
754
755         DBG_FUNC("OUT\n");
756
757         return 0;
758
759 error:
760         DBG_ERR("%s\n", error_msg);
761         return PQI_STATUS_FAILURE;
762 }
763
764 /* Get device raidmap for the requested device */
765 static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
766         pqi_scsi_dev_t *device)
767 {
768         int ret = PQI_STATUS_SUCCESS;
769         pqisrc_raid_req_t request;
770         pqisrc_raid_map_t *raid_map;
771
772         DBG_FUNC("IN\n");
773
774         raid_map = os_mem_alloc(softs, sizeof(*raid_map));
775         if (!raid_map)
776                 return PQI_STATUS_FAILURE;
777
778         memset(&request, 0, sizeof(request));
779         ret =  pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map), 
780                                         SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
781
782         if (ret) {
783                 DBG_ERR("error in build send raid req ret=%d\n", ret);
784                 goto err_out;
785         }
786
787         ret = pqisrc_raid_map_validation(softs, device, raid_map);
788         if (ret) {
789                 DBG_ERR("error in raid map validation ret=%d\n", ret);
790                 goto err_out;
791         }
792
793         device->raid_map = raid_map;
794         DBG_FUNC("OUT\n");
795         return 0;
796
797 err_out:
798         os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
799         DBG_FUNC("FAILED \n");
800         return ret;
801 }
802
803 /* Get device ioaccel_status to validate the type of device */
804 static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
805         pqi_scsi_dev_t *device)
806 {
807         int ret = PQI_STATUS_SUCCESS;
808         uint8_t *buff;
809         uint8_t ioaccel_status;
810
811         DBG_FUNC("IN\n");
812
813         buff = os_mem_alloc(softs, 64);
814         if (!buff)
815                 return;
816
817         ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
818                                         VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64);
819         if (ret) {
820                 DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
821                 goto err_out;
822         }
823
824         ioaccel_status = buff[IOACCEL_STATUS_BYTE];
825         device->offload_config =
826                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
827
828         if (device->offload_config) {
829                 device->offload_enabled_pending =
830                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
831                 if (pqisrc_get_device_raidmap(softs, device))
832                         device->offload_enabled_pending = false;
833         }
834
835         DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n", 
836                         device->offload_config, device->offload_enabled_pending);
837
838 err_out:
839         os_mem_free(softs, (char*)buff, 64);
840         DBG_FUNC("OUT\n");
841 }
842
843 /* Get RAID level of requested device */
844 static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs,
845         pqi_scsi_dev_t *device)
846 {
847         uint8_t raid_level;
848         uint8_t *buff;
849
850         DBG_FUNC("IN\n");
851
852         raid_level = SA_RAID_UNKNOWN;
853
854         buff = os_mem_alloc(softs, 64);
855         if (buff) {
856                 int ret;
857                 ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
858                         VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64);
859                 if (ret == 0) {
860                         raid_level = buff[8];
861                         if (raid_level > SA_RAID_MAX)
862                                 raid_level = SA_RAID_UNKNOWN;
863                 }
864                 os_mem_free(softs, (char*)buff, 64);
865         }
866
867         device->raid_level = raid_level;
868         DBG_DISC("RAID LEVEL: %x \n",  raid_level);
869         DBG_FUNC("OUT\n");
870 }
871
/* Parse the inquiry response and determine the type of device */
static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device)
{
	int ret = PQI_STATUS_SUCCESS;
	uint8_t *inq_buff;

	DBG_FUNC("IN\n");

	inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE);
	if (!inq_buff)
		return PQI_STATUS_FAILURE;

	/* Send an inquiry to the device to see what it is. */
	ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
		OBDR_TAPE_INQ_SIZE);
	if (ret)
		goto err_out;
	/* Standard INQUIRY layout: vendor id at byte 8 (8 bytes),
	 * product id at byte 16 (16 bytes); strip non-printables. */
	pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
	pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);

	/* Peripheral device type is the low 5 bits of byte 0. */
	device->devtype = inq_buff[0] & 0x1f;
	memcpy(device->vendor, &inq_buff[8],
		sizeof(device->vendor));
	memcpy(device->model, &inq_buff[16],
		sizeof(device->model));
	DBG_DISC("DEV_TYPE: %x VENDOR: %s MODEL: %s\n",  device->devtype, device->vendor, device->model);

	if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
		if (pqisrc_is_external_raid_device(device)) {
			/* External RAID volume: no local RAID level or
			 * ioaccel info is available; assume online. */
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = SA_LV_OK;
			device->volume_offline = false;
		} 
		else {
			/* Local logical volume: query RAID level, ioaccel
			 * eligibility, and current volume status. */
			pqisrc_get_dev_raid_level(softs, device);
			pqisrc_get_dev_ioaccel_status(softs, device);
			device->volume_status = pqisrc_get_dev_vol_status(softs,
						device->scsi3addr);
			device->volume_offline = device->volume_status != SA_LV_OK;
		}
	}

	/*
	 * Check if this is a One-Button-Disaster-Recovery device
	 * by looking for "$DR-10" at offset 43 in the inquiry data.
	 */
	device->is_obdr_device = (device->devtype == ROM_DEVICE &&
		memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
			OBDR_SIG_LEN) == 0);
err_out:
	os_mem_free(softs, (char*)inq_buff, OBDR_TAPE_INQ_SIZE);

	DBG_FUNC("OUT\n");
	return ret;
}
928
929 /*
930  * BMIC (Basic Management And Interface Commands) command
931  * to get the controller identify params
932  */
933 static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs,
934         bmic_ident_ctrl_t *buff)
935 {
936         int ret = PQI_STATUS_SUCCESS;
937         pqisrc_raid_req_t request;
938
939         DBG_FUNC("IN\n");
940
941         memset(&request, 0, sizeof(request));   
942         ret =  pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff), 
943                                 BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
944         DBG_FUNC("OUT\n");
945
946         return ret;
947 }
948
949 /* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
950 int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
951 {
952         int ret = PQI_STATUS_SUCCESS;
953         bmic_ident_ctrl_t *identify_ctrl;
954
955         DBG_FUNC("IN\n");
956
957         identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl));
958         if (!identify_ctrl) {
959                 DBG_ERR("failed to allocate memory for identify_ctrl\n");
960                 return PQI_STATUS_FAILURE;
961         }
962
963         memset(identify_ctrl, 0, sizeof(*identify_ctrl));
964
965         ret = pqisrc_identify_ctrl(softs, identify_ctrl);
966         if (ret)
967                 goto out;
968      
969         softs->fw_build_number = identify_ctrl->fw_build_number;
970         memcpy(softs->fw_version, identify_ctrl->fw_version,
971                 sizeof(identify_ctrl->fw_version));
972         softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0';
973         snprintf(softs->fw_version +
974                 strlen(softs->fw_version),
975                 sizeof(softs->fw_version),
976                 "-%u", identify_ctrl->fw_build_number);
977 out:
978         os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
979         DBG_INIT("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
980         DBG_FUNC("OUT\n");
981         return ret;
982 }
983
984 /* BMIC command to determine scsi device identify params */
985 static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
986         pqi_scsi_dev_t *device,
987         bmic_ident_physdev_t *buff,
988         int buf_len)
989 {
990         int ret = PQI_STATUS_SUCCESS;
991         uint16_t bmic_device_index;
992         pqisrc_raid_req_t request;
993
994         DBG_FUNC("IN\n");
995
996         memset(&request, 0, sizeof(request));   
997         bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
998         request.cdb[2] = (uint8_t)bmic_device_index;
999         request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
1000
1001         ret =  pqisrc_build_send_raid_request(softs, &request, buff, buf_len, 
1002                                 BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1003         DBG_FUNC("OUT\n");
1004         return ret;
1005 }
1006
1007 /*
1008  * Function used to get the scsi device information using one of BMIC
1009  * BMIC_IDENTIFY_PHYSICAL_DEVICE
1010  */
1011 static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
1012         pqi_scsi_dev_t *device,
1013         bmic_ident_physdev_t *id_phys)
1014 {
1015         int ret = PQI_STATUS_SUCCESS;
1016
1017         DBG_FUNC("IN\n");
1018         memset(id_phys, 0, sizeof(*id_phys));
1019
1020         ret= pqisrc_identify_physical_disk(softs, device,
1021                 id_phys, sizeof(*id_phys));
1022         if (ret) {
1023                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1024                 return;
1025         }
1026
1027         device->queue_depth =
1028                 LE_16(id_phys->current_queue_depth_limit);
1029         device->device_type = id_phys->device_type;
1030         device->active_path_index = id_phys->active_path_number;
1031         device->path_map = id_phys->redundant_path_present_map;
1032         memcpy(&device->box,
1033                 &id_phys->alternate_paths_phys_box_on_port,
1034                 sizeof(device->box));
1035         memcpy(&device->phys_connector,
1036                 &id_phys->alternate_paths_phys_connector,
1037                 sizeof(device->phys_connector));
1038         device->bay = id_phys->phys_bay_in_box;
1039
1040         DBG_DISC("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n",  device->device_type, device->queue_depth);
1041         DBG_FUNC("OUT\n");
1042 }
1043
1044 /* Function used to find the entry of the device in a list */
1045 static device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
1046         pqi_scsi_dev_t *device_to_find,
1047         pqi_scsi_dev_t **same_device)
1048 {
1049         pqi_scsi_dev_t *device;
1050         int i,j;
1051         DBG_FUNC("IN\n");
1052         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1053                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1054                         if(softs->device_list[i][j] == NULL)
1055                                 continue;
1056                         device = softs->device_list[i][j];
1057                         if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
1058                                 device->scsi3addr)) {
1059                                 *same_device = device;
1060                                 if (pqisrc_device_equal(device_to_find, device)) {
1061                                         if (device_to_find->volume_offline)
1062                                                 return DEVICE_CHANGED;
1063                                         return DEVICE_UNCHANGED;
1064                                 }
1065                                 return DEVICE_CHANGED;
1066                         }
1067                 }
1068         }
1069         DBG_FUNC("OUT\n");
1070
1071         return DEVICE_NOT_FOUND;
1072 }
1073
/* Update the newly added devices as existed device */
static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device_exist,
	pqi_scsi_dev_t *new_device)
{
	DBG_FUNC("IN\n");
	/* Refresh the existing list entry with the freshly scanned
	 * attributes; identity fields (scsi3addr, B/T/L) stay as-is. */
	device_exist->expose_device = new_device->expose_device;
	memcpy(device_exist->vendor, new_device->vendor,
		sizeof(device_exist->vendor));
	memcpy(device_exist->model, new_device->model,
		sizeof(device_exist->model));
	device_exist->is_physical_device = new_device->is_physical_device;
	device_exist->is_external_raid_device =
		new_device->is_external_raid_device;
	device_exist->sas_address = new_device->sas_address;
	device_exist->raid_level = new_device->raid_level;
	device_exist->queue_depth = new_device->queue_depth;
	device_exist->ioaccel_handle = new_device->ioaccel_handle;
	device_exist->volume_status = new_device->volume_status;
	device_exist->active_path_index = new_device->active_path_index;
	device_exist->path_map = new_device->path_map;
	device_exist->bay = new_device->bay;
	memcpy(device_exist->box, new_device->box,
		sizeof(device_exist->box));
	memcpy(device_exist->phys_connector, new_device->phys_connector,
		sizeof(device_exist->phys_connector));
	device_exist->offload_config = new_device->offload_config;
	/* Offload is disabled here and re-committed from
	 * offload_enabled_pending once the device list update completes. */
	device_exist->offload_enabled = false;
	device_exist->offload_enabled_pending =
		new_device->offload_enabled_pending;
	device_exist->offload_to_mirror = 0;
	/* Adopt the new raid map, releasing the stale one first. */
	if (device_exist->raid_map)
		os_mem_free(softs,
			    (char *)device_exist->raid_map,
			    sizeof(*device_exist->raid_map));
	device_exist->raid_map = new_device->raid_map;
	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
	DBG_FUNC("OUT\n");
}
1114
1115 /* Validate the ioaccel_handle for a newly added device */
1116 static pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
1117         pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
1118 {
1119         pqi_scsi_dev_t *device;
1120         int i,j;
1121         DBG_FUNC("IN\n");       
1122         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1123                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1124                         if(softs->device_list[i][j] == NULL)
1125                                 continue;
1126                         device = softs->device_list[i][j];
1127                         if (device->devtype != DISK_DEVICE)
1128                                 continue;
1129                         if (pqisrc_is_logical_device(device))
1130                                 continue;
1131                         if (device->ioaccel_handle == ioaccel_handle)
1132                                 return device;
1133                 }
1134         }
1135         DBG_FUNC("OUT\n");
1136
1137         return NULL;
1138 }
1139
1140 /* Get the scsi device queue depth */
1141 static void pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
1142 {
1143         unsigned i;
1144         unsigned phys_dev_num;
1145         unsigned num_raidmap_entries;
1146         unsigned queue_depth;
1147         pqisrc_raid_map_t *raid_map;
1148         pqi_scsi_dev_t *device;
1149         raidmap_data_t *dev_data;
1150         pqi_scsi_dev_t *phys_disk;
1151         unsigned j;
1152         unsigned k;
1153
1154         DBG_FUNC("IN\n");
1155
1156         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1157                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1158                         if(softs->device_list[i][j] == NULL)
1159                                 continue;
1160                         device = softs->device_list[i][j];
1161                         if (device->devtype != DISK_DEVICE)
1162                                 continue;
1163                         if (!pqisrc_is_logical_device(device))
1164                                 continue;
1165                         if (pqisrc_is_external_raid_device(device))
1166                                 continue;
1167                         device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1168                         raid_map = device->raid_map;
1169                         if (!raid_map)
1170                                 return;
1171                         dev_data = raid_map->dev_data;
1172                         phys_dev_num = LE_16(raid_map->layout_map_count) *
1173                                         (LE_16(raid_map->data_disks_per_row) +
1174                                         LE_16(raid_map->metadata_disks_per_row));
1175                         num_raidmap_entries = phys_dev_num *
1176                                                 LE_16(raid_map->row_cnt);
1177
1178                         queue_depth = 0;
1179                         for (k = 0; k < num_raidmap_entries; k++) {
1180                                 phys_disk = pqisrc_identify_device_via_ioaccel(softs,
1181                                                 dev_data[k].ioaccel_handle);
1182
1183                                 if (!phys_disk) {
1184                                         DBG_WARN(
1185                                         "Failed to find physical disk handle for logical drive %016llx\n",
1186                                                 (unsigned long long)BE_64(device->scsi3addr[0]));
1187                                         device->offload_enabled = false;
1188                                         device->offload_enabled_pending = false;
1189                                         if (raid_map)
1190                                                 os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
1191                                         device->raid_map = NULL;
1192                                         return;
1193                                 }
1194
1195                                 queue_depth += phys_disk->queue_depth;
1196                         }
1197
1198                         device->queue_depth = queue_depth;
1199                 } /* end inner loop */
1200         }/* end outer loop */
1201         DBG_FUNC("OUT\n");
1202 }
1203
1204 /* Function used to add a scsi device to OS scsi subsystem */
1205 static int pqisrc_add_device(pqisrc_softstate_t *softs,
1206         pqi_scsi_dev_t *device)
1207 {
1208         DBG_FUNC("IN\n");
1209         DBG_DISC("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1210                 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1211
1212         device->invalid = false;
1213
1214         if(device->expose_device) {
1215                 /* TBD: Call OS upper layer function to add the device entry */
1216                 os_add_device(softs,device);
1217         }
1218         DBG_FUNC("OUT\n");
1219         return PQI_STATUS_SUCCESS;
1220
1221 }
1222
1223 /* Function used to remove a scsi device from OS scsi subsystem */
1224 void pqisrc_remove_device(pqisrc_softstate_t *softs,
1225         pqi_scsi_dev_t *device)
1226 {
1227         DBG_FUNC("IN\n");
1228         DBG_DISC("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1229                 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1230
1231         /* TBD: Call OS upper layer function to remove the device entry */
1232         device->invalid = true;
1233         os_remove_device(softs,device);
1234         DBG_FUNC("OUT\n");
1235 }
1236
1237 /*
1238  * When exposing new device to OS fails then adjst list according to the
1239  * mid scsi list
1240  */
1241 static void pqisrc_adjust_list(pqisrc_softstate_t *softs,
1242         pqi_scsi_dev_t *device)
1243 {
1244         DBG_FUNC("IN\n");
1245
1246         if (!device) {
1247                 DBG_ERR("softs = %p: device is NULL !!!\n", softs);
1248                 return;
1249         }
1250
1251         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1252         softs->device_list[device->target][device->lun] = NULL;
1253         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1254         pqisrc_device_mem_free(softs, device);
1255
1256         DBG_FUNC("OUT\n");
1257 }
1258
1259 /* Debug routine used to display the RAID volume status of the device */
1260 static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
1261         pqi_scsi_dev_t *device)
1262 {
1263         char *status;
1264
1265         DBG_FUNC("IN\n");
1266         switch (device->volume_status) {
1267         case SA_LV_OK:
1268                 status = "Volume is online.";
1269                 break;
1270         case SA_LV_UNDERGOING_ERASE:
1271                 status = "Volume is undergoing background erase process.";
1272                 break;
1273         case SA_LV_NOT_AVAILABLE:
1274                 status = "Volume is waiting for transforming volume.";
1275                 break;
1276         case SA_LV_UNDERGOING_RPI:
1277                 status = "Volume is undergoing rapid parity initialization process.";
1278                 break;
1279         case SA_LV_PENDING_RPI:
1280                 status = "Volume is queued for rapid parity initialization process.";
1281                 break;
1282         case SA_LV_ENCRYPTED_NO_KEY:
1283                 status = "Volume is encrypted and cannot be accessed because key is not present.";
1284                 break;
1285         case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1286                 status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.";
1287                 break;
1288         case SA_LV_UNDERGOING_ENCRYPTION:
1289                 status = "Volume is undergoing encryption process.";
1290                 break;
1291         case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1292                 status = "Volume is undergoing encryption re-keying process.";
1293                 break;
1294         case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1295                 status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled.";
1296                 break;
1297         case SA_LV_PENDING_ENCRYPTION:
1298                 status = "Volume is pending migration to encrypted state, but process has not started.";
1299                 break;
1300         case SA_LV_PENDING_ENCRYPTION_REKEYING:
1301                 status = "Volume is encrypted and is pending encryption rekeying.";
1302                 break;
1303         case SA_LV_STATUS_VPD_UNSUPPORTED:
1304                 status = "Volume status is not available through vital product data pages.";
1305                 break;
1306         default:
1307                 status = "Volume is in an unknown state.";
1308                 break;
1309         }
1310
1311         DBG_DISC("scsi BTL %d:%d:%d %s\n",
1312                 device->bus, device->target, device->lun, status);
1313         DBG_FUNC("OUT\n");
1314 }
1315
1316 void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1317 {
1318         DBG_FUNC("IN\n");
1319         if (!device)
1320                 return;
1321         if (device->raid_map) {
1322                         os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
1323         }
1324         os_mem_free(softs, (char *)device,sizeof(*device));
1325         DBG_FUNC("OUT\n");
1326
1327 }
1328
1329 /* OS should call this function to free the scsi device */
1330 void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
1331 {
1332
1333                 OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1334                 if (!pqisrc_is_logical_device(device)) {
1335                         pqisrc_free_tid(softs,device->target);
1336                 }
1337                 pqisrc_device_mem_free(softs, device);
1338                 OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1339
1340 }
1341
/* Update the newly added devices to the device list */
static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *new_device_list[], int num_new_devices)
{
	int ret;
	int i;
	device_status_t dev_status;
	pqi_scsi_dev_t *device;
	pqi_scsi_dev_t *same_device;
	pqi_scsi_dev_t **added = NULL;
	pqi_scsi_dev_t **removed = NULL;
	int nadded = 0, nremoved = 0;
	int j;
	int tid = 0;

	DBG_FUNC("IN\n");

	/* Scratch arrays used to defer OS add/remove callbacks until the
	 * devlist spinlock has been dropped. */
	added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES);
	removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES);

	if (!added || !removed) {
		DBG_WARN("Out of memory \n");
		goto free_and_out;
	}

	OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);

	/* Assume every known device is gone until the new scan proves
	 * otherwise below. */
	for(i = 0; i < PQI_MAX_DEVICES; i++) {
		for(j = 0; j < PQI_MAX_MULTILUN; j++) {
			if(softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			device->device_gone = true;
		}
	}
	DBG_IO("Device list used an array\n");
	/* Classify each freshly scanned device against the current list. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		dev_status = pqisrc_scsi_find_entry(softs, device,
			&same_device);

		switch (dev_status) {
		case DEVICE_UNCHANGED:
			/* New Device present in existing device list  */
			device->new_device = false;
			same_device->device_gone = false;
			pqisrc_exist_device_update(softs, same_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/* Device not found in existing list */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/* Actual device gone need to add device to list*/
			device->new_device = true;
			break;
		default:
			break;
		}
	}
	/* Process all devices that have gone away. */
	for(i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) {
		for(j = 0; j < PQI_MAX_MULTILUN; j++) {
			if(softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			if (device->device_gone) {
				softs->device_list[device->target][device->lun] = NULL;
				removed[nremoved] = device;
				nremoved++;
			}
		}
	}

	/* Process all new devices. */
	for (i = 0, nadded = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;

		/* physical device */
		if (!pqisrc_is_logical_device(device)) {
			/* Physical devices get their target id from the
			 * driver's tid pool. */
			tid = pqisrc_alloc_tid(softs);
			if(INVALID_ELEM != tid)
				pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
		}

		softs->device_list[device->target][device->lun] = device;
		DBG_DISC("Added device %p at B : %d T : %d L : %d\n",device,
			device->bus,device->target,device->lun);
		/* To prevent this entry from being freed later. */
		new_device_list[i] = NULL;
		added[nadded] = device;
		nadded++;
	}

	pqisrc_update_log_dev_qdepth(softs);

	/* Commit the pending ioaccel state for every device. */
	for(i = 0; i < PQI_MAX_DEVICES; i++) {
		for(j = 0; j < PQI_MAX_MULTILUN; j++) {
			if(softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			device->offload_enabled = device->offload_enabled_pending;
		}
	}

	OS_RELEASE_SPINLOCK(&softs->devlist_lock);

	/* OS callbacks below must run without the devlist lock held. */
	for(i = 0; i < nremoved; i++) {
		device = removed[i];
		if (device == NULL)
			continue;
		pqisrc_remove_device(softs, device);
		pqisrc_display_device_info(softs, "removed", device);

	}

	for(i = 0; i < PQI_MAX_DEVICES; i++) {
		for(j = 0; j < PQI_MAX_MULTILUN; j++) {
			if(softs->device_list[i][j] == NULL)
				continue;
			device = softs->device_list[i][j];
			/*
			* Notify the OS upper layer if the queue depth of any existing device has
			* changed.
			*/
			if (device->queue_depth !=
				device->advertised_queue_depth) {
				device->advertised_queue_depth = device->queue_depth;
				/* TBD: Call OS upper layer function to change device Q depth */
			}
		}
	}
	for(i = 0; i < nadded; i++) {
		device = added[i];
		if (device->expose_device) {
			ret = pqisrc_add_device(softs, device);
			if (ret) {
				DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
					device->bus, device->target,
					device->lun);
				/* Drop the list entry so the driver's view
				 * matches the OS mid-layer. */
				pqisrc_adjust_list(softs, device);
				continue;
			}
		}

		pqisrc_display_device_info(softs, "added", device);
	}

	/* Process all volumes that are offline. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device)
			continue;
		if (!device->new_device)
			continue;
		if (device->volume_offline) {
			pqisrc_display_volume_status(softs, device);
			pqisrc_display_device_info(softs, "offline", device);
		}
	}

free_and_out:
	if (added)
		os_mem_free(softs, (char *)added,
			    sizeof(*added) * PQI_MAX_DEVICES);
	if (removed)
		os_mem_free(softs, (char *)removed,
			    sizeof(*removed) * PQI_MAX_DEVICES);

	DBG_FUNC("OUT\n");
}
1518
/*
 * Let the Adapter know about driver version using one of BMIC
 * BMIC_WRITE_HOST_WELLNESS
 */
int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
{
	int rval = PQI_STATUS_SUCCESS;
	struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
	size_t data_length;
	pqisrc_raid_req_t request;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	data_length = sizeof(*host_wellness_driver_ver);

	host_wellness_driver_ver = os_mem_alloc(softs, data_length);
	if (!host_wellness_driver_ver) {
		DBG_ERR("failed to allocate memory for host wellness driver_version\n");
		return PQI_STATUS_FAILURE;
	}

	/* Payload framing: "<HW>" start tag, "DV" driver-version field tag,
	 * field length, the version string, then "ZZ" end tag. */
	host_wellness_driver_ver->start_tag[0] = '<';
	host_wellness_driver_ver->start_tag[1] = 'H';
	host_wellness_driver_ver->start_tag[2] = 'W';
	host_wellness_driver_ver->start_tag[3] = '>';
	host_wellness_driver_ver->driver_version_tag[0] = 'D';
	host_wellness_driver_ver->driver_version_tag[1] = 'V';
	host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version));
	/* Version string is "<os_name><PQISRC_DRIVER_VERSION>", truncated to
	 * the field size and explicitly NUL-terminated below.
	 * NOTE(review): strncpy does not NUL-terminate on truncation (the
	 * terminator write below covers that), and bytes between the string
	 * and the end of the field are only zero if the strncpy zero-fill
	 * reached them or os_mem_alloc returns zeroed memory -- verify,
	 * since the whole buffer is sent to the controller. */
	strncpy(host_wellness_driver_ver->driver_version, softs->os_name,
	sizeof(host_wellness_driver_ver->driver_version));
    if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version) ) {
        strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name), PQISRC_DRIVER_VERSION,
			sizeof(host_wellness_driver_ver->driver_version) -  strlen(softs->os_name));
    } else {
        DBG_DISC("OS name length(%lu) is longer than buffer of driver_version\n",
            strlen(softs->os_name));
    }
	host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
	host_wellness_driver_ver->end_tag[0] = 'Z';
	host_wellness_driver_ver->end_tag[1] = 'Z';

	rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver,data_length,
					BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);

	os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);

	DBG_FUNC("OUT");
	return rval;
}
1569
/* 
 * Write current RTC time from host to the adapter using
 * BMIC_WRITE_HOST_WELLNESS
 */
int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
{
	int rval = PQI_STATUS_SUCCESS;
	struct bmic_host_wellness_time *host_wellness_time;
	size_t data_length;
	pqisrc_raid_req_t request;

	DBG_FUNC("IN\n");

	memset(&request, 0, sizeof(request));
	data_length = sizeof(*host_wellness_time);

	host_wellness_time = os_mem_alloc(softs, data_length);
	if (!host_wellness_time) {
		DBG_ERR("failed to allocate memory for host wellness time structure\n");
		return PQI_STATUS_FAILURE;
	}

	/* Payload framing: "<HW>" start tag, "TD" time field tag, field
	 * length, time fields, "DW" dont-write tag, "ZZ" end tag. */
	host_wellness_time->start_tag[0] = '<';
	host_wellness_time->start_tag[1] = 'H';
	host_wellness_time->start_tag[2] = 'W';
	host_wellness_time->start_tag[3] = '>';
	host_wellness_time->time_tag[0] = 'T';
	host_wellness_time->time_tag[1] = 'D';
	/* NOTE(review): this length is offsetof(time_length) -
	 * offsetof(century), which is only a positive, correct field length
	 * if 'century' precedes 'time_length' in
	 * struct bmic_host_wellness_time -- verify against the structure
	 * definition in the header. */
	host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) - 
											offsetof(struct bmic_host_wellness_time, century));

	/* Fill in the current RTC time fields from the OS. */
	os_get_time(host_wellness_time);

	host_wellness_time->dont_write_tag[0] = 'D';
	host_wellness_time->dont_write_tag[1] = 'W';
	host_wellness_time->end_tag[0] = 'Z';
	host_wellness_time->end_tag[1] = 'Z';

	rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time,data_length,
					BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);

	os_mem_free(softs, (char *)host_wellness_time, data_length);

	DBG_FUNC("OUT");
	return rval;
}
1616
1617 /*
1618  * Function used to perform a rescan of scsi devices
1619  * for any config change events
1620  */
1621 int pqisrc_scan_devices(pqisrc_softstate_t *softs)
1622 {
1623         boolean_t is_physical_device;
1624         int ret = PQI_STATUS_FAILURE;
1625         int i;
1626         int new_dev_cnt;
1627         int phy_log_dev_cnt;
1628         uint8_t *scsi3addr;
1629         uint32_t physical_cnt;
1630         uint32_t logical_cnt;
1631         uint32_t ndev_allocated = 0;
1632         size_t phys_data_length, log_data_length;
1633         reportlun_data_ext_t *physical_dev_list = NULL;
1634         reportlun_data_ext_t *logical_dev_list = NULL;
1635         reportlun_ext_entry_t *lun_ext_entry = NULL;
1636         bmic_ident_physdev_t *bmic_phy_info = NULL;
1637         pqi_scsi_dev_t **new_device_list = NULL;
1638         pqi_scsi_dev_t *device = NULL;
1639
1640         DBG_FUNC("IN\n");
1641
1642         ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
1643                                       &phys_data_length, &log_data_length);
1644
1645         if (ret)
1646                 goto err_out;
1647
1648         physical_cnt = BE_32(physical_dev_list->header.list_length) 
1649                 / sizeof(physical_dev_list->lun_entries[0]);
1650
1651         logical_cnt = BE_32(logical_dev_list->header.list_length)
1652                 / sizeof(logical_dev_list->lun_entries[0]);
1653
1654         DBG_DISC("physical_cnt %d logical_cnt %d\n", physical_cnt, logical_cnt);
1655
1656         if (physical_cnt) {
1657                 bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
1658                 if (bmic_phy_info == NULL) {
1659                         ret = PQI_STATUS_FAILURE;
1660                         DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret);
1661                         goto err_out;
1662                 }
1663         }
1664         phy_log_dev_cnt = physical_cnt + logical_cnt;
1665         new_device_list = os_mem_alloc(softs,
1666                                 sizeof(*new_device_list) * phy_log_dev_cnt);
1667
1668         if (new_device_list == NULL) {
1669                 ret = PQI_STATUS_FAILURE;
1670                 DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1671                 goto err_out;
1672         }
1673
1674         for (i = 0; i < phy_log_dev_cnt; i++) {
1675                 new_device_list[i] = os_mem_alloc(softs,
1676                                                 sizeof(*new_device_list[i]));
1677                 if (new_device_list[i] == NULL) {
1678                         ret = PQI_STATUS_FAILURE;
1679                         DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1680                         ndev_allocated = i;
1681                         goto err_out;
1682                 }
1683         }
1684
1685         ndev_allocated = phy_log_dev_cnt;
1686         new_dev_cnt = 0;
1687         for (i = 0; i < phy_log_dev_cnt; i++) {
1688                 if (i < physical_cnt) {
1689                         is_physical_device = true;
1690                         lun_ext_entry = &physical_dev_list->lun_entries[i];
1691                 } else {
1692                         is_physical_device = false;
1693                         lun_ext_entry =
1694                                 &logical_dev_list->lun_entries[i - physical_cnt];
1695                 }
1696
1697                 scsi3addr = lun_ext_entry->lunid;
1698                 /* Save the target sas adderess for external raid device */
1699                 if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
1700                         int target = lun_ext_entry->lunid[3] & 0x3f;
1701                         softs->target_sas_addr[target] = BE_64(lun_ext_entry->wwid);
1702                 }
1703
1704                 /* Skip masked physical non-disk devices. */
1705                 if (MASKED_DEVICE(scsi3addr) && is_physical_device
1706                                 && (lun_ext_entry->ioaccel_handle == 0))
1707                         continue;
1708
1709                 device = new_device_list[new_dev_cnt];
1710                 memset(device, 0, sizeof(*device));
1711                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1712                 device->wwid = lun_ext_entry->wwid;
1713                 device->is_physical_device = is_physical_device;
1714                 if (!is_physical_device)
1715                         device->is_external_raid_device =
1716                                 pqisrc_is_external_raid_addr(scsi3addr);
1717                 
1718
1719                 /* Get device type, vendor, model, device ID. */
1720                 ret = pqisrc_get_dev_data(softs, device);
1721                 if (ret) {
1722                         DBG_WARN("Inquiry failed, skipping device %016llx\n",
1723                                  (unsigned long long)BE_64(device->scsi3addr[0]));
1724                         DBG_DISC("INQUIRY FAILED \n");
1725                         continue;
1726                 }
1727                 pqisrc_assign_btl(device);
1728
1729                 /*
1730                  * Expose all devices except for physical devices that
1731                  * are masked.
1732                  */
1733                 if (device->is_physical_device &&
1734                         MASKED_DEVICE(scsi3addr))
1735                         device->expose_device = false;
1736                 else
1737                         device->expose_device = true;
1738
1739                 if (device->is_physical_device &&
1740                     (lun_ext_entry->device_flags &
1741                      REPORT_LUN_DEV_FLAG_AIO_ENABLED) &&
1742                      lun_ext_entry->ioaccel_handle) {
1743                         device->aio_enabled = true;
1744                 }
1745                 switch (device->devtype) {
1746                 case ROM_DEVICE:
1747                         /*
1748                          * We don't *really* support actual CD-ROM devices,
1749                          * but we do support the HP "One Button Disaster
1750                          * Recovery" tape drive which temporarily pretends to
1751                          * be a CD-ROM drive.
1752                          */
1753                         if (device->is_obdr_device)
1754                                 new_dev_cnt++;
1755                         break;
1756                 case DISK_DEVICE:
1757                 case ZBC_DEVICE:
1758                         if (device->is_physical_device) {
1759                                 device->ioaccel_handle =
1760                                         lun_ext_entry->ioaccel_handle;
1761                                 device->sas_address = BE_64(lun_ext_entry->wwid);
1762                                 pqisrc_get_physical_device_info(softs, device,
1763                                         bmic_phy_info);
1764                         }
1765                         new_dev_cnt++;
1766                         break;
1767                 case ENCLOSURE_DEVICE:
1768                         if (device->is_physical_device) {
1769                                 device->sas_address = BE_64(lun_ext_entry->wwid);
1770                         }
1771                         new_dev_cnt++;
1772                         break;  
1773                 case TAPE_DEVICE:
1774                 case MEDIUM_CHANGER_DEVICE:
1775                         new_dev_cnt++;
1776                         break;
1777                 case RAID_DEVICE:
1778                         /*
1779                          * Only present the HBA controller itself as a RAID
1780                          * controller.  If it's a RAID controller other than
1781                          * the HBA itself (an external RAID controller, MSA500
1782                          * or similar), don't present it.
1783                          */
1784                         if (pqisrc_is_hba_lunid(scsi3addr))
1785                                 new_dev_cnt++;
1786                         break;
1787                 case SES_DEVICE:
1788                 case CONTROLLER_DEVICE:
1789                         break;
1790                 }
1791         }
1792         DBG_DISC("new_dev_cnt %d\n", new_dev_cnt);
1793
1794         pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
1795
1796 err_out:
1797         if (new_device_list) {
1798                 for (i = 0; i < ndev_allocated; i++) {
1799                         if (new_device_list[i]) {
1800                                 if(new_device_list[i]->raid_map)
1801                                         os_mem_free(softs, (char *)new_device_list[i]->raid_map,
1802                                                                                 sizeof(pqisrc_raid_map_t));
1803                                 os_mem_free(softs, (char*)new_device_list[i],
1804                                                                 sizeof(*new_device_list[i]));
1805                         }
1806                 }
1807                 os_mem_free(softs, (char *)new_device_list,
1808                                         sizeof(*new_device_list) * ndev_allocated); 
1809         }
1810         if(physical_dev_list)
1811                 os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
1812         if(logical_dev_list)
1813                 os_mem_free(softs, (char *)logical_dev_list, log_data_length);
1814         if (bmic_phy_info)
1815                 os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
1816
1817         DBG_FUNC("OUT \n");
1818
1819         return ret;
1820 }
1821
1822 /*
1823  * Clean up memory allocated for devices.
1824  */
1825 void pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
1826 {
1827
1828         int i = 0,j = 0;
1829         pqi_scsi_dev_t *dvp = NULL;
1830         DBG_FUNC("IN\n");
1831
1832         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1833                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1834                         if (softs->device_list[i][j] == NULL) 
1835                                 continue;
1836                         dvp = softs->device_list[i][j];
1837                         pqisrc_device_mem_free(softs, dvp);
1838                 }
1839         }
1840         DBG_FUNC("OUT\n");
1841 }