1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /* $FreeBSD$ */
28
29 #include "smartpqi_includes.h"
30
31 /* Validate the scsi sense response code */
32 static inline boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
33 {
34         DBG_FUNC("IN\n");
35
36         if (!sshdr)
37                 return false;
38
39         DBG_FUNC("OUT\n");
40
41         return (sshdr->response_code & 0x70) == 0x70;
42 }
43
44 /* Initialize target ID pool for HBA/PDs */
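/*
 * The pool is a simple LIFO stack of target IDs for physical devices.  It is
 * seeded with IDs descending from (PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1),
 * and tid_pool.index always points at the next ID to hand out.
 * Illustrative usage (mirroring pqisrc_update_device_list/pqisrc_free_device below):
 *
 *     int tid = pqisrc_alloc_tid(softs);
 *     if (tid != INVALID_ELEM)
 *             pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
 *     ...
 *     pqisrc_free_tid(softs, device->target);
 */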
45 void  pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
46 {
47         int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1;
48
49         for(i = 0; i < PQI_MAX_PHYSICALS; i++) {
50                 softs->tid_pool.tid[i] = tid--;
51         }
52         softs->tid_pool.index = i - 1;
53 }
54
55 int pqisrc_alloc_tid(pqisrc_softstate_t *softs)
56 {
57         if(softs->tid_pool.index <= -1) {
58                 DBG_ERR("Target ID exhausted\n");
59                 return INVALID_ELEM;
60         }
61         
62         return  softs->tid_pool.tid[softs->tid_pool.index--];
63 }
64
65 void pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
66 {
67         if(softs->tid_pool.index >= PQI_MAX_PHYSICALS) {
68                 DBG_ERR("Target ID queue is full\n");
69                 return;
70         }
71         
72         softs->tid_pool.index++;
73         softs->tid_pool.tid[softs->tid_pool.index] = tid;
74 }
75
76 /* Copy SCSI sense information from the response buffer into a local sense header */
77 boolean_t pqisrc_update_scsi_sense(const uint8_t *buff, int len,
78                               struct sense_header_scsi *header)
79 {
80
81         DBG_FUNC("IN\n");
82
83         if (!buff || !len)
84                 return false;
85
86         memset(header, 0, sizeof(struct sense_header_scsi));
87
88         header->response_code = (buff[0] & 0x7f);
89
90         if (!pqisrc_scsi_sense_valid(header))
91                 return false;
92
93         if (header->response_code >= 0x72) {
94                 /* descriptor format */
95                 if (len > 1)
96                         header->sense_key = (buff[1] & 0xf);
97                 if (len > 2)
98                         header->asc = buff[2];
99                 if (len > 3)
100                         header->ascq = buff[3];
101                 if (len > 7)
102                         header->additional_length = buff[7];
103         } else {
104                  /* fixed format */
105                 if (len > 2)
106                         header->sense_key = (buff[2] & 0xf);
107                 if (len > 7) {
108                         len = (len < (buff[7] + 8)) ?
109                                         len : (buff[7] + 8);
110                         if (len > 12)
111                                 header->asc = buff[12];
112                         if (len > 13)
113                                 header->ascq = buff[13];
114                 }
115         }
116
117         DBG_FUNC("OUT\n");
118
119         return true;
120 }
121
122 /*
123  * Function used to build the internal raid request and analyze the response
124  */
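/*
 * Flow: optionally allocate a DMA buffer and attach it as the single SG
 * descriptor, fill in the RAID-path IU header and a command-specific CDB,
 * obtain a tag/rcb, submit on the default operational IB queue, wait for
 * completion, copy any returned data back to the caller's buffer, and
 * translate the error information (a data underflow is treated as success).
 */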
125 int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs,  pqisrc_raid_req_t *request,
126                             void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
127                             raid_path_error_info_elem_t *error_info)
128 {
129         
130         uint8_t *cdb;
131         int ret = PQI_STATUS_SUCCESS;
132         uint32_t tag = 0;
133         struct dma_mem device_mem;
134         sgt_t *sgd;
135
136         ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
137         ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
138
139         rcb_t *rcb = NULL;
140         
141         DBG_FUNC("IN\n");
142
143         memset(&device_mem, 0, sizeof(struct dma_mem));
144
145         /* For TEST UNIT READY (TUR), datasize is 0 and buff is NULL, so no DMA buffer is set up. */
146         if (datasize) {
147                 device_mem.tag = "device_mem";
148                 device_mem.size = datasize;
149                 device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;
150
151                 ret = os_dma_mem_alloc(softs, &device_mem);
152         
153                 if (ret) {
154                         DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
155                         return ret;
156                 }
157
158                 sgd = (sgt_t *)&request->sg_descriptors[0];
159
160                 sgd->addr = device_mem.dma_addr;
161                 sgd->len = datasize;
162                 sgd->flags = SG_FLAG_LAST;
163
164         }
165
166         /* Build raid path request */
167         request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
168
169         request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t,
170                                                         sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH);
171         request->buffer_length = LE_32(datasize);
172         memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
173         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
174         request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
175
176         cdb = request->cdb;
177
178         switch (cmd) {
179         case SA_INQUIRY:
180                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
181                 cdb[0] = SA_INQUIRY;
182                 if (vpd_page & VPD_PAGE) {
183                         cdb[1] = 0x1;
184                         cdb[2] = (uint8_t)vpd_page;
185                 }
186                 cdb[4] = (uint8_t)datasize;
187                 break;
188         case SA_REPORT_LOG:
189         case SA_REPORT_PHYS:
190                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
191                 cdb[0] = cmd;
192                 if (cmd == SA_REPORT_PHYS)
193                         cdb[1] = SA_REPORT_PHYS_EXTENDED;
194                 else
195                         cdb[1] = SA_REPORT_LOG_EXTENDED;
196                 cdb[8] = (uint8_t)((datasize) >> 8);
197                 cdb[9] = (uint8_t)datasize;
198                 break;
199         case TEST_UNIT_READY:
200                 request->data_direction = SOP_DATA_DIR_NONE;
201                 break;
202         case SA_GET_RAID_MAP:
203                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
204                 cdb[0] = SA_CISS_READ;
205                 cdb[1] = cmd;
206                 cdb[8] = (uint8_t)((datasize) >> 8);
207                 cdb[9] = (uint8_t)datasize;
208                 break;
209         case SA_CACHE_FLUSH:
210                 request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
211                 memcpy(device_mem.virt_addr, buff, datasize);
212                 cdb[0] = BMIC_WRITE;
213                 cdb[6] = BMIC_CACHE_FLUSH;
214                 cdb[7] = (uint8_t)((datasize) >> 8);   /* transfer length MSB */
215                 cdb[8] = (uint8_t)datasize;            /* transfer length LSB */
216                 break;
217         case BMIC_IDENTIFY_CONTROLLER:
218         case BMIC_IDENTIFY_PHYSICAL_DEVICE:
219                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
220                 cdb[0] = BMIC_READ;
221                 cdb[6] = cmd;
222                 cdb[7] = (uint8_t)((datasize) >> 8);   /* transfer length MSB */
223                 cdb[8] = (uint8_t)datasize;            /* transfer length LSB */
224                 break;
225         case BMIC_WRITE_HOST_WELLNESS:
226                 request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
227                 memcpy(device_mem.virt_addr, buff, datasize);
228                 cdb[0] = BMIC_WRITE;
229                 cdb[6] = cmd;
230                 cdb[7] = (uint8_t)((datasize) >> 8);   /* transfer length MSB */
231                 cdb[8] = (uint8_t)datasize;            /* transfer length LSB */
232                 break;
233         case BMIC_SENSE_SUBSYSTEM_INFORMATION:
234                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
235                 cdb[0] = BMIC_READ;
236                 cdb[6] = cmd;
237                 cdb[7] = (uint8_t)((datasize) >> 8);   /* transfer length MSB */
238                 cdb[8] = (uint8_t)datasize;            /* transfer length LSB */
239                 break;  
240         default:
241                 DBG_ERR("unknown command 0x%x\n", cmd);
242                 break;
243         }
244
245         tag = pqisrc_get_tag(&softs->taglist);
246         if (INVALID_ELEM == tag) {
247                 DBG_ERR("Tag not available\n");
248                 ret = PQI_STATUS_FAILURE;
249                 goto err_notag;
250         }
251
252         ((pqisrc_raid_req_t *)request)->request_id = tag;
253         ((pqisrc_raid_req_t *)request)->error_index = ((pqisrc_raid_req_t *)request)->request_id;
254         ((pqisrc_raid_req_t *)request)->response_queue_id = ob_q->q_id;
255         rcb = &softs->rcb[tag];
256         rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
257         rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
258
259         rcb->req_pending = true;
260         rcb->tag = tag;
261         /* Submit Command */
262         ret = pqisrc_submit_cmnd(softs, ib_q, request);
263
264         if (ret != PQI_STATUS_SUCCESS) {
265                 DBG_ERR("Unable to submit command\n");
266                 goto err_out;
267         }
268
269         ret = pqisrc_wait_on_condition(softs, rcb);
270         if (ret != PQI_STATUS_SUCCESS) {
271                 DBG_ERR("Internal RAID request timed out: cmd : 0x%x\n", cmd);
272                 goto err_out;
273         }
274
275         if (datasize) {
276                 if (buff) {
277                         memcpy(buff, device_mem.virt_addr, datasize);
278                 }
279                 os_dma_mem_free(softs, &device_mem);
280         }
281         
282         ret = rcb->status;
283         if (ret) {
284                 if(error_info) {
285                         memcpy(error_info, 
286                                rcb->error_info,
287                                sizeof(*error_info));
288
289                         if (error_info->data_out_result ==
290                             PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
291                                 ret = PQI_STATUS_SUCCESS;
292                         }
293                         else{
294                                 DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x," 
295                                         "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr), 
296                                         BMIC_GET_LEVEL_TWO_TARGET(scsi3addr), 
297                                         cmd, ret);
298                                 ret = PQI_STATUS_FAILURE;
299                         }
300                 }
301         } else {
302                 if(error_info) {
303                         ret = PQI_STATUS_SUCCESS;
304                         memset(error_info, 0, sizeof(*error_info));
305                 }
306         }
307
308         os_reset_rcb(rcb);
309         pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
310         DBG_FUNC("OUT\n");
311         return ret;
312
313 err_out:
314         DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n", 
315                 BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr), 
316                 cmd, ret);
317         os_reset_rcb(rcb);
318         pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
319 err_notag:
320         if (datasize)
321                 os_dma_mem_free(softs, &device_mem);
322         DBG_FUNC("FAILED \n");
323         return ret;
324 }
325
326 /* Common function used to send REPORT PHYSICAL LUNS and REPORT LOGICAL LUNS commands */
327 static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
328         void *buff, size_t buf_len)
329 {
330         int ret;
331         pqisrc_raid_req_t request;
332
333         DBG_FUNC("IN\n");
334
335         memset(&request, 0, sizeof(request));
336         ret =  pqisrc_build_send_raid_request(softs, &request, buff, 
337                                 buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
338
339         DBG_FUNC("OUT\n");
340
341         return ret;
342 }
343
344 /* Subroutine used to fetch the physical or logical LUN list from the adapter */
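/*
 * A header-only REPORT LUNS is issued first to learn the list length, a
 * right-sized buffer is then allocated and the command reissued; if the list
 * grew between the two calls, the allocation is retried with the new length.
 */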
345 static int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
346                 reportlun_data_ext_t **buff, size_t *data_length)
347 {
348         int ret;
349         size_t list_len;
350         size_t data_len;
351         size_t new_lun_list_length;
352         reportlun_data_ext_t *lun_data;
353         reportlun_header_t report_lun_header;
354
355         DBG_FUNC("IN\n");
356
357         ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
358                 sizeof(report_lun_header));
359
360         if (ret) {
361                 DBG_ERR("failed return code: %d\n", ret);
362                 return ret;
363         }
364         list_len = BE_32(report_lun_header.list_length);
365
366 retry:
367         data_len = sizeof(reportlun_header_t) + list_len;
368         *data_length = data_len;
369
370         lun_data = os_mem_alloc(softs, data_len);
371
372         if (!lun_data) {
373                 DBG_ERR("failed to allocate memory for lun_data\n");
374                 return PQI_STATUS_FAILURE;
375         }
376                 
377         if (list_len == 0) {
378                 DBG_DISC("list_len is 0\n");
379                 memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
380                 goto out;
381         }
382
383         ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
384
385         if (ret) {
386                 DBG_ERR("error\n");
387                 goto error;
388         }
389
390         new_lun_list_length = BE_32(lun_data->header.list_length);
391
392         if (new_lun_list_length > list_len) {
393                 list_len = new_lun_list_length;
394                 os_mem_free(softs, (void *)lun_data, data_len);
395                 goto retry;
396         }
397
398 out:
399         *buff = lun_data;
400         DBG_FUNC("OUT\n");
401         return 0;
402
403 error:
404         os_mem_free(softs, (void *)lun_data, data_len);
405         DBG_ERR("FAILED\n");
406         return ret;
407 }
408
409 /*
410  * Function used to get physical and logical device list
411  */
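/*
 * Both LUN lists are fetched, and a zeroed extra entry is appended to the
 * logical list to represent the controller LUN itself; the caller-visible
 * logical list and its length are replaced with this extended copy.
 */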
412 static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
413         reportlun_data_ext_t **physical_dev_list,
414         reportlun_data_ext_t **logical_dev_list, 
415         size_t *phys_data_length,
416         size_t *log_data_length)
417 {
418         int ret = PQI_STATUS_SUCCESS;
419         size_t logical_list_length;
420         size_t logdev_data_length;
421         size_t data_length;
422         reportlun_data_ext_t *local_logdev_list;
423         reportlun_data_ext_t *logdev_data;
424         reportlun_header_t report_lun_header;
425         
426
427         DBG_FUNC("IN\n");
428
429         ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
430         if (ret) {
431                 DBG_ERR("report physical LUNs failed\n");
432                 return ret;
433         }
434
435         ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
436         if (ret) {
437                 DBG_ERR("report logical LUNs failed\n");
438                 return ret;
439         }
440
441
442         logdev_data = *logical_dev_list;
443
444         if (logdev_data) {
445                 logical_list_length =
446                         BE_32(logdev_data->header.list_length);
447         } else {
448                 memset(&report_lun_header, 0, sizeof(report_lun_header));
449                 logdev_data =
450                         (reportlun_data_ext_t *)&report_lun_header;
451                 logical_list_length = 0;
452         }
453
454         logdev_data_length = sizeof(reportlun_header_t) +
455                 logical_list_length;
456
457         /* Append a LOGICAL device entry representing the controller itself */
458         local_logdev_list = os_mem_alloc(softs,
459                                             logdev_data_length + sizeof(reportlun_ext_entry_t));
460         if (!local_logdev_list) {
461                 data_length = *log_data_length;
462                 os_mem_free(softs, (char *)*logical_dev_list, data_length);
463                 *logical_dev_list = NULL;
464                 return PQI_STATUS_FAILURE;
465         }
466
467         memcpy(local_logdev_list, logdev_data, logdev_data_length);
468         memset((uint8_t *)local_logdev_list + logdev_data_length, 0,
469                 sizeof(reportlun_ext_entry_t));
470         local_logdev_list->header.list_length = BE_32(logical_list_length +
471                                                         sizeof(reportlun_ext_entry_t));
472         data_length = *log_data_length;
473         os_mem_free(softs, (char *)*logical_dev_list, data_length);
474         *log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t);
475         *logical_dev_list = local_logdev_list;
476
477         DBG_FUNC("OUT\n");
478
479         return ret;
480 }
481
482 /* Subroutine used to set Bus-Target-Lun for the requested device */
483 static inline void pqisrc_set_btl(pqi_scsi_dev_t *device,
484         int bus, int target, int lun)
485 {
486         DBG_FUNC("IN\n");
487
488         device->bus = bus;
489         device->target = target;
490         device->lun = lun;
491
492         DBG_FUNC("OUT\n");
493 }
494
495 inline boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
496 {
497         return device->is_external_raid_device;
498 }
499
500 static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
501 {
502         return scsi3addr[2] != 0;
503 }
504
505 /* Function used to assign Bus-Target-Lun for the requested device */
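/*
 * Addressing scheme (derived from the LUN ID in scsi3addr):
 *   - controller LUN     -> bus PQI_HBA_BUS, target PQI_CTLR_INDEX
 *   - external RAID LUN  -> bus PQI_EXTERNAL_RAID_VOLUME_BUS,
 *                           target = (lunid >> 16) & 0x3fff, lun = lunid & 0xff
 *   - internal RAID LUN  -> bus PQI_RAID_VOLUME_BUS, target = lunid & 0x3fff, lun 0
 * Physical devices get their target assigned later from the target ID pool.
 */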
506 static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
507 {
508         uint8_t *scsi3addr;
509         uint32_t lunid;
510         uint32_t bus;
511         uint32_t target;
512         uint32_t lun;
513         DBG_FUNC("IN\n");
514
515         scsi3addr = device->scsi3addr;
516         lunid = GET_LE32(scsi3addr);
517
518         if (pqisrc_is_hba_lunid(scsi3addr)) {
519                 /* The specified device is the controller. */
520                 pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, lunid & 0x3fff);
521                 device->target_lun_valid = true;
522                 return;
523         }
524
525         if (pqisrc_is_logical_device(device)) {
526                 if (pqisrc_is_external_raid_device(device)) {
527                         DBG_DISC("External RAID device\n");
528                         bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
529                         target = (lunid >> 16) & 0x3fff;
530                         lun = lunid & 0xff;
531                 } else {
532                         bus = PQI_RAID_VOLUME_BUS;
533                         lun = 0;
534                         target = lunid & 0x3fff;
535                 }
536                 pqisrc_set_btl(device, bus, target, lun);
537                 device->target_lun_valid = true;
538                 return;
539         }
540
541         DBG_FUNC("OUT\n");
542 }
543
544 /* Build and send an internal INQUIRY command to a particular device */
545 static int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
546         uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
547 {
548         int ret = PQI_STATUS_SUCCESS;
549         pqisrc_raid_req_t request;
550         raid_path_error_info_elem_t error_info;
551
552         DBG_FUNC("IN\n");
553
554         memset(&request, 0, sizeof(request));
555         ret =  pqisrc_build_send_raid_request(softs, &request, buff, buf_len, 
556                                                                 SA_INQUIRY, vpd_page, scsi3addr, &error_info);
557
558         DBG_FUNC("OUT\n");
559         return ret;
560 }
561
562 /* Function used to parse the sense information from response */
563 static void pqisrc_fetch_sense_info(const uint8_t *sense_data,
564         unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
565 {
566         struct sense_header_scsi header;
567
568         DBG_FUNC("IN\n");
569
570         *sense_key = 0;
571         *ascq = 0;
572         *asc = 0;
573
574         if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) {
575                 *sense_key = header.sense_key;
576                 *asc = header.asc;
577                 *ascq = header.ascq;
578         }
579
580         DBG_DISC("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq);
581
582         DBG_FUNC("OUT\n");
583 }
584
585 /* Fetch the volume's offline status from the Logical Volume Status VPD page */
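/*
 * Two INQUIRY commands are issued for VPD page SA_VPD_LV_STATUS: the first
 * reads only the page header to learn the page length (byte 3), the second
 * reads the full page; the volume status is taken from byte 4.
 */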
586 static uint8_t pqisrc_get_volume_offline_status(pqisrc_softstate_t *softs,
587         uint8_t *scsi3addr)
588 {
589         int ret = PQI_STATUS_SUCCESS;
590         uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
591         uint8_t size;
592         uint8_t *buff = NULL;
593
594         DBG_FUNC("IN\n");
595         
596         buff = os_mem_alloc(softs, 64);
597         if (!buff)
598                 return PQI_STATUS_FAILURE;
599
600         /* Get the size of the VPD return buff. */
601         ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
602                 buff, SCSI_VPD_HEADER_LENGTH);
603
604         if (ret)
605                 goto out;
606
607         size = buff[3];
608
609         /* Now get the whole VPD buff. */
610         ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
611                 buff, size + SCSI_VPD_HEADER_LENGTH);
612         if (ret)
613                 goto out;
614
615         status = buff[4];
616
617 out:
618         os_mem_free(softs, (char *)buff, 64);
619         DBG_FUNC("OUT\n");
620
621         return status;
622 }
623
624
625 /* Determine offline status of a volume.  Returns appropriate SA_LV_* status.*/
626 static uint8_t pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
627         uint8_t *scsi3addr)
628 {
629         int ret = PQI_STATUS_SUCCESS;
630         uint8_t *sense_data;
631         unsigned sense_data_len;
632         uint8_t sense_key;
633         uint8_t asc;
634         uint8_t ascq;
635         uint8_t off_status;
636         uint8_t scsi_status;
637         pqisrc_raid_req_t request;
638         raid_path_error_info_elem_t error_info;
639
640         DBG_FUNC("IN\n");
641
642         memset(&request, 0, sizeof(request));   
643         ret =  pqisrc_build_send_raid_request(softs, &request, NULL, 0, 
644                                 TEST_UNIT_READY, 0, scsi3addr, &error_info);
645         
646         if (ret)
647                 goto error;
648         sense_data = error_info.data;
649         sense_data_len = LE_16(error_info.sense_data_len);
650
651         if (sense_data_len > sizeof(error_info.data))
652                 sense_data_len = sizeof(error_info.data);
653
654         pqisrc_fetch_sense_info(sense_data, sense_data_len, &sense_key, &asc,
655                 &ascq);
656
657         scsi_status = error_info.status;
658
659         /* scsi status: "CHECK CONDN" /  SK: "not ready" ? */
660         if (scsi_status != 2 ||
661             sense_key != 2 ||
662             asc != ASC_LUN_NOT_READY) {
663                 return SA_LV_OK;
664         }
665
666         /* Determine the reason for not ready state. */
667         off_status = pqisrc_get_volume_offline_status(softs, scsi3addr);
668
669         DBG_DISC("offline_status 0x%x\n", off_status);
670
671         /* Keep volume offline in certain cases. */
672         switch (off_status) {
673         case SA_LV_UNDERGOING_ERASE:
674         case SA_LV_NOT_AVAILABLE:
675         case SA_LV_UNDERGOING_RPI:
676         case SA_LV_PENDING_RPI:
677         case SA_LV_ENCRYPTED_NO_KEY:
678         case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
679         case SA_LV_UNDERGOING_ENCRYPTION:
680         case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
681         case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
682                 return off_status;
683         case SA_LV_STATUS_VPD_UNSUPPORTED:
684                 /*
685                  * If the VPD status page isn't available,
686                  * use ASC/ASCQ to determine state.
687                  */
688                 if (ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS ||
689                     ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)
690                         return off_status;
691                 break;
692         }
693
694         DBG_FUNC("OUT\n");
695
696         return SA_LV_OK;
697
698 error:
699         return SA_LV_STATUS_VPD_UNSUPPORTED;
700 }
701
702 /* Validate the RAID map parameters */
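/*
 * Checks performed: structure_size within [offsetof(dev_data), sizeof(*raid_map)],
 * total map entries within RAID_MAP_MAX_ENTRIES, layout_map_count of 2 for
 * RAID-1 and 3 for RAID-1(ADM), and a non-zero blocks-per-row for RAID-50/60.
 */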
703 static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
704         pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
705 {
706         char *error_msg;
707         uint32_t raidmap_size;
708         uint32_t r5or6_blocks_per_row;
709         unsigned phys_dev_num;
710         unsigned num_raidmap_entries;
711
712         DBG_FUNC("IN\n");
713
714         raidmap_size = LE_32(raid_map->structure_size);
715         if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) {
716                 error_msg = "RAID map too small\n";
717                 goto error;
718         }
719
720         if (raidmap_size > sizeof(*raid_map)) {
721                 error_msg = "RAID map too large\n";
722                 goto error;
723         }
724
725         phys_dev_num = LE_16(raid_map->layout_map_count) *
726                 (LE_16(raid_map->data_disks_per_row) +
727                 LE_16(raid_map->metadata_disks_per_row));
728         num_raidmap_entries = phys_dev_num *
729                 LE_16(raid_map->row_cnt);
730
731         if (num_raidmap_entries > RAID_MAP_MAX_ENTRIES) {
732                 error_msg = "invalid number of map entries in RAID map\n";
733                 goto error;
734         }
735
736         if (device->raid_level == SA_RAID_1) {
737                 if (LE_16(raid_map->layout_map_count) != 2) {
738                         error_msg = "invalid RAID-1 map\n";
739                         goto error;
740                 }
741         } else if (device->raid_level == SA_RAID_ADM) {
742                 if (LE_16(raid_map->layout_map_count) != 3) {
743                         error_msg = "invalid RAID-1(ADM) map\n";
744                         goto error;
745                 }
746         } else if ((device->raid_level == SA_RAID_5 ||
747                 device->raid_level == SA_RAID_6) &&
748                 LE_16(raid_map->layout_map_count) > 1) {
749                 /* RAID 50/60 */
750                 r5or6_blocks_per_row =
751                         LE_16(raid_map->strip_size) *
752                         LE_16(raid_map->data_disks_per_row);
753                 if (r5or6_blocks_per_row == 0) {
754                         error_msg = "invalid RAID-5 or RAID-6 map\n";
755                         goto error;
756                 }
757         }
758
759         DBG_FUNC("OUT\n");
760
761         return 0;
762
763 error:
764         DBG_ERR("%s\n", error_msg);
765         return PQI_STATUS_FAILURE;
766 }
767
768 /* Get device raidmap for the requested device */
769 static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
770         pqi_scsi_dev_t *device)
771 {
772         int ret = PQI_STATUS_SUCCESS;
773         pqisrc_raid_req_t request;
774         pqisrc_raid_map_t *raid_map;
775
776         DBG_FUNC("IN\n");
777
778         raid_map = os_mem_alloc(softs, sizeof(*raid_map));
779         if (!raid_map)
780                 return PQI_STATUS_FAILURE;
781
782         memset(&request, 0, sizeof(request));
783         ret =  pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map), 
784                                         SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
785
786         if (ret) {
787                 DBG_ERR("error in build send raid req ret=%d\n", ret);
788                 goto err_out;
789         }
790
791         ret = pqisrc_raid_map_validation(softs, device, raid_map);
792         if (ret) {
793                 DBG_ERR("error in raid map validation ret=%d\n", ret);
794                 goto err_out;
795         }
796
797         device->raid_map = raid_map;
798         DBG_FUNC("OUT\n");
799         return 0;
800
801 err_out:
802         os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
803         DBG_FUNC("FAILED \n");
804         return ret;
805 }
806
807 /* Read the device's ioaccel status VPD page to determine its offload (I/O accelerator) configuration */
808 static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
809         pqi_scsi_dev_t *device)
810 {
811         int ret = PQI_STATUS_SUCCESS;
812         uint8_t *buff;
813         uint8_t ioaccel_status;
814
815         DBG_FUNC("IN\n");
816
817         buff = os_mem_alloc(softs, 64);
818         if (!buff)
819                 return;
820
821         ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
822                                         VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64);
823         if (ret) {
824                 DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
825                 goto err_out;
826         }
827         
828         ioaccel_status = buff[IOACCEL_STATUS_BYTE];
829         device->offload_config =
830                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
831
832         if (device->offload_config) {
833                 device->offload_enabled_pending =
834                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
835                 if (pqisrc_get_device_raidmap(softs, device))
836                         device->offload_enabled_pending = false;
837         }
838         
839         DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n", 
840                         device->offload_config, device->offload_enabled_pending);
841
842 err_out:
843         os_mem_free(softs, (char*)buff, 64);
844         DBG_FUNC("OUT\n");
845 }
846
847 /* Get RAID level of requested device */
848 static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs,
849         pqi_scsi_dev_t *device)
850 {
851         uint8_t raid_level;
852         uint8_t *buff;
853
854         DBG_FUNC("IN\n");
855
856         raid_level = SA_RAID_UNKNOWN;
857
858         buff = os_mem_alloc(softs, 64);
859         if (buff) {
860                 int ret;
861                 ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
862                         VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64);
863                 if (ret == 0) {
864                         raid_level = buff[8];
865                         if (raid_level > SA_RAID_MAX)
866                                 raid_level = SA_RAID_UNKNOWN;
867                 }
868                 os_mem_free(softs, (char*)buff, 64);
869         }
870
871         device->raid_level = raid_level;
872         DBG_DISC("RAID LEVEL: %x \n",  raid_level);
873         DBG_FUNC("OUT\n");
874 }
875
876 /* Parse the inquiry response and determine the type of device */
877 static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
878         pqi_scsi_dev_t *device)
879 {
880         int ret = PQI_STATUS_SUCCESS;
881         uint8_t *inq_buff;
882
883         DBG_FUNC("IN\n");
884
885         inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE);
886         if (!inq_buff)
887                 return PQI_STATUS_FAILURE;
888
889         /* Send an inquiry to the device to see what it is. */
890         ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
891                 OBDR_TAPE_INQ_SIZE);
892         if (ret)
893                 goto err_out;
894         pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
895         pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);
896
897         device->devtype = inq_buff[0] & 0x1f;
898         memcpy(device->vendor, &inq_buff[8],
899                 sizeof(device->vendor));
900         memcpy(device->model, &inq_buff[16],
901                 sizeof(device->model));
902         DBG_DISC("DEV_TYPE: %x VENDOR: %s MODEL: %s\n",  device->devtype, device->vendor, device->model);
903
904         if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
905                 if (pqisrc_is_external_raid_device(device)) {
906                         device->raid_level = SA_RAID_UNKNOWN;
907                         device->volume_status = SA_LV_OK;
908                         device->volume_offline = false;
909                 } 
910                 else {
911                         pqisrc_get_dev_raid_level(softs, device);
912                         pqisrc_get_dev_ioaccel_status(softs, device);
913                         device->volume_status = pqisrc_get_dev_vol_status(softs,
914                                                 device->scsi3addr);
915                         device->volume_offline = device->volume_status != SA_LV_OK;
916                 }
917         }
918
919         /*
920          * Check if this is a One-Button-Disaster-Recovery device
921          * by looking for "$DR-10" at offset 43 in the inquiry data.
922          */
923         device->is_obdr_device = (device->devtype == ROM_DEVICE &&
924                 memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
925                         OBDR_SIG_LEN) == 0);
926 err_out:
927         os_mem_free(softs, (char*)inq_buff, OBDR_TAPE_INQ_SIZE);
928
929         DBG_FUNC("OUT\n");
930         return ret;
931 }
932
933 /*
934  * BMIC (Basic Management And Interface Commands) command
935  * to get the controller identify params
936  */
937 static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs,
938         bmic_ident_ctrl_t *buff)
939 {
940         int ret = PQI_STATUS_SUCCESS;
941         pqisrc_raid_req_t request;
942
943         DBG_FUNC("IN\n");
944
945         memset(&request, 0, sizeof(request));   
946         ret =  pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff), 
947                                 BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
948         DBG_FUNC("OUT\n");
949
950         return ret;
951 }
952
953 /* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
954 int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
955 {
956         int ret = PQI_STATUS_SUCCESS;
957         bmic_ident_ctrl_t *identify_ctrl;
958
959         DBG_FUNC("IN\n");
960
961         identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl));
962         if (!identify_ctrl) {
963                 DBG_ERR("failed to allocate memory for identify_ctrl\n");
964                 return PQI_STATUS_FAILURE;
965         }
966
967         memset(identify_ctrl, 0, sizeof(*identify_ctrl));
968
969         ret = pqisrc_identify_ctrl(softs, identify_ctrl);
970         if (ret)
971                 goto out;
972      
973         softs->fw_build_number = identify_ctrl->fw_build_number;
974         memcpy(softs->fw_version, identify_ctrl->fw_version,
975                 sizeof(identify_ctrl->fw_version));
976         softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0';
977         snprintf(softs->fw_version +
978                 strlen(softs->fw_version),
979                 sizeof(softs->fw_version) - strlen(softs->fw_version),
980                 "-%u", identify_ctrl->fw_build_number);
981 out:
982         os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
983         DBG_INIT("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
984         DBG_FUNC("OUT\n");
985         return ret;
986 }
987
988 /* BMIC command to determine scsi device identify params */
989 static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
990         pqi_scsi_dev_t *device,
991         bmic_ident_physdev_t *buff,
992         int buf_len)
993 {
994         int ret = PQI_STATUS_SUCCESS;
995         uint16_t bmic_device_index;
996         pqisrc_raid_req_t request;
997
998
999         DBG_FUNC("IN\n");
1000
1001         memset(&request, 0, sizeof(request));   
1002         bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
1003         request.cdb[2] = (uint8_t)bmic_device_index;
1004         request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
1005
1006         ret =  pqisrc_build_send_raid_request(softs, &request, buff, buf_len, 
1007                                 BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1008         DBG_FUNC("OUT\n");
1009         return ret;
1010 }
1011
1012 /*
1013  * Function used to get SCSI device information using the BMIC
1014  * BMIC_IDENTIFY_PHYSICAL_DEVICE command
1015  */
1016 static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
1017         pqi_scsi_dev_t *device,
1018         bmic_ident_physdev_t *id_phys)
1019 {
1020         int ret = PQI_STATUS_SUCCESS;
1021
1022         DBG_FUNC("IN\n");
1023         memset(id_phys, 0, sizeof(*id_phys));
1024
1025         ret= pqisrc_identify_physical_disk(softs, device,
1026                 id_phys, sizeof(*id_phys));
1027         if (ret) {
1028                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1029                 return;
1030         }
1031
1032         device->queue_depth =
1033                 LE_16(id_phys->current_queue_depth_limit);
1034         device->device_type = id_phys->device_type;
1035         device->active_path_index = id_phys->active_path_number;
1036         device->path_map = id_phys->redundant_path_present_map;
1037         memcpy(&device->box,
1038                 &id_phys->alternate_paths_phys_box_on_port,
1039                 sizeof(device->box));
1040         memcpy(&device->phys_connector,
1041                 &id_phys->alternate_paths_phys_connector,
1042                 sizeof(device->phys_connector));
1043         device->bay = id_phys->phys_bay_in_box;
1044
1045         DBG_DISC("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n",  device->device_type, device->queue_depth);
1046         DBG_FUNC("OUT\n");
1047 }
1048
1049
1050 /* Function used to find a matching device entry in the driver's device list */
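/*
 * Returns DEVICE_UNCHANGED when an entry with the same scsi3addr matches in
 * all compared attributes, DEVICE_CHANGED when the address matches but the
 * device differs (or is now offline), and DEVICE_NOT_FOUND otherwise; on an
 * address match, *same_device points at the existing entry.
 */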
1051 static device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
1052         pqi_scsi_dev_t *device_to_find,
1053         pqi_scsi_dev_t **same_device)
1054 {
1055         pqi_scsi_dev_t *device;
1056         int i,j;
1057         DBG_FUNC("IN\n");
1058         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1059                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1060                         if(softs->device_list[i][j] == NULL)
1061                                 continue;
1062                         device = softs->device_list[i][j];
1063                         if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
1064                                 device->scsi3addr)) {
1065                                 *same_device = device;
1066                                 if (pqisrc_device_equal(device_to_find, device)) {
1067                                         if (device_to_find->volume_offline)
1068                                                 return DEVICE_CHANGED;
1069                                         return DEVICE_UNCHANGED;
1070                                 }
1071                                 return DEVICE_CHANGED;
1072                         }
1073                 }
1074         }
1075         DBG_FUNC("OUT\n");
1076
1077         return DEVICE_NOT_FOUND;
1078 }
1079
1080
1081 /* Update an existing device entry with data from the newly discovered device */
1082 static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
1083         pqi_scsi_dev_t *device_exist,
1084         pqi_scsi_dev_t *new_device)
1085 {
1086         DBG_FUNC("IN\n");
1087         device_exist->expose_device = new_device->expose_device;
1088         memcpy(device_exist->vendor, new_device->vendor,
1089                 sizeof(device_exist->vendor));
1090         memcpy(device_exist->model, new_device->model,
1091                 sizeof(device_exist->model));
1092         device_exist->is_physical_device = new_device->is_physical_device;
1093         device_exist->is_external_raid_device =
1094                 new_device->is_external_raid_device;
1095         device_exist->sas_address = new_device->sas_address;
1096         device_exist->raid_level = new_device->raid_level;
1097         device_exist->queue_depth = new_device->queue_depth;
1098         device_exist->ioaccel_handle = new_device->ioaccel_handle;
1099         device_exist->volume_status = new_device->volume_status;
1100         device_exist->active_path_index = new_device->active_path_index;
1101         device_exist->path_map = new_device->path_map;
1102         device_exist->bay = new_device->bay;
1103         memcpy(device_exist->box, new_device->box,
1104                 sizeof(device_exist->box));
1105         memcpy(device_exist->phys_connector, new_device->phys_connector,
1106                 sizeof(device_exist->phys_connector));
1107         device_exist->offload_config = new_device->offload_config;
1108         device_exist->offload_enabled = false;
1109         device_exist->offload_enabled_pending =
1110                 new_device->offload_enabled_pending;
1111         device_exist->offload_to_mirror = 0;
1112         if (device_exist->raid_map)
1113                 os_mem_free(softs,
1114                             (char *)device_exist->raid_map,
1115                             sizeof(*device_exist->raid_map));
1116         device_exist->raid_map = new_device->raid_map;
1117         /* To prevent this from being freed later. */
1118         new_device->raid_map = NULL;
1119         DBG_FUNC("OUT\n");
1120 }
1121
1122 /* Find the physical disk that owns the given ioaccel_handle */
1123 static pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
1124         pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
1125 {
1126         pqi_scsi_dev_t *device;
1127         int i,j;
1128         DBG_FUNC("IN\n");       
1129         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1130                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1131                         if(softs->device_list[i][j] == NULL)
1132                                 continue;
1133                         device = softs->device_list[i][j];
1134                         if (device->devtype != DISK_DEVICE)
1135                                 continue;
1136                         if (pqisrc_is_logical_device(device))
1137                                 continue;
1138                         if (device->ioaccel_handle == ioaccel_handle)
1139                                 return device;
1140                 }
1141         }
1142         DBG_FUNC("OUT\n");
1143
1144         return NULL;
1145 }
1146
1147 /* Recompute logical device queue depths from their member physical disks */
1148 static void pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
1149 {
1150         unsigned i;
1151         unsigned phys_dev_num;
1152         unsigned num_raidmap_entries;
1153         unsigned queue_depth;
1154         pqisrc_raid_map_t *raid_map;
1155         pqi_scsi_dev_t *device;
1156         raidmap_data_t *dev_data;
1157         pqi_scsi_dev_t *phys_disk;
1158         unsigned j;
1159         unsigned k;
1160
1161         DBG_FUNC("IN\n");
1162
1163         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1164                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1165                         if(softs->device_list[i][j] == NULL)
1166                                 continue;
1167                         device = softs->device_list[i][j];
1168                         if (device->devtype != DISK_DEVICE)
1169                                 continue;
1170                         if (!pqisrc_is_logical_device(device))
1171                                 continue;
1172                         if (pqisrc_is_external_raid_device(device))
1173                                 continue;
1174                         device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1175                         raid_map = device->raid_map;
1176                         if (!raid_map)
1177                                 return;
1178                         dev_data = raid_map->dev_data;
1179                         phys_dev_num = LE_16(raid_map->layout_map_count) *
1180                                         (LE_16(raid_map->data_disks_per_row) +
1181                                         LE_16(raid_map->metadata_disks_per_row));
1182                         num_raidmap_entries = phys_dev_num *
1183                                                 LE_16(raid_map->row_cnt);
1184
1185                         queue_depth = 0;
1186                         for (k = 0; k < num_raidmap_entries; k++) {
1187                                 phys_disk = pqisrc_identify_device_via_ioaccel(softs,
1188                                                 dev_data[k].ioaccel_handle);
1189
1190                                 if (!phys_disk) {
1191                                         DBG_WARN(
1192                                         "Failed to find physical disk handle for logical drive %016llx\n",
1193                                                 (unsigned long long)BE_64(device->scsi3addr[0]));
1194                                         device->offload_enabled = false;
1195                                         device->offload_enabled_pending = false;
1196                                         if (raid_map)
1197                                                 os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
1198                                         device->raid_map = NULL;
1199                                         return;
1200                                 }
1201
1202                                 queue_depth += phys_disk->queue_depth;
1203                         }
1204
1205                         device->queue_depth = queue_depth;
1206                 } /* end inner loop */
1207         }/* end outer loop */
1208         DBG_FUNC("OUT\n");
1209 }
1210
1211 /* Function used to add a scsi device to OS scsi subsystem */
1212 static int pqisrc_add_device(pqisrc_softstate_t *softs,
1213         pqi_scsi_dev_t *device)
1214 {
1215         DBG_FUNC("IN\n");
1216         DBG_WARN("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1217                 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1218
1219         device->invalid = false;
1220
1221         if(device->expose_device) {
1222                 /* TBD: Call OS upper layer function to add the device entry */
1223                 os_add_device(softs,device);
1224         }
1225         DBG_FUNC("OUT\n");
1226         return PQI_STATUS_SUCCESS;
1227
1228 }
1229
1230 /* Function used to remove a scsi device from OS scsi subsystem */
1231 void pqisrc_remove_device(pqisrc_softstate_t *softs,
1232         pqi_scsi_dev_t *device)
1233 {
1234         DBG_FUNC("IN\n");
1235         DBG_DISC("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1236                 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1237
1238         /* TBD: Call OS upper layer function to remove the device entry */
1239         device->invalid = true;
1240         os_remove_device(softs,device);
1241         DBG_FUNC("OUT\n");
1242 }
1243
1244
1245 /*
1246  * When exposing a new device to the OS fails, adjust the driver's device
1247  * list to match the OS SCSI mid-layer's view
1248  */
1249 static void pqisrc_adjust_list(pqisrc_softstate_t *softs,
1250         pqi_scsi_dev_t *device)
1251 {
1252         DBG_FUNC("IN\n");
1253
1254         if (!device) {
1255                 DBG_ERR("softs = %p: device is NULL !!!\n", softs);
1256                 return;
1257         }
1258
1259         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1260         softs->device_list[device->target][device->lun] = NULL;
1261         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1262         pqisrc_device_mem_free(softs, device);
1263
1264         DBG_FUNC("OUT\n");
1265 }
1266
1267 /* Debug routine used to display the RAID volume status of the device */
1268 static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
1269         pqi_scsi_dev_t *device)
1270 {
1271         char *status;
1272
1273         DBG_FUNC("IN\n");
1274         switch (device->volume_status) {
1275         case SA_LV_OK:
1276                 status = "Volume is online.";
1277                 break;
1278         case SA_LV_UNDERGOING_ERASE:
1279                 status = "Volume is undergoing background erase process.";
1280                 break;
1281         case SA_LV_NOT_AVAILABLE:
1282                 status = "Volume is waiting for transforming volume.";
1283                 break;
1284         case SA_LV_UNDERGOING_RPI:
1285                 status = "Volume is undergoing rapid parity initialization process.";
1286                 break;
1287         case SA_LV_PENDING_RPI:
1288                 status = "Volume is queued for rapid parity initialization process.";
1289                 break;
1290         case SA_LV_ENCRYPTED_NO_KEY:
1291                 status = "Volume is encrypted and cannot be accessed because key is not present.";
1292                 break;
1293         case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1294                 status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.";
1295                 break;
1296         case SA_LV_UNDERGOING_ENCRYPTION:
1297                 status = "Volume is undergoing encryption process.";
1298                 break;
1299         case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1300                 status = "Volume is undergoing encryption re-keying process.";
1301                 break;
1302         case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1303                 status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled.";
1304                 break;
1305         case SA_LV_PENDING_ENCRYPTION:
1306                 status = "Volume is pending migration to encrypted state, but process has not started.";
1307                 break;
1308         case SA_LV_PENDING_ENCRYPTION_REKEYING:
1309                 status = "Volume is encrypted and is pending encryption rekeying.";
1310                 break;
1311         case SA_LV_STATUS_VPD_UNSUPPORTED:
1312                 status = "Volume status is not available through vital product data pages.";
1313                 break;
1314         default:
1315                 status = "Volume is in an unknown state.";
1316                 break;
1317         }
1318
1319         DBG_DISC("scsi BTL %d:%d:%d %s\n",
1320                 device->bus, device->target, device->lun, status);
1321         DBG_FUNC("OUT\n");
1322 }
1323
1324 void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1325 {
1326         DBG_FUNC("IN\n");
1327         if (!device)
1328                 return;
1329         if (device->raid_map) {
1330                         os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
1331         }
1332         os_mem_free(softs, (char *)device,sizeof(*device));
1333         DBG_FUNC("OUT\n");
1334         
1335 }
1336
1337 /* OS should call this function to free the scsi device */
1338 void pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
1339 {
1340
1341                 OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1342                 if (!pqisrc_is_logical_device(device)) {
1343                         pqisrc_free_tid(softs,device->target);
1344                 }
1345                 pqisrc_device_mem_free(softs, device);
1346                 OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1347
1348 }
1349
1350
1351 /* Reconcile the newly discovered devices with the driver's device list */
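/*
 * All current entries are first marked device_gone, then each newly reported
 * device is matched against the list (clearing the flag or marking it new);
 * entries still flagged gone are removed from the OS, new devices get a
 * target ID (physical) and are exposed, logical queue depths are recomputed,
 * and offload_enabled is committed from its pending value.
 */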
1352 static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
1353         pqi_scsi_dev_t *new_device_list[], int num_new_devices)
1354 {
1355         int ret;
1356         int i;
1357         device_status_t dev_status;
1358         pqi_scsi_dev_t *device;
1359         pqi_scsi_dev_t *same_device;
1360         pqi_scsi_dev_t **added = NULL;
1361         pqi_scsi_dev_t **removed = NULL;
1362         int nadded = 0, nremoved = 0;
1363         int j;
1364         int tid = 0;
1365
1366         DBG_FUNC("IN\n");
1367
1368         added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES);
1369         removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES);
1370
1371         if (!added || !removed) {
1372                 DBG_WARN("Out of memory \n");
1373                 goto free_and_out;
1374         }
1375         
1376         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1377         
1378         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1379                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1380                         if(softs->device_list[i][j] == NULL)
1381                                 continue;
1382                         device = softs->device_list[i][j];
1383                         device->device_gone = true;
1384                 }
1385         }
1386         DBG_IO("Device list used an array\n");
1387         for (i = 0; i < num_new_devices; i++) {
1388                 device = new_device_list[i];
1389
1390                 dev_status = pqisrc_scsi_find_entry(softs, device,
1391                         &same_device);
1392
1393                 switch (dev_status) {
1394                 case DEVICE_UNCHANGED:
1395                         /* Device already present in the existing device list */
1396                         device->new_device = false;
1397                         same_device->device_gone = false;
1398                         pqisrc_exist_device_update(softs, same_device, device);
1399                         break;
1400                 case DEVICE_NOT_FOUND:
1401                         /* Device not found in existing list */
1402                         device->new_device = true;
1403                         break;
1404                 case DEVICE_CHANGED:
1405                         /* Device at this address has changed; treat it as a new device to add */
1406                         device->new_device = true;
1407                         break;
1408                 default:
1409                         break;
1410                 }
1411         }
1412         /* Process all devices that have gone away. */
1413         for(i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) {
1414                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1415                         if(softs->device_list[i][j] == NULL)
1416                                 continue;
1417                         device = softs->device_list[i][j];
1418                         if (device->device_gone) {
1419                                 softs->device_list[device->target][device->lun] = NULL;
1420                                 removed[nremoved] = device;
1421                                 nremoved++;
1422                         }
1423                 }
1424         }
1425
1426         /* Process all new devices. */
1427         for (i = 0, nadded = 0; i < num_new_devices; i++) {
1428                 device = new_device_list[i];
1429                 if (!device->new_device)
1430                         continue;
1431                 if (device->volume_offline)
1432                         continue;
1433                 
1434                 /* physical device */
1435                 if (!pqisrc_is_logical_device(device)) {
1436                         tid = pqisrc_alloc_tid(softs);
1437                         if(INVALID_ELEM != tid)
1438                                 pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
1439                 }
1440
1441                 softs->device_list[device->target][device->lun] = device;
1442                 DBG_DISC("Added device %p at B : %d T : %d L : %d\n",device,
1443                         device->bus,device->target,device->lun);
1444                 /* To prevent this entry from being freed later. */
1445                 new_device_list[i] = NULL;
1446                 added[nadded] = device;
1447                 nadded++;
1448         }
1449
1450         pqisrc_update_log_dev_qdepth(softs);
1451         
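        /*
         * Commit any pending offload (ioaccel) state now that the device
         * table reflects the latest scan.
         */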
1452         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1453                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1454                         if(softs->device_list[i][j] == NULL)
1455                                 continue;
1456                         device = softs->device_list[i][j];
1457                         device->offload_enabled = device->offload_enabled_pending;
1458                 }
1459         }
1460
1461         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1462
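        /* Notify the OS of removed devices outside the devlist spinlock. */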
1463         for(i = 0; i < nremoved; i++) {
1464                 device = removed[i];
1465                 if (device == NULL)
1466                         continue;
1467                 pqisrc_remove_device(softs, device);
1468                 pqisrc_display_device_info(softs, "removed", device);
1469                 
1470         }
1471
1472         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1473                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1474                         if(softs->device_list[i][j] == NULL)
1475                                 continue;
1476                         device = softs->device_list[i][j];
1477                         /*
1478                          * Notify the OS upper layer if the queue depth of
1479                          * any existing device has changed.
1480                          */
1481                         if (device->queue_depth !=
1482                                 device->advertised_queue_depth) {
1483                                 device->advertised_queue_depth = device->queue_depth;
1484                                 /* TBD: Call OS upper layer function to change device Q depth */
1485                         }
1486                 }
1487         }
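        /* Expose newly added devices to the OS; back out any that fail to attach. */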
1488         for(i = 0; i < nadded; i++) {
1489                 device = added[i];
1490                 if (device->expose_device) {
1491                         ret = pqisrc_add_device(softs, device);
1492                         if (ret) {
1493                                 DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
1494                                         device->bus, device->target,
1495                                         device->lun);
1496                                 pqisrc_adjust_list(softs, device);
1497                                 continue;
1498                         }
1499                 }
1500
1501                 pqisrc_display_device_info(softs, "added", device);
1502         }
1503
1504         /* Process all volumes that are offline. */
1505         for (i = 0; i < num_new_devices; i++) {
1506                 device = new_device_list[i];
1507                 if (!device)
1508                         continue;
1509                 if (!device->new_device)
1510                         continue;
1511                 if (device->volume_offline) {
1512                         pqisrc_display_volume_status(softs, device);
1513                         pqisrc_display_device_info(softs, "offline", device);
1514                 }
1515         }
1516
1517 free_and_out:
1518         if (added)
1519                 os_mem_free(softs, (char *)added,
1520                             sizeof(*added) * PQI_MAX_DEVICES); 
1521         if (removed)
1522                 os_mem_free(softs, (char *)removed,
1523                             sizeof(*removed) * PQI_MAX_DEVICES); 
1524
1525         DBG_FUNC("OUT\n");
1526 }
1527
1528 /*
1529  * Let the adapter know the driver version using the
1530  * BMIC_WRITE_HOST_WELLNESS command.
1531  */
1532 int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
1533 {
1534         int rval = PQI_STATUS_SUCCESS;
1535         struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
1536         size_t data_length;
1537         pqisrc_raid_req_t request;
1538
1539         DBG_FUNC("IN\n");
1540
1541         memset(&request, 0, sizeof(request));   
1542         data_length = sizeof(*host_wellness_driver_ver);
1543
1544         host_wellness_driver_ver = os_mem_alloc(softs, data_length);
1545         if (!host_wellness_driver_ver) {
1546                 DBG_ERR("failed to allocate memory for host wellness driver_version\n");
1547                 return PQI_STATUS_FAILURE;
1548         }
1549
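        /*
         * Build the host wellness payload: a "<HW>" start tag, a "DV"
         * driver-version field (tag, length, then the OS name with the
         * driver version appended), and a "ZZ" end tag.
         */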
1550         host_wellness_driver_ver->start_tag[0] = '<';
1551         host_wellness_driver_ver->start_tag[1] = 'H';
1552         host_wellness_driver_ver->start_tag[2] = 'W';
1553         host_wellness_driver_ver->start_tag[3] = '>';
1554         host_wellness_driver_ver->driver_version_tag[0] = 'D';
1555         host_wellness_driver_ver->driver_version_tag[1] = 'V';
1556         host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version));
1557         strncpy(host_wellness_driver_ver->driver_version, softs->os_name,
1558                 sizeof(host_wellness_driver_ver->driver_version));
1559         if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version)) {
1560                 strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name),
1561                         PQISRC_DRIVER_VERSION, sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name));
1562         } else {
1563                 DBG_DISC("OS name length (%lu) leaves no room to append the driver version\n",
1564                         strlen(softs->os_name));
1565         }
1566         host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
1567         host_wellness_driver_ver->end_tag[0] = 'Z';
1568         host_wellness_driver_ver->end_tag[1] = 'Z';
1569
1570         rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver,data_length,
1571                                         BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1572
1573         os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
1574         
1575         DBG_FUNC("OUT\n");
1576         return rval;
1577 }
1578
1579 /* 
1580  * Write current RTC time from host to the adapter using
1581  * BMIC_WRITE_HOST_WELLNESS
1582  */
1583 int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
1584 {
1585         int rval = PQI_STATUS_SUCCESS;
1586         struct bmic_host_wellness_time *host_wellness_time;
1587         size_t data_length;
1588         pqisrc_raid_req_t request;
1589
1590         DBG_FUNC("IN\n");
1591
1592         memset(&request, 0, sizeof(request));   
1593         data_length = sizeof(*host_wellness_time);
1594
1595         host_wellness_time = os_mem_alloc(softs, data_length);
1596         if (!host_wellness_time) {
1597                 DBG_ERR("failed to allocate memory for host wellness time structure\n");
1598                 return PQI_STATUS_FAILURE;
1599         }
1600
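        /*
         * Build the time payload: a "<HW>" start tag, a "TD" time field whose
         * length spans the fields from 'century' up to the "don't write" tag,
         * the current time filled in by os_get_time(), and "DW"/"ZZ" trailers.
         */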
1601         host_wellness_time->start_tag[0] = '<';
1602         host_wellness_time->start_tag[1] = 'H';
1603         host_wellness_time->start_tag[2] = 'W';
1604         host_wellness_time->start_tag[3] = '>';
1605         host_wellness_time->time_tag[0] = 'T';
1606         host_wellness_time->time_tag[1] = 'D';
1607         host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, dont_write_tag) -
1608                                                 offsetof(struct bmic_host_wellness_time, century));
1609
1610         os_get_time(host_wellness_time);
1611
1612         host_wellness_time->dont_write_tag[0] = 'D';
1613         host_wellness_time->dont_write_tag[1] = 'W';
1614         host_wellness_time->end_tag[0] = 'Z';
1615         host_wellness_time->end_tag[1] = 'Z';
1616         
1617         rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time,data_length,
1618                                         BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1619         
1620         os_mem_free(softs, (char *)host_wellness_time, data_length);
1621
1622         DBG_FUNC("OUT\n");
1623         return rval;
1624 }
1625
1626 /*
1627  * Perform a rescan of SCSI devices to pick up any
1628  * configuration change events.
1629  */
1630 int pqisrc_scan_devices(pqisrc_softstate_t *softs)
1631 {
1632         boolean_t is_physical_device;
1633         int ret = PQI_STATUS_FAILURE;
1634         int i;
1635         int new_dev_cnt;
1636         int phy_log_dev_cnt;
1637         uint8_t *scsi3addr;
1638         uint32_t physical_cnt;
1639         uint32_t logical_cnt;
1640         uint32_t ndev_allocated = 0;
1641         size_t phys_data_length, log_data_length;
1642         reportlun_data_ext_t *physical_dev_list = NULL;
1643         reportlun_data_ext_t *logical_dev_list = NULL;
1644         reportlun_ext_entry_t *lun_ext_entry = NULL;
1645         bmic_ident_physdev_t *bmic_phy_info = NULL;
1646         pqi_scsi_dev_t **new_device_list = NULL;
1647         pqi_scsi_dev_t *device = NULL;
1648         
1649
1650         DBG_FUNC("IN\n");
1651
1652         ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
1653                                       &phys_data_length, &log_data_length);
1654
1655         if (ret)
1656                 goto err_out;
1657
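        /* REPORT LUNS returns the list length big-endian; convert before computing entry counts. */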
1658         physical_cnt = BE_32(physical_dev_list->header.list_length) 
1659                 / sizeof(physical_dev_list->lun_entries[0]);
1660         
1661         logical_cnt = BE_32(logical_dev_list->header.list_length)
1662                 / sizeof(logical_dev_list->lun_entries[0]);
1663
1664         DBG_DISC("physical_cnt %d logical_cnt %d\n", physical_cnt, logical_cnt);
1665
1666         if (physical_cnt) {
1667                 bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
1668                 if (bmic_phy_info == NULL) {
1669                         ret = PQI_STATUS_FAILURE;
1670                         DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret);
1671                         goto err_out;
1672                 }
1673         }
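        /* Allocate one candidate device slot per reported LUN (physical + logical). */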
1674         phy_log_dev_cnt = physical_cnt + logical_cnt;
1675         new_device_list = os_mem_alloc(softs,
1676                                 sizeof(*new_device_list) * phy_log_dev_cnt);
1677
1678         if (new_device_list == NULL) {
1679                 ret = PQI_STATUS_FAILURE;
1680                 DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1681                 goto err_out;
1682         }
1683
1684         for (i = 0; i < phy_log_dev_cnt; i++) {
1685                 new_device_list[i] = os_mem_alloc(softs,
1686                                                 sizeof(*new_device_list[i]));
1687                 if (new_device_list[i] == NULL) {
1688                         ret = PQI_STATUS_FAILURE;
1689                         DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1690                         ndev_allocated = i;
1691                         goto err_out;
1692                 }
1693         }
1694
1695         ndev_allocated = phy_log_dev_cnt;
1696         new_dev_cnt = 0;
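        /* Walk physical entries first, then logical entries, filtering and classifying each LUN. */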
1697         for (i = 0; i < phy_log_dev_cnt; i++) {
1698
1699                 if (i < physical_cnt) {
1700                         is_physical_device = true;
1701                         lun_ext_entry = &physical_dev_list->lun_entries[i];
1702                 } else {
1703                         is_physical_device = false;
1704                         lun_ext_entry =
1705                                 &logical_dev_list->lun_entries[i - physical_cnt];
1706                 }
1707
1708                 scsi3addr = lun_ext_entry->lunid;
1709                 /* Save the target SAS address for external RAID devices */
1710                 if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
1711                         int target = lun_ext_entry->lunid[3] & 0x3f;
1712                         softs->target_sas_addr[target] = BE_64(lun_ext_entry->wwid);
1713                 }
1714
1715                 /* Skip masked physical non-disk devices. */
1716                 if (MASKED_DEVICE(scsi3addr) && is_physical_device
1717                                 && (lun_ext_entry->ioaccel_handle == 0))
1718                         continue;
1719
1720                 device = new_device_list[new_dev_cnt];
1721                 memset(device, 0, sizeof(*device));
1722                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1723                 device->wwid = lun_ext_entry->wwid;
1724                 device->is_physical_device = is_physical_device;
1725                 if (!is_physical_device)
1726                         device->is_external_raid_device =
1727                                 pqisrc_is_external_raid_addr(scsi3addr);
1728                 
1729
1730                 /* Get device type, vendor, model, device ID. */
1731                 ret = pqisrc_get_dev_data(softs, device);
1732                 if (ret) {
1733                         DBG_WARN("Inquiry failed, skipping device %016llx\n",
1734                                  (unsigned long long)BE_64(*((uint64_t *)device->scsi3addr)));
1735                         DBG_DISC("INQUIRY FAILED \n");
1736                         continue;
1737                 }
1738                 pqisrc_assign_btl(device);
1739
1740                 /*
1741                  * Expose all devices except for physical devices that
1742                  * are masked.
1743                  */
1744                 if (device->is_physical_device &&
1745                         MASKED_DEVICE(scsi3addr))
1746                         device->expose_device = false;
1747                 else
1748                         device->expose_device = true;
1749
1750                 if (device->is_physical_device &&
1751                     (lun_ext_entry->device_flags &
1752                      REPORT_LUN_DEV_FLAG_AIO_ENABLED) &&
1753                      lun_ext_entry->ioaccel_handle) {
1754                         device->aio_enabled = true;
1755                 }
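                /* Classify by device type; only types counted here make it into the new device list. */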
1756                 switch (device->devtype) {
1757                 case ROM_DEVICE:
1758                         /*
1759                          * We don't *really* support actual CD-ROM devices,
1760                          * but we do support the HP "One Button Disaster
1761                          * Recovery" tape drive which temporarily pretends to
1762                          * be a CD-ROM drive.
1763                          */
1764                         if (device->is_obdr_device)
1765                                 new_dev_cnt++;
1766                         break;
1767                 case DISK_DEVICE:
1768                 case ZBC_DEVICE:
1769                         if (device->is_physical_device) {
1770                                 device->ioaccel_handle =
1771                                         lun_ext_entry->ioaccel_handle;
1772                                 device->sas_address = BE_64(lun_ext_entry->wwid);
1773                                 pqisrc_get_physical_device_info(softs, device,
1774                                         bmic_phy_info);
1775                         }
1776                         new_dev_cnt++;
1777                         break;
1778                 case ENCLOSURE_DEVICE:
1779                         if (device->is_physical_device) {
1780                                 device->sas_address = BE_64(lun_ext_entry->wwid);
1781                         }
1782                         new_dev_cnt++;
1783                         break;  
1784                 case TAPE_DEVICE:
1785                 case MEDIUM_CHANGER_DEVICE:
1786                         new_dev_cnt++;
1787                         break;
1788                 case RAID_DEVICE:
1789                         /*
1790                          * Only present the HBA controller itself as a RAID
1791                          * controller.  If it's a RAID controller other than
1792                          * the HBA itself (an external RAID controller, MSA500
1793                          * or similar), don't present it.
1794                          */
1795                         if (pqisrc_is_hba_lunid(scsi3addr))
1796                                 new_dev_cnt++;
1797                         break;
1798                 case SES_DEVICE:
1799                 case CONTROLLER_DEVICE:
1800                         break;
1801                 }
1802         }
1803         DBG_DISC("new_dev_cnt %d\n", new_dev_cnt);
1804
1805         pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
1806         
1807 err_out:
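        /*
         * Common cleanup: devices retained by pqisrc_update_device_list() were
         * detached from new_device_list (set to NULL), so only leftover scratch
         * entries and the report-LUN buffers are freed below.
         */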
1808         if (new_device_list) {
1809                 for (i = 0; i < ndev_allocated; i++) {
1810                         if (new_device_list[i]) {
1811                                 if(new_device_list[i]->raid_map)
1812                                         os_mem_free(softs, (char *)new_device_list[i]->raid_map,
1813                                                                                 sizeof(pqisrc_raid_map_t));
1814                                 os_mem_free(softs, (char*)new_device_list[i],
1815                                                                 sizeof(*new_device_list[i]));
1816                         }
1817                 }
1818                 os_mem_free(softs, (char *)new_device_list,
1819                                         sizeof(*new_device_list) * ndev_allocated); 
1820         }
1821         if(physical_dev_list)
1822                 os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
1823         if(logical_dev_list)
1824                 os_mem_free(softs, (char *)logical_dev_list, log_data_length);
1825         if (bmic_phy_info)
1826                 os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
1827         
1828         DBG_FUNC("OUT\n");
1829
1830         return ret;
1831 }
1832
1833 /*
1834  * Clean up memory allocated for devices.
1835  */
1836 void pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
1837 {
1838
1839         int i = 0, j = 0;
1840         pqi_scsi_dev_t *dvp = NULL;
1841         DBG_FUNC("IN\n");
1842         
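        /* Walk the whole device table and release every remaining device. */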
1843         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1844                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1845                         if (softs->device_list[i][j] == NULL) 
1846                                 continue;
1847                         dvp = softs->device_list[i][j];
1848                         pqisrc_device_mem_free(softs, dvp);
1849                 }
1850         }
1851         DBG_FUNC("OUT\n");
1852 }
1853