sys/dev/smartpqi/smartpqi_discovery.c
1 /*-
2  * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25
26 /* $FreeBSD$ */
27
28 #include "smartpqi_includes.h"
29
30 #define MAX_RETRIES             3
31 #define PQISRC_INQUIRY_TIMEOUT  30
32
33 /* Validate the scsi sense response code */
34 static inline
35 boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
36 {
37         DBG_FUNC("IN\n");
38
39         if (!sshdr)
40                 return false;
41
42         DBG_FUNC("OUT\n");
43
44         return (sshdr->response_code & 0x70) == 0x70;
45 }
46
47 /*
48  * Initialize the target ID pool for HBA/PDs.
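 * The pool behaves as a LIFO stack seeded in descending order from
 * (PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1), so the lowest IDs are
 * handed out first.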
49  */
50 void
51 pqisrc_init_targetid_pool(pqisrc_softstate_t *softs)
52 {
53         int i, tid = PQI_MAX_PHYSICALS + PQI_MAX_LOGICALS - 1;
54
55         for(i = 0; i < PQI_MAX_PHYSICALS; i++) {
56                 softs->tid_pool.tid[i] = tid--;
57         }
58         softs->tid_pool.index = i - 1;
59 }
60
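/* Pop a free target ID from the pool; returns INVALID_ELEM when the pool is exhausted. */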
61 int
62 pqisrc_alloc_tid(pqisrc_softstate_t *softs)
63 {
64
65         if(softs->tid_pool.index <= -1) {
66                 DBG_ERR("Target ID exhausted\n");
67                 return INVALID_ELEM;
68         }
69         
70         return  softs->tid_pool.tid[softs->tid_pool.index--];
71 }
72
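/* Push a released target ID back onto the pool. */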
73 void
74 pqisrc_free_tid(pqisrc_softstate_t *softs, int tid)
75 {
76         if(softs->tid_pool.index >= (PQI_MAX_PHYSICALS - 1)) {
77                 DBG_ERR("Target ID queue is full\n");
78                 return;
79         }
80
81         softs->tid_pool.index++;
82         softs->tid_pool.tid[softs->tid_pool.index] = tid;
83 }
84
85 /* Update SCSI sense info into a local sense header buffer */
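/*
 * Response codes 0x72/0x73 carry descriptor-format sense data (sense key,
 * ASC and ASCQ in bytes 1-3); lower codes carry fixed-format sense data
 * (sense key in byte 2, ASC/ASCQ in bytes 12-13).
 */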
86 boolean_t
87 pqisrc_update_scsi_sense(const uint8_t *buff, int len,
88                               struct sense_header_scsi *header)
89 {
90
91         DBG_FUNC("IN\n");
92
93         if (!buff || !len)
94                 return false;
95
96         memset(header, 0, sizeof(struct sense_header_scsi));
97
98         header->response_code = (buff[0] & 0x7f);
99
100         if (!pqisrc_scsi_sense_valid(header))
101                 return false;
102
103         if (header->response_code >= 0x72) {
104                 /* descriptor format */
105                 if (len > 1)
106                         header->sense_key = (buff[1] & 0xf);
107                 if (len > 2)
108                         header->asc = buff[2];
109                 if (len > 3)
110                         header->ascq = buff[3];
111                 if (len > 7)
112                         header->additional_length = buff[7];
113         } else {
114                  /* fixed format */
115                 if (len > 2)
116                         header->sense_key = (buff[2] & 0xf);
117                 if (len > 7) {
118                         len = (len < (buff[7] + 8)) ?
119                                         len : (buff[7] + 8);
120                         if (len > 12)
121                                 header->asc = buff[12];
122                         if (len > 13)
123                                 header->ascq = buff[13];
124                 }
125         }
126
127         DBG_FUNC("OUT\n");
128
129         return true;
130 }
131
132 /*
133  * Function used to build the internal raid request and analyze the response
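 * When datasize is non-zero a DMA buffer is allocated and described by a
 * single SG element; the CDB is built for the requested command, the request
 * is submitted on the default operational RAID IB queue, and once the
 * firmware completes it the data (or the error info) is copied back to the
 * caller.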
134  */
135 int
136 pqisrc_build_send_raid_request(pqisrc_softstate_t *softs,  pqisrc_raid_req_t *request,
137                             void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
138                             raid_path_error_info_elem_t *error_info)
139 {
140
141         uint8_t *cdb;
142         int ret = PQI_STATUS_SUCCESS;
143         uint32_t tag = 0;
144         struct dma_mem device_mem;
145         sgt_t *sgd;
146
147         ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
148         ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
149
150         rcb_t *rcb = NULL;
151
152         DBG_FUNC("IN\n");
153
154         memset(&device_mem, 0, sizeof(struct dma_mem));
155
156         /* For TEST_UNIT_READY (TUR): datasize is 0 and buff is NULL */
157         if (datasize) {
158                 device_mem.tag = "device_mem";
159                 device_mem.size = datasize;
160                 device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;
161
162                 ret = os_dma_mem_alloc(softs, &device_mem);
163
164                 if (ret) {
165                         DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
166                         return ret;
167                 }
168
169                 sgd = (sgt_t *)&request->sg_descriptors[0];
170
171                 sgd->addr = device_mem.dma_addr;
172                 sgd->len = datasize;
173                 sgd->flags = SG_FLAG_LAST;
174
175         }
176
177         /* Build raid path request */
178         request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
179
180         request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t,
181                                                         sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH);
182         request->buffer_length = LE_32(datasize);
183         memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
184         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
185         request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
186
187         cdb = request->cdb;
188
189         switch (cmd) {
190         case SA_INQUIRY:
191                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
192                 cdb[0] = SA_INQUIRY;
193                 if (vpd_page & VPD_PAGE) {
194                         cdb[1] = 0x1;
195                         cdb[2] = (uint8_t)vpd_page;
196                 }
197                 cdb[4] = (uint8_t)datasize;
198                 if (softs->timeout_in_passthrough) {
199                         request->timeout_in_sec = PQISRC_INQUIRY_TIMEOUT;
200                 }
201                 break;
202         case SA_REPORT_LOG:
203         case SA_REPORT_PHYS:
204                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
205                 cdb[0] = cmd;
206                 if (cmd == SA_REPORT_PHYS)
207                         cdb[1] = SA_REPORT_PHYS_EXTENDED;
208                 else
209                         cdb[1] = SA_REPORT_LOG_EXTENDED;
210                 cdb[8] = (uint8_t)((datasize) >> 8);
211                 cdb[9] = (uint8_t)datasize;
212                 break;
213         case PQI_LOG_EXT_QUEUE_ENABLE:
214                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
215                 cdb[0] = SA_REPORT_LOG;
216                 cdb[1] = (PQI_LOG_EXT_QUEUE_DEPTH_ENABLED | SA_REPORT_LOG_EXTENDED);
217                 cdb[8] = (uint8_t)((datasize) >> 8);
218                 cdb[9] = (uint8_t)datasize;
219                 break;
220         case TEST_UNIT_READY:
221                 request->data_direction = SOP_DATA_DIR_NONE;
222                 break;
223         case SA_GET_RAID_MAP:
224                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
225                 cdb[0] = SA_CISS_READ;
226                 cdb[1] = cmd;
227                 cdb[8] = (uint8_t)((datasize) >> 8);
228                 cdb[9] = (uint8_t)datasize;
229                 break;
230         case SA_CACHE_FLUSH:
231                 request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
232                 memcpy(device_mem.virt_addr, buff, datasize);
233                 cdb[0] = BMIC_WRITE;
234                 cdb[6] = BMIC_CACHE_FLUSH;
235                 cdb[7] = (uint8_t)((datasize)  << 8);
236                 cdb[8] = (uint8_t)((datasize)  >> 8);
237                 break;
238         case BMIC_IDENTIFY_CONTROLLER:
239         case BMIC_IDENTIFY_PHYSICAL_DEVICE:
240                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
241                 cdb[0] = BMIC_READ;
242                 cdb[6] = cmd;
243                 cdb[7] = (uint8_t)((datasize)  << 8);
244                 cdb[8] = (uint8_t)((datasize)  >> 8);
245                 break;
246         case BMIC_WRITE_HOST_WELLNESS:
247                 request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
248                 memcpy(device_mem.virt_addr, buff, datasize);
249                 cdb[0] = BMIC_WRITE;
250                 cdb[6] = cmd;
251                 cdb[7] = (uint8_t)((datasize)  << 8);
252                 cdb[8] = (uint8_t)((datasize)  >> 8);
253                 break;
254         case BMIC_SENSE_SUBSYSTEM_INFORMATION:
255                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
256                 cdb[0] = BMIC_READ;
257                 cdb[6] = cmd;
258                 cdb[7] = (uint8_t)((datasize)  << 8);
259                 cdb[8] = (uint8_t)((datasize)  >> 8);
260                 break;
261         default:
262                 DBG_ERR("unknown command 0x%x\n", cmd);
263                 ret = PQI_STATUS_FAILURE;
264                 return ret;
265         }
266
267         tag = pqisrc_get_tag(&softs->taglist);
268         if (INVALID_ELEM == tag) {
269                 DBG_ERR("Tag not available\n");
270                 ret = PQI_STATUS_FAILURE;
271                 goto err_notag;
272         }
273
274         ((pqisrc_raid_req_t *)request)->request_id = tag;
275         ((pqisrc_raid_req_t *)request)->error_index = ((pqisrc_raid_req_t *)request)->request_id;
276         ((pqisrc_raid_req_t *)request)->response_queue_id = ob_q->q_id;
277         rcb = &softs->rcb[tag];
278         rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
279         rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
280
281         rcb->req_pending = true;
282         rcb->tag = tag;
283         /* Submit Command */
284         ret = pqisrc_submit_cmnd(softs, ib_q, request);
285
286         if (ret != PQI_STATUS_SUCCESS) {
287                 DBG_ERR("Unable to submit command\n");
288                 goto err_out;
289         }
290
291         ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
292         if (ret != PQI_STATUS_SUCCESS) {
293                 DBG_ERR("Internal RAID request timed out: cmd : 0x%x\n", cmd);
294                 goto err_out;
295         }
296
297         if (datasize) {
298                 if (buff) {
299                         memcpy(buff, device_mem.virt_addr, datasize);
300                 }
301                 os_dma_mem_free(softs, &device_mem);
302         }
303
304         ret = rcb->status;
305         if (ret) {
306                 if(error_info) {
307                         memcpy(error_info,
308                                rcb->error_info,
309                                sizeof(*error_info));
310
311                         if (error_info->data_out_result ==
312                             PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
313                                 ret = PQI_STATUS_SUCCESS;
314                         }
315                         else{
316                                 DBG_DISC("Error!! Bus=%u Target=%u, Cmd=0x%x, "
317                                         "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr),
318                                         BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
319                                         cmd, ret);
320                                 ret = PQI_STATUS_FAILURE;
321                         }
322                 }
323         } else {
324                 if(error_info) {
325                         ret = PQI_STATUS_SUCCESS;
326                         memset(error_info, 0, sizeof(*error_info));
327                 }
328         }
329
330         os_reset_rcb(rcb);
331         pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
332         DBG_FUNC("OUT\n");
333         return ret;
334
335 err_out:
336         DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n",
337                 BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr),
338                 cmd, ret);
339         os_reset_rcb(rcb);
340         pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
341 err_notag:
342         if (datasize)
343                 os_dma_mem_free(softs, &device_mem);
344         DBG_FUNC("FAILED \n");
345         return ret;
346 }
347
348 /* Common function used to send REPORT PHYSICAL and REPORT LOGICAL LUNs commands */
349 static int
350 pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
351         void *buff, size_t buf_len)
352 {
353         int ret;
354         pqisrc_raid_req_t request;
355
356         DBG_FUNC("IN\n");
357
358         memset(&request, 0, sizeof(request));
359         ret =  pqisrc_build_send_raid_request(softs, &request, buff,
360                                 buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
361
362         DBG_FUNC("OUT\n");
363
364         return ret;
365 }
366
367 /* subroutine used to get physical and logical luns of the device */
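/*
 * The list length is read from the report header first; the full list is
 * then fetched and, if it grew in the meantime, the buffer is reallocated
 * and the request retried.
 */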
368 int
369 pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
370                 reportlun_data_ext_t **buff, size_t *data_length)
371 {
372         int ret;
373         size_t list_len;
374         size_t data_len;
375         size_t new_lun_list_length;
376         reportlun_data_ext_t *lun_data;
377         reportlun_header_t report_lun_header;
378
379         DBG_FUNC("IN\n");
380
381         ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
382                 sizeof(report_lun_header));
383
384         if (ret) {
385                 DBG_ERR("failed return code: %d\n", ret);
386                 return ret;
387         }
388         list_len = BE_32(report_lun_header.list_length);
389
390 retry:
391         data_len = sizeof(reportlun_header_t) + list_len;
392         *data_length = data_len;
393
394         lun_data = os_mem_alloc(softs, data_len);
395
396         if (!lun_data) {
397                 DBG_ERR("failed to allocate memory for lun_data\n");
398                 return PQI_STATUS_FAILURE;
399         }
400
401         if (list_len == 0) {
402                 DBG_DISC("list_len is 0\n");
403                 memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
404                 goto out;
405         }
406
407         ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
408
409         if (ret) {
410                 DBG_ERR("error\n");
411                 goto error;
412         }
413
414         new_lun_list_length = BE_32(lun_data->header.list_length);
415
416         if (new_lun_list_length > list_len) {
417                 list_len = new_lun_list_length;
418                 os_mem_free(softs, (void *)lun_data, data_len);
419                 goto retry;
420         }
421
422 out:
423         *buff = lun_data;
424         DBG_FUNC("OUT\n");
425         return 0;
426
427 error:
428         os_mem_free(softs, (void *)lun_data, data_len);
429         DBG_ERR("FAILED\n");
430         return ret;
431 }
432
433 /*
434  * Function used to grab queue depth ext lun data for logical devices
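 * Follows the same fetch, reallocate and retry pattern as
 * pqisrc_get_physical_logical_luns().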
435  */
436 static int
437 pqisrc_get_queue_lun_list(pqisrc_softstate_t *softs, uint8_t cmd,
438                 reportlun_queue_depth_data_t **buff, size_t *data_length)
439 {
440         int ret;
441         size_t list_len;
442         size_t data_len;
443         size_t new_lun_list_length;
444         reportlun_queue_depth_data_t *lun_data;
445         reportlun_header_t report_lun_header;
446
447         DBG_FUNC("IN\n");
448
449         ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
450                 sizeof(report_lun_header));
451
452         if (ret) {
453                 DBG_ERR("failed return code: %d\n", ret);
454                 return ret;
455         }
456         list_len = BE_32(report_lun_header.list_length);
457 retry:
458         data_len = sizeof(reportlun_header_t) + list_len;
459         *data_length = data_len;
460         lun_data = os_mem_alloc(softs, data_len);
461
462         if (!lun_data) {
463                 DBG_ERR("failed to allocate memory for lun_data\n");
464                 return PQI_STATUS_FAILURE;
465         }
466
467         if (list_len == 0) {
468                 DBG_INFO("list_len is 0\n");
469                 memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
470                 goto out;
471         }
472         ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
473
474         if (ret) {
475                 DBG_ERR("error\n");
476                 goto error;
477         }
478         new_lun_list_length = BE_32(lun_data->header.list_length);
479
480         if (new_lun_list_length > list_len) {
481                 list_len = new_lun_list_length;
482                 os_mem_free(softs, (void *)lun_data, data_len);
483                 goto retry;
484         }
485
486 out:
487         *buff = lun_data;
488         DBG_FUNC("OUT\n");
489         return 0;
490
491 error:
492         os_mem_free(softs, (void *)lun_data, data_len);
493         DBG_ERR("FAILED\n");
494         return ret;
495 }
496
497 /*
498  * Function used to get physical and logical device list
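 * An extra zeroed entry is appended to the logical device list to represent
 * the controller itself.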
499  */
500 static int
501 pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
502         reportlun_data_ext_t **physical_dev_list,
503         reportlun_data_ext_t **logical_dev_list,
504         reportlun_queue_depth_data_t **queue_dev_list,
505         size_t *queue_data_length,
506         size_t *phys_data_length,
507         size_t *log_data_length)
508 {
509         int ret = PQI_STATUS_SUCCESS;
510         size_t logical_list_length;
511         size_t logdev_data_length;
512         size_t data_length;
513         reportlun_data_ext_t *local_logdev_list;
514         reportlun_data_ext_t *logdev_data;
515         reportlun_header_t report_lun_header;
516
517         DBG_FUNC("IN\n");
518
519         ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
520         if (ret) {
521                 DBG_ERR("report physical LUNs failed");
522                 return ret;
523         }
524
525         ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
526         if (ret) {
527                 DBG_ERR("report logical LUNs failed");
528                 return ret;
529         }
530
531         ret = pqisrc_get_queue_lun_list(softs, PQI_LOG_EXT_QUEUE_ENABLE, queue_dev_list, queue_data_length);
532         if (ret) {
533                 DBG_ERR("report queue depth LUNs failed");
534                 return ret;
535         }
536
537         logdev_data = *logical_dev_list;
538
539         if (logdev_data) {
540                 logical_list_length =
541                         BE_32(logdev_data->header.list_length);
542         } else {
543                 memset(&report_lun_header, 0, sizeof(report_lun_header));
544                 logdev_data =
545                         (reportlun_data_ext_t *)&report_lun_header;
546                 logical_list_length = 0;
547         }
548
549         logdev_data_length = sizeof(reportlun_header_t) +
550                 logical_list_length;
551
552         /* Adding LOGICAL device entry for controller */
553         local_logdev_list = os_mem_alloc(softs,
554                                             logdev_data_length + sizeof(reportlun_ext_entry_t));
555         if (!local_logdev_list) {
556                 data_length = *log_data_length;
557                 os_mem_free(softs, (char *)*logical_dev_list, data_length);
558                 *logical_dev_list = NULL;
559                 return PQI_STATUS_FAILURE;
560         }
561
562         memcpy(local_logdev_list, logdev_data, logdev_data_length);
563         memset((uint8_t *)local_logdev_list + logdev_data_length, 0,
564                 sizeof(reportlun_ext_entry_t));
565         local_logdev_list->header.list_length = BE_32(logical_list_length +
566                                                         sizeof(reportlun_ext_entry_t));
567         data_length = *log_data_length;
568         os_mem_free(softs, (char *)*logical_dev_list, data_length);
569         *log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t);
570         *logical_dev_list = local_logdev_list;
571
572         DBG_FUNC("OUT\n");
573
574         return ret;
575 }
576
577 /* Subroutine used to set Bus-Target-Lun for the requested device */
578 static inline void
579 pqisrc_set_btl(pqi_scsi_dev_t *device,
580         int bus, int target, int lun)
581 {
582         DBG_FUNC("IN\n");
583
584         device->bus = bus;
585         device->target = target;
586         device->lun = lun;
587
588         DBG_FUNC("OUT\n");
589 }
590
591 inline
592 boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
593 {
594         return device->is_external_raid_device;
595 }
596
597 static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
598 {
599         return scsi3addr[2] != 0;
600 }
601
602 /* Function used to assign Bus-Target-Lun for the requested device */
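/*
 * The controller LUN maps to (PQI_HBA_BUS, PQI_CTLR_INDEX); external RAID
 * volumes map to PQI_EXTERNAL_RAID_VOLUME_BUS with target and LUN decoded
 * from the LUN ID; internal logical volumes map to PQI_RAID_VOLUME_BUS,
 * target 0.
 */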
603 static void
604 pqisrc_assign_btl(pqi_scsi_dev_t *device)
605 {
606         uint8_t *scsi3addr;
607         uint32_t lunid;
608         uint32_t bus;
609         uint32_t target;
610         uint32_t lun;
611         DBG_FUNC("IN\n");
612
613         scsi3addr = device->scsi3addr;
614         lunid = GET_LE32(scsi3addr);
615
616         if (pqisrc_is_hba_lunid(scsi3addr)) {
617                 /* The specified device is the controller. */
618                 pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, (lunid & 0x3fff) + 1);
619                 device->target_lun_valid = true;
620                 return;
621         }
622
623         if (pqisrc_is_logical_device(device)) {
624                 if (pqisrc_is_external_raid_device(device)) {
625                         DBG_DISC("External RAID device\n");
626                         bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
627                         target = (lunid >> 16) & 0x3fff;
628                         lun = lunid & 0xff;
629                 } else {
630                         bus = PQI_RAID_VOLUME_BUS;
631                         lun = (lunid & 0x3fff) + 1;
632                         target = 0;
633                 }
634                 pqisrc_set_btl(device, bus, target, lun);
635                 device->target_lun_valid = true;
636                 return;
637         }
638
639         DBG_FUNC("OUT\n");
640 }
641
642 /* Build and send the internal INQUIRY command to particular device */
643 int
644 pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
645         uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
646 {
647         int ret = PQI_STATUS_SUCCESS;
648         pqisrc_raid_req_t request;
649         raid_path_error_info_elem_t error_info;
650
651         DBG_FUNC("IN\n");
652
653         memset(&request, 0, sizeof(request));
654         ret =  pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
655                                                                 SA_INQUIRY, vpd_page, scsi3addr, &error_info);
656
657         DBG_FUNC("OUT\n");
658         return ret;
659 }
660
661 #if 0
662 /* Function used to parse the sense information from response */
663 static void
664 pqisrc_fetch_sense_info(const uint8_t *sense_data,
665         unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
666 {
667         struct sense_header_scsi header;
668
669         DBG_FUNC("IN\n");
670
671         *sense_key = 0;
672         *ascq = 0;
673         *asc = 0;
674
675         if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) {
676                 *sense_key = header.sense_key;
677                 *asc = header.asc;
678                 *ascq = header.ascq;
679         }
680
681         DBG_DISC("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq);
682
683         DBG_FUNC("OUT\n");
684 }
685 #endif
686
687 /* Determine logical volume status from the VPD buffer. */
688 static void pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
689         pqi_scsi_dev_t *device)
690 {
691         int ret;
692         uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
693         uint8_t vpd_size = sizeof(vpd_volume_status);
694         uint8_t offline = true;
695         size_t page_length;
696         vpd_volume_status *vpd;
697
698         DBG_FUNC("IN\n");
699
700         vpd = os_mem_alloc(softs, vpd_size);
701         if (vpd == NULL)
702                 goto out;
703
704         /* Get the size of the VPD return buff. */
705         ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
706                 (uint8_t *)vpd, vpd_size);
707
708         if (ret) {
709                 DBG_WARN("Inquiry returned failed status\n");
710                 goto out;
711         }
712
713         if (vpd->page_code != SA_VPD_LV_STATUS) {
714                 DBG_WARN("Returned invalid buffer\n");
715                 goto out;
716         }
717
718         page_length = offsetof(vpd_volume_status, volume_status) + vpd->page_length;
719         if (page_length < vpd_size)
720                 goto out;
721
722         status = vpd->volume_status;
723         offline = (vpd->flags & SA_LV_FLAGS_NO_HOST_IO)!=0;
724
725 out:
726         device->volume_offline = offline;
727         device->volume_status = status;
728
729         os_mem_free(softs, (char *)vpd, vpd_size);
730
731         DBG_FUNC("OUT\n");
732
733         return;
734 }
735
736 /* Validate the RAID map parameters */
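/*
 * Checks the reported structure size and the layout_map_count expected for
 * the RAID level: 2 for RAID 1, 3 for RAID 1 triple (ADM), and a non-zero
 * blocks-per-row value for RAID 50/60.
 */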
737 static int
738 pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
739         pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
740 {
741         char *error_msg;
742         uint32_t raidmap_size;
743         uint32_t r5or6_blocks_per_row;
744
745         DBG_FUNC("IN\n");
746
747         raidmap_size = LE_32(raid_map->structure_size);
748         if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) {
749                 error_msg = "RAID map too small\n";
750                 goto error;
751         }
752
753 #if 0
754         phys_dev_num = LE_16(raid_map->layout_map_count) *
755         (LE_16(raid_map->data_disks_per_row) +
756         LE_16(raid_map->metadata_disks_per_row));
757 #endif
758
759         if (device->raid_level == SA_RAID_1) {
760                 if (LE_16(raid_map->layout_map_count) != 2) {
761                         error_msg = "invalid RAID-1 map\n";
762                         goto error;
763                 }
764         } else if (device->raid_level == SA_RAID_ADM) {
765                 if (LE_16(raid_map->layout_map_count) != 3) {
766                         error_msg = "invalid RAID-1(triple) map\n";
767                         goto error;
768                 }
769         } else if ((device->raid_level == SA_RAID_5 ||
770                 device->raid_level == SA_RAID_6) &&
771                 LE_16(raid_map->layout_map_count) > 1) {
772                 /* RAID 50/60 */
773                 r5or6_blocks_per_row =
774                         LE_16(raid_map->strip_size) *
775                         LE_16(raid_map->data_disks_per_row);
776                 if (r5or6_blocks_per_row == 0) {
777                         error_msg = "invalid RAID-5 or RAID-6 map\n";
778                         goto error;
779                 }
780         }
781
782         DBG_FUNC("OUT\n");
783
784         return 0;
785
786 error:
787         DBG_NOTE("%s\n", error_msg);
788         return PQI_STATUS_FAILURE;
789 }
790
791 /* Get device raidmap for the requested device */
792 static int
793 pqisrc_get_device_raidmap(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
794 {
795         int ret = PQI_STATUS_SUCCESS;
796         int raidmap_size;
797
798         pqisrc_raid_req_t request;
799         pqisrc_raid_map_t *raid_map;
800
801         DBG_FUNC("IN\n");
802
803         raid_map = os_mem_alloc(softs, sizeof(*raid_map));
804         if (!raid_map)
805                 return PQI_STATUS_FAILURE;
806
807         memset(&request, 0, sizeof(request));
808         ret =  pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map),
809                                         SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
810
811         if (ret) {
812                 DBG_ERR("error in build send raid req ret=%d\n", ret);
813                 goto err_out;
814         }
815
816         raidmap_size = LE_32(raid_map->structure_size);
817         if (raidmap_size > sizeof(*raid_map)) {
818                 DBG_NOTE("RAID map is larger than 1024 entries, requesting it again\n");
819                 os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
820
821                 raid_map = os_mem_alloc(softs, raidmap_size);
822                 if (!raid_map)
823                         return PQI_STATUS_FAILURE;
824                 memset(&request, 0, sizeof(request));
825
826                 ret =  pqisrc_build_send_raid_request(softs, &request, raid_map, raidmap_size,
827                                         SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
828                 if (ret) {
829                         DBG_ERR("error in build send raid req ret=%d\n", ret);
830                         goto err_out;
831                 }
832
833                 if(LE_32(raid_map->structure_size) != raidmap_size) {
834                         DBG_WARN("Expected raid map size %d bytes and got %d bytes\n",
835                                 raidmap_size,LE_32(raid_map->structure_size));
                        ret = PQI_STATUS_FAILURE;
836                         goto err_out;
837                 }
838         }
839
840         ret = pqisrc_raid_map_validation(softs, device, raid_map);
841         if (ret) {
842                 DBG_NOTE("error in raid map validation ret=%d\n", ret);
843                 goto err_out;
844         }
845
846         device->raid_map = raid_map;
847         DBG_FUNC("OUT\n");
848         return 0;
849
850 err_out:
851         os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
852         DBG_FUNC("FAILED \n");
853         return ret;
854 }
855
856 /* Get device ioaccel_status to validate the type of device */
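/*
 * Reads the ioaccel status VPD page; when offload is configured the RAID
 * map is fetched as well, and offload_enabled_pending is cleared if that
 * fetch fails.
 */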
857 static void
858 pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
859         pqi_scsi_dev_t *device)
860 {
861         int ret = PQI_STATUS_SUCCESS;
862         uint8_t *buff;
863         uint8_t ioaccel_status;
864
865         DBG_FUNC("IN\n");
866
867         buff = os_mem_alloc(softs, 64);
868         if (!buff)
869                 return;
870
871         ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
872                                         VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64);
873         if (ret) {
874                 DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
875                 goto err_out;
876         }
877
878         ioaccel_status = buff[IOACCEL_STATUS_BYTE];
879         device->offload_config =
880                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
881
882         if (device->offload_config) {
883                 device->offload_enabled_pending =
884                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
885                 if (pqisrc_get_device_raidmap(softs, device))
886                         device->offload_enabled_pending = false;
887         }
888
889         DBG_DISC("offload_config: 0x%x offload_enabled_pending: 0x%x \n",
890                         device->offload_config, device->offload_enabled_pending);
891
892 err_out:
893         os_mem_free(softs, (char*)buff, 64);
894         DBG_FUNC("OUT\n");
895 }
896
897 /* Get RAID level of requested device */
898 static void
899 pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
900 {
901         uint8_t raid_level;
902         uint8_t *buff;
903
904         DBG_FUNC("IN\n");
905
906         raid_level = SA_RAID_UNKNOWN;
907
908         buff = os_mem_alloc(softs, 64);
909         if (buff) {
910                 int ret;
911                 ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
912                         VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64);
913                 if (ret == 0) {
914                         raid_level = buff[8];
915                         if (raid_level > SA_RAID_MAX)
916                                 raid_level = SA_RAID_UNKNOWN;
917                 }
918                 os_mem_free(softs, (char*)buff, 64);
919         }
920
921         device->raid_level = raid_level;
922         DBG_DISC("RAID LEVEL: %x \n",  raid_level);
923         DBG_FUNC("OUT\n");
924 }
925
926 /* Parse the inquiry response and determine the type of device */
927 static int
928 pqisrc_get_dev_data(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
929 {
930         int ret = PQI_STATUS_SUCCESS;
931         uint8_t *inq_buff;
932         int retry = MAX_RETRIES;
933
934         DBG_FUNC("IN\n");
935
936         inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE);
937         if (!inq_buff)
938                 return PQI_STATUS_FAILURE;
939
940         while(retry--) {
941                 /* Send an inquiry to the device to see what it is. */
942                 ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
943                         OBDR_TAPE_INQ_SIZE);
944                 if (!ret)
945                         break;
946                 DBG_WARN("Retrying inquiry !!!\n");
947         }
948         if (ret)
949                 goto err_out;
950         pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
951         pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);
952
953         device->devtype = inq_buff[0] & 0x1f;
954         memcpy(device->vendor, &inq_buff[8],
955                 sizeof(device->vendor));
956         memcpy(device->model, &inq_buff[16],
957                 sizeof(device->model));
958         DBG_DISC("DEV_TYPE: %x VENDOR: %.8s MODEL: %.16s\n",  device->devtype, device->vendor, device->model);
959
960         if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
961                 if (pqisrc_is_external_raid_device(device)) {
962                         device->raid_level = SA_RAID_UNKNOWN;
963                         device->volume_status = SA_LV_OK;
964                         device->volume_offline = false;
965                 }
966                 else {
967                         pqisrc_get_dev_raid_level(softs, device);
968                         pqisrc_get_dev_ioaccel_status(softs, device);
969                         pqisrc_get_dev_vol_status(softs, device);
970                 }
971         }
972
973         /*
974          * Check if this is a One-Button-Disaster-Recovery device
975          * by looking for "$DR-10" at offset 43 in the inquiry data.
976          */
977         device->is_obdr_device = (device->devtype == ROM_DEVICE &&
978                 memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
979                         OBDR_SIG_LEN) == 0);
980 err_out:
981         os_mem_free(softs, (char*)inq_buff, OBDR_TAPE_INQ_SIZE);
982
983         DBG_FUNC("OUT\n");
984         return ret;
985 }
986
987 /*
988  * BMIC (Basic Management And Interface Commands) command
989  * to get the controller identify params
990  */
991 static int
992 pqisrc_identify_ctrl(pqisrc_softstate_t *softs, bmic_ident_ctrl_t *buff)
993 {
994         int ret = PQI_STATUS_SUCCESS;
995         pqisrc_raid_req_t request;
996
997         DBG_FUNC("IN\n");
998
999         memset(&request, 0, sizeof(request));
1000         ret =  pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff),
1001                                 BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1002         DBG_FUNC("OUT\n");
1003
1004         return ret;
1005 }
1006
1007 /* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
1008 int
1009 pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
1010 {
1011         int ret = PQI_STATUS_SUCCESS;
1012         bmic_ident_ctrl_t *identify_ctrl;
1013
1014         DBG_FUNC("IN\n");
1015
1016         identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl));
1017         if (!identify_ctrl) {
1018                 DBG_ERR("failed to allocate memory for identify_ctrl\n");
1019                 return PQI_STATUS_FAILURE;
1020         }
1021
1022         memset(identify_ctrl, 0, sizeof(*identify_ctrl));
1023
1024         ret = pqisrc_identify_ctrl(softs, identify_ctrl);
1025         if (ret)
1026                 goto out;
1027
1028         softs->fw_build_number = identify_ctrl->fw_build_number;
1029         memcpy(softs->fw_version, identify_ctrl->fw_version,
1030                 sizeof(identify_ctrl->fw_version));
1031         softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0';
1032         snprintf(softs->fw_version +
1033                 strlen(softs->fw_version),
1034                 sizeof(softs->fw_version) - strlen(softs->fw_version),
1035                 "-%u", identify_ctrl->fw_build_number);
1036 out:
1037         os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
1038         DBG_NOTE("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
1039         DBG_FUNC("OUT\n");
1040         return ret;
1041 }
1042
1043 /* BMIC command to determine scsi device identify params */
1044 static int
1045 pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
1046         pqi_scsi_dev_t *device,
1047         bmic_ident_physdev_t *buff,
1048         int buf_len)
1049 {
1050         int ret = PQI_STATUS_SUCCESS;
1051         uint16_t bmic_device_index;
1052         pqisrc_raid_req_t request;
1053
1054
1055         DBG_FUNC("IN\n");
1056
1057         memset(&request, 0, sizeof(request));
1058         bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
1059         request.cdb[2] = (uint8_t)bmic_device_index;
1060         request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
1061
1062         ret =  pqisrc_build_send_raid_request(softs, &request, buff, buf_len,
1063                                 BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1064         DBG_FUNC("OUT\n");
1065         return ret;
1066 }
1067
1068 /*
1069  * Function used to get the scsi device information using one of BMIC
1070  * BMIC_IDENTIFY_PHYSICAL_DEVICE
1071  */
1072 static void
1073 pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
1074         pqi_scsi_dev_t *device,
1075         bmic_ident_physdev_t *id_phys)
1076 {
1077         int ret = PQI_STATUS_SUCCESS;
1078
1079         DBG_FUNC("IN\n");
1080         memset(id_phys, 0, sizeof(*id_phys));
1081
1082         ret= pqisrc_identify_physical_disk(softs, device,
1083                 id_phys, sizeof(*id_phys));
1084         if (ret) {
1085                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1086                 return;
1087         }
1088
1089         device->queue_depth =
1090                 LE_16(id_phys->current_queue_depth_limit);
1091         device->device_type = id_phys->device_type;
1092         device->active_path_index = id_phys->active_path_number;
1093         device->path_map = id_phys->redundant_path_present_map;
1094         memcpy(&device->box,
1095                 &id_phys->alternate_paths_phys_box_on_port,
1096                 sizeof(device->box));
1097         memcpy(&device->phys_connector,
1098                 &id_phys->alternate_paths_phys_connector,
1099                 sizeof(device->phys_connector));
1100         device->bay = id_phys->phys_bay_in_box;
1101
1102         DBG_DISC("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n",  device->device_type, device->queue_depth);
1103         DBG_FUNC("OUT\n");
1104 }
1105
1106
1107 /* Function used to find the entry of the device in a list */
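/*
 * Returns DEVICE_UNCHANGED, DEVICE_CHANGED or DEVICE_NOT_FOUND and, when a
 * matching SCSI address exists, points *same_device at the existing entry.
 */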
1108 static
1109 device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
1110         pqi_scsi_dev_t *device_to_find, pqi_scsi_dev_t **same_device)
1111 {
1112         pqi_scsi_dev_t *device;
1113         int i,j;
1114         DBG_FUNC("IN\n");
1115         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1116                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1117                         if(softs->device_list[i][j] == NULL)
1118                                 continue;
1119                         device = softs->device_list[i][j];
1120                         if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
1121                                 device->scsi3addr)) {
1122                                 *same_device = device;
1123                                 if (pqisrc_device_equal(device_to_find, device)) {
1124                                         if (device_to_find->volume_offline)
1125                                                 return DEVICE_CHANGED;
1126                                         return DEVICE_UNCHANGED;
1127                                 }
1128                                 return DEVICE_CHANGED;
1129                         }
1130                 }
1131         }
1132         DBG_FUNC("OUT\n");
1133
1134         return DEVICE_NOT_FOUND;
1135 }
1136
1137
1138 /* Update an existing device entry with data from the newly discovered device */
1139 static void
1140 pqisrc_exist_device_update(pqisrc_softstate_t *softs,
1141         pqi_scsi_dev_t *device_exist, pqi_scsi_dev_t *new_device)
1142 {
1143         DBG_FUNC("IN\n");
1144         device_exist->expose_device = new_device->expose_device;
1145         memcpy(device_exist->vendor, new_device->vendor,
1146                 sizeof(device_exist->vendor));
1147         memcpy(device_exist->model, new_device->model,
1148                 sizeof(device_exist->model));
1149         device_exist->is_physical_device = new_device->is_physical_device;
1150         device_exist->is_external_raid_device =
1151                 new_device->is_external_raid_device;
1152
1153         if ((device_exist->volume_status == SA_LV_QUEUED_FOR_EXPANSION ||
1154                 device_exist->volume_status == SA_LV_UNDERGOING_EXPANSION) &&
1155                 new_device->volume_status == SA_LV_OK) {
1156                 device_exist->scsi_rescan = true;
1157         }
1158
1159         device_exist->sas_address = new_device->sas_address;
1160         device_exist->raid_level = new_device->raid_level;
1161         device_exist->queue_depth = new_device->queue_depth;
1162         device_exist->ioaccel_handle = new_device->ioaccel_handle;
1163         device_exist->volume_status = new_device->volume_status;
1164         device_exist->active_path_index = new_device->active_path_index;
1165         device_exist->path_map = new_device->path_map;
1166         device_exist->bay = new_device->bay;
1167         memcpy(device_exist->box, new_device->box,
1168                 sizeof(device_exist->box));
1169         memcpy(device_exist->phys_connector, new_device->phys_connector,
1170                 sizeof(device_exist->phys_connector));
1171         device_exist->offload_config = new_device->offload_config;
1172         device_exist->offload_enabled_pending =
1173                 new_device->offload_enabled_pending;
1174         device_exist->offload_to_mirror = 0;
1175         if (device_exist->raid_map)
1176                 os_mem_free(softs,
1177                             (char *)device_exist->raid_map,
1178                             sizeof(*device_exist->raid_map));
1179         device_exist->raid_map = new_device->raid_map;
1180         /* To prevent this from being freed later. */
1181         new_device->raid_map = NULL;
1182         DBG_FUNC("OUT\n");
1183 }
1184
1185 /* Find the physical disk that owns the given ioaccel_handle */
1186 static
1187 pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
1188         pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
1189 {
1190         pqi_scsi_dev_t *device;
1191         int i,j;
1192         DBG_FUNC("IN\n");
1193         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1194                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1195                         if(softs->device_list[i][j] == NULL)
1196                                 continue;
1197                         device = softs->device_list[i][j];
1198                         if (device->devtype != DISK_DEVICE)
1199                                 continue;
1200                         if (pqisrc_is_logical_device(device))
1201                                 continue;
1202                         if (device->ioaccel_handle == ioaccel_handle)
1203                                 return device;
1204                 }
1205         }
1206         DBG_FUNC("OUT\n");
1207
1208         return NULL;
1209 }
1210
1211 /* Set each logical drive's queue depth to the sum of its member physical disks' queue depths */
1212 static void
1213 pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
1214 {
1215         unsigned i;
1216         unsigned phys_dev_num;
1217         unsigned num_raidmap_entries;
1218         unsigned queue_depth;
1219         pqisrc_raid_map_t *raid_map;
1220         pqi_scsi_dev_t *device;
1221         raidmap_data_t *dev_data;
1222         pqi_scsi_dev_t *phys_disk;
1223         unsigned j;
1224         unsigned k;
1225
1226         DBG_FUNC("IN\n");
1227
1228         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1229                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1230                         if(softs->device_list[i][j] == NULL)
1231                                 continue;
1232                         device = softs->device_list[i][j];
1233                         if (device->devtype != DISK_DEVICE)
1234                                 continue;
1235                         if (!pqisrc_is_logical_device(device))
1236                                 continue;
1237                         if (pqisrc_is_external_raid_device(device))
1238                                 continue;
1239                         device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1240                         raid_map = device->raid_map;
1241                         if (!raid_map)
1242                                 return;
1243                         dev_data = raid_map->dev_data;
1244                         phys_dev_num = LE_16(raid_map->layout_map_count) *
1245                                         (LE_16(raid_map->data_disks_per_row) +
1246                                         LE_16(raid_map->metadata_disks_per_row));
1247                         num_raidmap_entries = phys_dev_num *
1248                                                 LE_16(raid_map->row_cnt);
1249
1250                         queue_depth = 0;
1251                         for (k = 0; k < num_raidmap_entries; k++) {
1252                                 phys_disk = pqisrc_identify_device_via_ioaccel(softs,
1253                                                 dev_data[k].ioaccel_handle);
1254
1255                                 if (!phys_disk) {
1256                                         DBG_WARN(
1257                                         "Failed to find physical disk handle for logical drive %016llx\n",
1258                                                 (unsigned long long)BE_64(device->scsi3addr[0]));
1259                                         device->offload_enabled = false;
1260                                         device->offload_enabled_pending = false;
1261                                         if (raid_map)
1262                                                 os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
1263                                         device->raid_map = NULL;
1264                                         return;
1265                                 }
1266
1267                                 queue_depth += phys_disk->queue_depth;
1268                         }
1269
1270                         device->queue_depth = queue_depth;
1271                 } /* end inner loop */
1272         }/* end outer loop */
1273         DBG_FUNC("OUT\n");
1274 }
1275
1276 /* Function used to add a scsi device to OS scsi subsystem */
1277 static int
1278 pqisrc_add_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1279 {
1280         DBG_FUNC("IN\n");
1281         DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1282                 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1283
1284         device->invalid = false;
1285
1286         if(device->expose_device) {
1287                 pqisrc_init_device_active_io(softs, device);
1288                 /* TBD: Call OS upper layer function to add the device entry */
1289                 os_add_device(softs,device);
1290         }
1291         DBG_FUNC("OUT\n");
1292         return PQI_STATUS_SUCCESS;
1293
1294 }
1295
1296 /* Function used to remove a scsi device from OS scsi subsystem */
1297 void
1298 pqisrc_remove_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1299 {
1300         DBG_FUNC("IN\n");
1301         DBG_NOTE("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1302                 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1303
1304         device->invalid = true;
1305         if (device->expose_device == false) {
1306                 /* Masked physical devices are not exposed to the storage stack.
1307                 * Hence, free the masked device's resources, such as
1308                 * device memory and target ID, here.
1309                 */
1310                 DBG_NOTE("Deallocated Masked Device Resources.\n");
1311                 pqisrc_free_device(softs,device);
1312                 return;
1313         }
1314         /* Wait for device outstanding Io's */
1315         pqisrc_wait_for_device_commands_to_complete(softs, device);
1316         /* Call OS upper layer function to remove the exposed device entry */
1317         os_remove_device(softs,device);
1318         DBG_FUNC("OUT\n");
1319 }
1320
1321 /*
1322  * When exposing a new device to the OS fails, adjust the internal device
1323  * list to stay consistent with the OS SCSI mid-layer list.
1324  */
1325 static void
1326 pqisrc_adjust_list(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1327 {
1328         DBG_FUNC("IN\n");
1329
1330         if (!device) {
1331                 DBG_ERR("softs = %p: device is NULL !!!\n", softs);
1332                 return;
1333         }
1334
1335         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1336         softs->device_list[device->target][device->lun] = NULL;
1337         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1338         pqisrc_device_mem_free(softs, device);
1339
1340         DBG_FUNC("OUT\n");
1341 }
1342
1343 /* Debug routine used to display the RAID volume status of the device */
1344 static void
1345 pqisrc_display_volume_status(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1346 {
1347         char *status;
1348
1349         DBG_FUNC("IN\n");
1350         switch (device->volume_status) {
1351         case SA_LV_OK:
1352                 status = "Volume is online.";
1353                 break;
1354         case SA_LV_UNDERGOING_ERASE:
1355                 status = "Volume is undergoing background erase process.";
1356                 break;
1357         case SA_LV_NOT_AVAILABLE:
1358                 status = "Volume is waiting for transforming volume.";
1359                 break;
1360         case SA_LV_UNDERGOING_RPI:
1361                 status = "Volume is undergoing rapid parity initialization process.";
1362                 break;
1363         case SA_LV_PENDING_RPI:
1364                 status = "Volume is queued for rapid parity initialization process.";
1365                 break;
1366         case SA_LV_ENCRYPTED_NO_KEY:
1367                 status = "Volume is encrypted and cannot be accessed because key is not present.";
1368                 break;
1369         case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1370                 status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.";
1371                 break;
1372         case SA_LV_UNDERGOING_ENCRYPTION:
1373                 status = "Volume is undergoing encryption process.";
1374                 break;
1375         case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1376                 status = "Volume is undergoing encryption re-keying process.";
1377                 break;
1378         case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1379                 status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled.";
1380                 break;
1381         case SA_LV_PENDING_ENCRYPTION:
1382                 status = "Volume is pending migration to encrypted state, but process has not started.";
1383                 break;
1384         case SA_LV_PENDING_ENCRYPTION_REKEYING:
1385                 status = "Volume is encrypted and is pending encryption rekeying.";
1386                 break;
1387         case SA_LV_STATUS_VPD_UNSUPPORTED:
1388                 status = "Volume status is not available through vital product data pages.";
1389                 break;
1390         case SA_LV_UNDERGOING_EXPANSION:
1391                 status = "Volume undergoing expansion";
1392                 break;
1393         case SA_LV_QUEUED_FOR_EXPANSION:
1394                 status = "Volume queued for expansion";
                break;
1395         case SA_LV_EJECTED:
1396                 status = "Volume ejected";
1397                 break;
1398         case SA_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
1399                 status = "Volume has wrong physical drive replaced";
1400                 break;
1401         case SA_LV_DISABLED_SCSI_ID_CONFLICT:
1402                 status = "Volume disabled scsi id conflict";
1403                 break;
1404         case SA_LV_HARDWARE_HAS_OVERHEATED:
1405                 status = "Volume hardware has over heated";
1406                 break;
1407         case SA_LV_HARDWARE_OVERHEATING:
1408                 status = "Volume hardware over heating";
1409                 break;
1410         case SA_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
1411                 status = "Volume physical drive connection problem";
1412                 break;
1413         default:
1414                 status = "Volume is in an unknown state.";
1415                 break;
1416         }
1417
1418         DBG_DISC("scsi BTL %d:%d:%d %s\n",
1419                 device->bus, device->target, device->lun, status);
1420         DBG_FUNC("OUT\n");
1421 }
1422
1423 void
1424 pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1425 {
1426         DBG_FUNC("IN\n");
1427         if (!device)
1428                 return;
1429         if (device->raid_map) {
1430                 os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
1431         }
1432         os_mem_free(softs, (char *)device, sizeof(*device));
1433         DBG_FUNC("OUT\n");
1434
1435 }
1436
1437 /* OS should call this function to free the scsi device */
1438 void
1439 pqisrc_free_device(pqisrc_softstate_t * softs,pqi_scsi_dev_t *device)
1440 {
1441         rcb_t *rcb;
1442         int i;
1443
1444         /* Clear the "device" field in each rcb.
1445          * Responses arriving after device removal must not access this field.
1446          */
1447         for(i = 1; i <= softs->max_outstanding_io; i++)
1448         {
1449                 rcb = &softs->rcb[i];
1450                 if(rcb->dvp == device) {
1451                         DBG_WARN("Pending requests for the device being removed\n");
1452                         rcb->dvp = NULL;
1453                 }
1454         }
1455
1456         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1457
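             /*
              * Physical devices get their target ID from the driver's TID pool
              * (pqisrc_alloc_tid()); return the ID here so it can be reused.
              */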
1458         if (!pqisrc_is_logical_device(device)) {
1459                 pqisrc_free_tid(softs,device->target);
1460         }
1461
1462         softs->device_list[device->target][device->lun] = NULL;
1463
1464         pqisrc_device_mem_free(softs, device);
1465
1466         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1467
1468 }
1469
1470 /* Update the newly added devices to the device list */
1471 static void
1472 pqisrc_update_device_list(pqisrc_softstate_t *softs,
1473         pqi_scsi_dev_t *new_device_list[], int num_new_devices)
1474 {
1475         int ret;
1476         int i;
1477         device_status_t dev_status;
1478         pqi_scsi_dev_t *device;
1479         pqi_scsi_dev_t *same_device;
1480         pqi_scsi_dev_t **added = NULL;
1481         pqi_scsi_dev_t **removed = NULL;
1482         int nadded = 0, nremoved = 0;
1483         int j;
1484         int tid = 0;
1485         boolean_t driver_queue_depth_flag = false;
1486
1487         DBG_FUNC("IN\n");
1488
1489         added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES);
1490         removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES);
1491
1492         if (!added || !removed) {
1493                 DBG_WARN("Out of memory \n");
1494                 goto free_and_out;
1495         }
1496
1497         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1498
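             /*
              * Mark-and-sweep: first mark every device in the current table as
              * gone; entries that also appear in the new list are un-marked in
              * the loop that follows.
              */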
1499         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1500                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1501                         if(softs->device_list[i][j] == NULL)
1502                                 continue;
1503                         device = softs->device_list[i][j];
1504                         device->device_gone = true;
1505                 }
1506         }
1507         DBG_IO("Device list used an array\n");
1508         for (i = 0; i < num_new_devices; i++) {
1509                 device = new_device_list[i];
1510
1511                 dev_status = pqisrc_scsi_find_entry(softs, device,
1512                         &same_device);
1513
1514                 switch (dev_status) {
1515                 case DEVICE_UNCHANGED:
1516                         /* New device already present in the existing device list */
1517                         device->new_device = false;
1518                         same_device->device_gone = false;
1519                         pqisrc_exist_device_update(softs, same_device, device);
1520                         break;
1521                 case DEVICE_NOT_FOUND:
1522                         /* Device not found in existing list */
1523                         device->new_device = true;
1524                         break;
1525                 case DEVICE_CHANGED:
1526                         /* Device at this address has changed; the old one is gone, add the new one */
1527                         device->new_device = true;
1528                         break;
1529                 default:
1530                         break;
1531                 }
1532         }
1533         /* Process all devices that have gone away. */
1534         for(i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) {
1535                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1536                         if(softs->device_list[i][j] == NULL)
1537                                 continue;
1538                         device = softs->device_list[i][j];
1539                         if (device->device_gone) {
1540                                 softs->device_list[device->target][device->lun] = NULL;
1541                                 removed[nremoved] = device;
1542                                 nremoved++;
1543                         }
1544                 }
1545         }
1546
1547         /* Process all new devices. */
1548         for (i = 0, nadded = 0; i < num_new_devices; i++) {
1549                 device = new_device_list[i];
1550                 if (!device->new_device)
1551                         continue;
1552                 if (device->volume_offline)
1553                         continue;
1554
1555                 /* physical device */
1556                 if (!pqisrc_is_logical_device(device)) {
1557                         tid = pqisrc_alloc_tid(softs);
1558                         if(INVALID_ELEM != tid)
1559                                 pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, tid, 0);
1560                 }
1561
1562                 /* This is not expected; we may lose the reference to the old device entry.
1563                  * If the target and LUN IDs match, the device should have been
1564                  * detected as an existing device, not as a new one.
1565                  */
1566                 if(softs->device_list[device->target][device->lun] != NULL) {
1567                         DBG_WARN("Overwriting T : %d L : %d\n", device->target, device->lun);
1568                 }
1569
1570                 softs->device_list[device->target][device->lun] = device;
1571
1572                 DBG_DISC("Added device %p at B : %d T : %d L : %d\n",device,
1573                         device->bus,device->target,device->lun);
1574                 /* To prevent this entry from being freed later. */
1575                 new_device_list[i] = NULL;
1576                 added[nadded] = device;
1577                 nadded++;
1578         }
1579
1580
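             /* Latch the pending offload-enabled state now that the device table is consistent. */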
1581         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1582                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1583                         if(softs->device_list[i][j] == NULL)
1584                                 continue;
1585                         device = softs->device_list[i][j];
1586                         device->offload_enabled = device->offload_enabled_pending;
1587                 }
1588         }
1589
1590         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1591
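             /* Tear down the removed devices outside the devlist lock. */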
1592         for(i = 0; i < nremoved; i++) {
1593                 device = removed[i];
1594                 if (device == NULL)
1595                         continue;
1596                 pqisrc_display_device_info(softs, "removed", device);
1597                 pqisrc_remove_device(softs, device);
1598                 
1599         }
1600
1601         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1602                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1603                         if(softs->device_list[i][j] == NULL)
1604                                 continue;
1605                         device = softs->device_list[i][j];
1606                         /*
1607                          * Notify the OS upper layer if the queue depth of
1608                          * any existing device has changed.
1609                          */
1610                         if (device->queue_depth !=
1611                                 device->advertised_queue_depth) {
1612                                 device->advertised_queue_depth = device->queue_depth;
1613                                 /* TBD: Call OS upper layer function to change device Q depth */
1614                         }
1615                         if (device->firmware_queue_depth_set == false)
1616                                 driver_queue_depth_flag = true;
1617                         if (device->scsi_rescan)
1618                                 os_rescan_target(softs, device);
1619                 }
1620         }
1621         /*
1622          * If the firmware-reported queue depth is corrupt or unusable,
1623          * fall back to the driver method to re-calculate the queue depth
1624          * for all logical devices.
1625          */
1626         if (driver_queue_depth_flag)
1627                 pqisrc_update_log_dev_qdepth(softs);
1628
1629         for(i = 0; i < nadded; i++) {
1630                 device = added[i];
1631                 if (device->expose_device) {
1632                         ret = pqisrc_add_device(softs, device);
1633                         if (ret) {
1634                                 DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
1635                                         device->bus, device->target,
1636                                         device->lun);
1637                                 pqisrc_adjust_list(softs, device);
1638                                 continue;
1639                         }
1640                 }
1641
1642                 pqisrc_display_device_info(softs, "added", device);
1643         }
1644
1645         /* Process all volumes that are offline. */
1646         for (i = 0; i < num_new_devices; i++) {
1647                 device = new_device_list[i];
1648                 if (!device)
1649                         continue;
1650                 if (!device->new_device)
1651                         continue;
1652                 if (device->volume_offline) {
1653                         pqisrc_display_volume_status(softs, device);
1654                         pqisrc_display_device_info(softs, "offline", device);
1655                 }
1656         }
1657
1658 free_and_out:
1659         if (added)
1660                 os_mem_free(softs, (char *)added,
1661                             sizeof(*added) * PQI_MAX_DEVICES);
1662         if (removed)
1663                 os_mem_free(softs, (char *)removed,
1664                             sizeof(*removed) * PQI_MAX_DEVICES);
1665
1666         DBG_FUNC("OUT\n");
1667 }
1668
1669 /*
1670  * Report the driver version to the adapter using the BMIC command
1671  * BMIC_WRITE_HOST_WELLNESS.
1672  */
1673 int
1674 pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
1675 {
1676         int rval = PQI_STATUS_SUCCESS;
1677         struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
1678         size_t data_length;
1679         pqisrc_raid_req_t request;
1680
1681         DBG_FUNC("IN\n");
1682
1683         memset(&request, 0, sizeof(request));
1684         data_length = sizeof(*host_wellness_driver_ver);
1685
1686         host_wellness_driver_ver = os_mem_alloc(softs, data_length);
1687         if (!host_wellness_driver_ver) {
1688                 DBG_ERR("failed to allocate memory for host wellness driver_version\n");
1689                 return PQI_STATUS_FAILURE;
1690         }
1691
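             /*
              * The host wellness payload is framed by a "<HW>" start tag and a
              * "ZZ" end tag; the "DV" tag introduces the driver version string.
              */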
1692         host_wellness_driver_ver->start_tag[0] = '<';
1693         host_wellness_driver_ver->start_tag[1] = 'H';
1694         host_wellness_driver_ver->start_tag[2] = 'W';
1695         host_wellness_driver_ver->start_tag[3] = '>';
1696         host_wellness_driver_ver->driver_version_tag[0] = 'D';
1697         host_wellness_driver_ver->driver_version_tag[1] = 'V';
1698         host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version));
1699         strncpy(host_wellness_driver_ver->driver_version, softs->os_name,
1700                 sizeof(host_wellness_driver_ver->driver_version));
1701         if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version)) {
1702                 strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name),
1703                         PQISRC_DRIVER_VERSION,
1704                         sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name));
1705         } else {
1706                 DBG_DISC("OS name length (%lu) exceeds the driver_version buffer; "
1707                         "skipping driver version append\n", strlen(softs->os_name));
1708         }
1709         host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
1710         host_wellness_driver_ver->end_tag[0] = 'Z';
1711         host_wellness_driver_ver->end_tag[1] = 'Z';
1712
1713         rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver,data_length,
1714                                         BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1715
1716         os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
1717
1718         DBG_FUNC("OUT\n");
1719         return rval;
1720 }
1721
1722 /*
1723  * Write current RTC time from host to the adapter using
1724  * BMIC_WRITE_HOST_WELLNESS
1725  */
1726 int
1727 pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
1728 {
1729         int rval = PQI_STATUS_SUCCESS;
1730         struct bmic_host_wellness_time *host_wellness_time;
1731         size_t data_length;
1732         pqisrc_raid_req_t request;
1733
1734         DBG_FUNC("IN\n");
1735
1736         memset(&request, 0, sizeof(request));
1737         data_length = sizeof(*host_wellness_time);
1738
1739         host_wellness_time = os_mem_alloc(softs, data_length);
1740         if (!host_wellness_time) {
1741                 DBG_ERR("failed to allocate memory for host wellness time structure\n");
1742                 return PQI_STATUS_FAILURE;
1743         }
1744
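             /*
              * Same "<HW>" ... "ZZ" framing as above; the "TD" tag introduces
              * the time/date fields and "DW" marks the don't-write tag.
              */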
1745         host_wellness_time->start_tag[0] = '<';
1746         host_wellness_time->start_tag[1] = 'H';
1747         host_wellness_time->start_tag[2] = 'W';
1748         host_wellness_time->start_tag[3] = '>';
1749         host_wellness_time->time_tag[0] = 'T';
1750         host_wellness_time->time_tag[1] = 'D';
1751         host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) -
1752                                                                                         offsetof(struct bmic_host_wellness_time, century));
1753
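             /* os_get_time() fills in the time/date fields from the host clock. */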
1754         os_get_time(host_wellness_time);
1755
1756         host_wellness_time->dont_write_tag[0] = 'D';
1757         host_wellness_time->dont_write_tag[1] = 'W';
1758         host_wellness_time->end_tag[0] = 'Z';
1759         host_wellness_time->end_tag[1] = 'Z';
1760
1761         rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time,data_length,
1762                                         BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1763
1764         os_mem_free(softs, (char *)host_wellness_time, data_length);
1765
1766         DBG_FUNC("OUT\n");
1767         return rval;
1768 }
1769
1770 /*
1771  * Function used to perform a rescan of scsi devices
1772  * for any config change events
1773  */
1774 int
1775 pqisrc_scan_devices(pqisrc_softstate_t *softs)
1776 {
1777         boolean_t is_physical_device;
1778         int ret = PQI_STATUS_FAILURE;
1779         int i;
1780         int new_dev_cnt;
1781         int phy_log_dev_cnt;
1782         size_t queue_log_data_length;
1783         uint8_t *scsi3addr;
1784         uint8_t multiplier;
1785         uint16_t qdepth;
1786         uint32_t physical_cnt;
1787         uint32_t logical_cnt;
1788         uint32_t logical_queue_cnt;
1789         uint32_t ndev_allocated = 0;
1790         size_t phys_data_length, log_data_length;
1791         reportlun_data_ext_t *physical_dev_list = NULL;
1792         reportlun_data_ext_t *logical_dev_list = NULL;
1793         reportlun_ext_entry_t *lun_ext_entry = NULL;
1794         reportlun_queue_depth_data_t *logical_queue_dev_list = NULL;
1795         bmic_ident_physdev_t *bmic_phy_info = NULL;
1796         pqi_scsi_dev_t **new_device_list = NULL;
1797         pqi_scsi_dev_t *device = NULL;
1798         
1799
1800         DBG_FUNC("IN\n");
1801
1802         ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
1803                                         &logical_queue_dev_list, &queue_log_data_length,
1804                                         &phys_data_length, &log_data_length);
1805
1806         if (ret)
1807                 goto err_out;
1808
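             /*
              * Each report's list_length is a big-endian byte count; divide by
              * the LUN entry size to get the number of entries.
              */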
1809         physical_cnt = BE_32(physical_dev_list->header.list_length)
1810                 / sizeof(physical_dev_list->lun_entries[0]);
1811
1812         logical_cnt = BE_32(logical_dev_list->header.list_length)
1813                 / sizeof(logical_dev_list->lun_entries[0]);
1814
1815         logical_queue_cnt = BE_32(logical_queue_dev_list->header.list_length)
1816                 / sizeof(logical_queue_dev_list->lun_entries[0]);
1817
1818
1819         DBG_DISC("physical_cnt %d logical_cnt %d queue_cnt %d\n", physical_cnt, logical_cnt, logical_queue_cnt);
1820
1821         if (physical_cnt) {
1822                 bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
1823                 if (bmic_phy_info == NULL) {
1824                         ret = PQI_STATUS_FAILURE;
1825                         DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret);
1826                         goto err_out;
1827                 }
1828         }
1829         phy_log_dev_cnt = physical_cnt + logical_cnt;
1830         new_device_list = os_mem_alloc(softs,
1831                                 sizeof(*new_device_list) * phy_log_dev_cnt);
1832
1833         if (new_device_list == NULL) {
1834                 ret = PQI_STATUS_FAILURE;
1835                 DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1836                 goto err_out;
1837         }
1838
1839         for (i = 0; i < phy_log_dev_cnt; i++) {
1840                 new_device_list[i] = os_mem_alloc(softs,
1841                                                 sizeof(*new_device_list[i]));
1842                 if (new_device_list[i] == NULL) {
1843                         ret = PQI_STATUS_FAILURE;
1844                         DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1845                         ndev_allocated = i;
1846                         goto err_out;
1847                 }
1848         }
1849
1850         ndev_allocated = phy_log_dev_cnt;
1851         new_dev_cnt = 0;
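             /*
              * Walk the combined list: the first physical_cnt entries come from
              * the physical LUN report, the rest from the logical report.
              */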
1852         for (i = 0; i < phy_log_dev_cnt; i++) {
1853
1854                 if (i < physical_cnt) {
1855                         is_physical_device = true;
1856                         lun_ext_entry = &physical_dev_list->lun_entries[i];
1857                 } else {
1858                         is_physical_device = false;
1859                         lun_ext_entry =
1860                                 &logical_dev_list->lun_entries[i - physical_cnt];
1861                 }
1862
1863                 scsi3addr = lun_ext_entry->lunid;
1864
1865                 /* Save the target SAS address for an external RAID device */
1866                 if(lun_ext_entry->device_type == CONTROLLER_DEVICE) {
1867                         int target = lun_ext_entry->lunid[3] & 0x3f;
1868                         softs->target_sas_addr[target] = BE_64(lun_ext_entry->wwid);
1869                 }
1870
1871                 /* Skip masked physical non-disk devices. */
1872                 if (MASKED_DEVICE(scsi3addr) && is_physical_device
1873                                 && (lun_ext_entry->ioaccel_handle == 0))
1874                         continue;
1875
1876                 device = new_device_list[new_dev_cnt];
1877                 memset(device, 0, sizeof(*device));
1878                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1879                 device->wwid = lun_ext_entry->wwid;
1880                 device->is_physical_device = is_physical_device;
1881                 if (!is_physical_device && logical_queue_cnt--) {
1882                         device->is_external_raid_device =
1883                                 pqisrc_is_external_raid_addr(scsi3addr);
1884                         /* The multiplier selects the scale of the reported
1885                          * queue depth: multiply the reported value by 256
1886                          * when the multiplier is 1, and by 16 when the
1887                          * multiplier is 0. */
1888                         multiplier = logical_queue_dev_list->lun_entries[i - physical_cnt].multiplier;
1889                         qdepth = logical_queue_dev_list->lun_entries[i - physical_cnt].queue_depth;
1890                         if (multiplier) {
1891                                 device->firmware_queue_depth_set = true;
1892                                 device->queue_depth = qdepth*256;
1893                         } else {
1894                                 device->firmware_queue_depth_set = true;
1895                                 device->queue_depth = qdepth*16;
1896                         }
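                             /* Clamp to the adapter queue depth and distrust out-of-range firmware values. */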
1897                         if (device->queue_depth > softs->adapterQDepth) {
1898                                 device->firmware_queue_depth_set = true;
1899                                 device->queue_depth = softs->adapterQDepth;
1900                         }
1901                         if ((multiplier == 1) &&
1902                                 (qdepth <= 0 || qdepth >= MAX_RAW_M256_QDEPTH))
1903                                 device->firmware_queue_depth_set = false;
1904                         if ((multiplier == 0) &&
1905                                 (qdepth <= 0 || qdepth >= MAX_RAW_M16_QDEPTH))
1906                                 device->firmware_queue_depth_set = false;
1907                 }
1908
1909
1910                 /* Get device type, vendor, model, device ID. */
1911                 ret = pqisrc_get_dev_data(softs, device);
1912                 if (ret) {
1913                         DBG_WARN("Inquiry failed, skipping device %016llx\n",
1914                                  (unsigned long long)BE_64(device->scsi3addr[0]));
1915                         DBG_DISC("INQUIRY FAILED \n");
1916                         continue;
1917                 }
1918                 /* For the controller (RAID) device, set the queue depth
1919                  * to the adapter queue depth used by the SCSI midlayer. */
1920                 if (device->devtype == RAID_DEVICE) {
1921                         device->firmware_queue_depth_set = true;
1922                         device->queue_depth = softs->adapterQDepth;
1923                 }
1924                 pqisrc_assign_btl(device);
1925
1926                 /*
1927                  * Expose all devices except for physical devices that
1928                  * are masked.
1929                  */
1930                 if (device->is_physical_device &&
1931                         MASKED_DEVICE(scsi3addr))
1932                         device->expose_device = false;
1933                 else
1934                         device->expose_device = true;
1935
1936                 if (device->is_physical_device &&
1937                     (lun_ext_entry->device_flags &
1938                      REPORT_LUN_DEV_FLAG_AIO_ENABLED) &&
1939                      lun_ext_entry->ioaccel_handle) {
1940                         device->aio_enabled = true;
1941                 }
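                     /*
                      * Only the device types handled below are counted into the
                      * new device list; all others are skipped.
                      */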
1942                 switch (device->devtype) {
1943                 case ROM_DEVICE:
1944                         /*
1945                          * We don't *really* support actual CD-ROM devices,
1946                          * but we do support the HP "One Button Disaster
1947                          * Recovery" tape drive which temporarily pretends to
1948                          * be a CD-ROM drive.
1949                          */
1950                         if (device->is_obdr_device)
1951                                 new_dev_cnt++;
1952                         break;
1953                 case DISK_DEVICE:
1954                 case ZBC_DEVICE:
1955                         if (device->is_physical_device) {
1956                                 device->ioaccel_handle =
1957                                         lun_ext_entry->ioaccel_handle;
1958                                 device->sas_address = BE_64(lun_ext_entry->wwid);
1959                                 pqisrc_get_physical_device_info(softs, device,
1960                                         bmic_phy_info);
1961                         }
1962                         new_dev_cnt++;
1963                         break;
1964                 case ENCLOSURE_DEVICE:
1965                         if (device->is_physical_device) {
1966                                 device->sas_address = BE_64(lun_ext_entry->wwid);
1967                         }
1968                         new_dev_cnt++;
1969                         break;
1970                 case TAPE_DEVICE:
1971                 case MEDIUM_CHANGER_DEVICE:
1972                         new_dev_cnt++;
1973                         break;
1974                 case RAID_DEVICE:
1975                         /*
1976                          * Only present the HBA controller itself as a RAID
1977                          * controller.  If it's a RAID controller other than
1978                          * the HBA itself (an external RAID controller, MSA500
1979                          * or similar), don't present it.
1980                          */
1981                         if (pqisrc_is_hba_lunid(scsi3addr))
1982                                 new_dev_cnt++;
1983                         break;
1984                 case SES_DEVICE:
1985                 case CONTROLLER_DEVICE:
1986                 default:
1987                         break;
1988                 }
1989         }
1990         DBG_DISC("new_dev_cnt %d\n", new_dev_cnt);
1991
1992         pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
1993
1994 err_out:
1995         if (new_device_list) {
1996                 for (i = 0; i < ndev_allocated; i++) {
1997                         if (new_device_list[i]) {
1998                                 if(new_device_list[i]->raid_map)
1999                                         os_mem_free(softs, (char *)new_device_list[i]->raid_map,
2000                                                                                 sizeof(pqisrc_raid_map_t));
2001                                 os_mem_free(softs, (char*)new_device_list[i],
2002                                                                 sizeof(*new_device_list[i]));
2003                         }
2004                 }
2005                 os_mem_free(softs, (char *)new_device_list,
2006                                 sizeof(*new_device_list) * ndev_allocated);
2007         }
2008         if(physical_dev_list)
2009                 os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
2010         if(logical_dev_list)
2011                 os_mem_free(softs, (char *)logical_dev_list, log_data_length);
2012         if(logical_queue_dev_list)
2013                 os_mem_free(softs, (char*)logical_queue_dev_list,
2014                         queue_log_data_length);
2015         if (bmic_phy_info)
2016                 os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
2017
2018         DBG_FUNC("OUT \n");
2019
2020         return ret;
2021 }
2022
2023 /*
2024  * Clean up memory allocated for devices.
2025  */
2026 void
2027 pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
2028 {
2029
2030         int i = 0, j = 0;
2031         pqi_scsi_dev_t *dvp = NULL;
2032         DBG_FUNC("IN\n");
2033
2034         for(i = 0; i < PQI_MAX_DEVICES; i++) {
2035                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
2036                         if (softs->device_list[i][j] == NULL)
2037                                 continue;
2038                         dvp = softs->device_list[i][j];
2039                         pqisrc_device_mem_free(softs, dvp);
2040                 }
2041         }
2042         DBG_FUNC("OUT\n");
2043 }