1 /*-
2  * Copyright (c) 2018 Microsemi Corporation.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  */
26
27 /* $FreeBSD$ */
28
29 #include "smartpqi_includes.h"
30
31 /* Validate the scsi sense response code */
32 static inline boolean_t pqisrc_scsi_sense_valid(const struct sense_header_scsi *sshdr)
33 {
34         DBG_FUNC("IN\n");
35
36         if (!sshdr)
37                 return false;
38
39         DBG_FUNC("OUT\n");
40
41         return (sshdr->response_code & 0x70) == 0x70;
42 }
43
44 /* Parse the SCSI sense data from the response buffer into a local sense header */
45 boolean_t pqisrc_update_scsi_sense(const uint8_t *buff, int len,
46                               struct sense_header_scsi *header)
47 {
48
49         DBG_FUNC("IN\n");
50
51         if (!buff || !len)
52                 return false;
53
54         memset(header, 0, sizeof(struct sense_header_scsi));
55
56         header->response_code = (buff[0] & 0x7f);
57
58         if (!pqisrc_scsi_sense_valid(header))
59                 return false;
60
61         if (header->response_code >= 0x72) {
62                 /* descriptor format */
63                 if (len > 1)
64                         header->sense_key = (buff[1] & 0xf);
65                 if (len > 2)
66                         header->asc = buff[2];
67                 if (len > 3)
68                         header->ascq = buff[3];
69                 if (len > 7)
70                         header->additional_length = buff[7];
71         } else {
72                  /* fixed format */
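                 /* In the fixed format the sense key is in byte 2, and the ASC/ASCQ
                  * live at bytes 12 and 13, limited by the additional sense length
                  * reported in byte 7. */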
73                 if (len > 2)
74                         header->sense_key = (buff[2] & 0xf);
75                 if (len > 7) {
76                         len = (len < (buff[7] + 8)) ?
77                                         len : (buff[7] + 8);
78                         if (len > 12)
79                                 header->asc = buff[12];
80                         if (len > 13)
81                                 header->ascq = buff[13];
82                 }
83         }
84
85         DBG_FUNC("OUT\n");
86
87         return true;
88 }
89
90 /*
91  * Function used to build the internal raid request and analyze the response
92  */
93 int pqisrc_build_send_raid_request(pqisrc_softstate_t *softs,  pqisrc_raid_req_t *request,
94                             void *buff, size_t datasize, uint8_t cmd, uint16_t vpd_page, uint8_t *scsi3addr,
95                             raid_path_error_info_elem_t *error_info)
96 {
97         
98         uint8_t *cdb;
99         int ret = PQI_STATUS_SUCCESS;
100         uint32_t tag = 0;
101         struct dma_mem device_mem;
102         sgt_t *sgd;
103
104         ib_queue_t *ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
105         ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
106
107         rcb_t *rcb = NULL;
108         
109         DBG_FUNC("IN\n");
110
111         memset(&device_mem, 0, sizeof(struct dma_mem));
112
113         /* For TEST UNIT READY (TUR), datasize is 0 and buff is NULL, so no DMA buffer is allocated. */
114         if (datasize) {
115                 device_mem.tag = "device_mem";
116                 device_mem.size = datasize;
117                 device_mem.align = PQISRC_DEFAULT_DMA_ALIGN;
118
119                 ret = os_dma_mem_alloc(softs, &device_mem);
120         
121                 if (ret) {
122                         DBG_ERR("failed to allocate dma memory for device_mem return code %d\n", ret);
123                         return ret;
124                 }
125
126                 sgd = (sgt_t *)&request->sg_descriptors[0];
127
128                 sgd->addr = device_mem.dma_addr;
129                 sgd->len = datasize;
130                 sgd->flags = SG_FLAG_LAST;
131
132         }
133
134         /* Build raid path request */
135         request->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
136
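        /*
         * The IU length covers everything after the common request header,
         * up to and including the first SG descriptor.
         */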
137         request->header.iu_length = LE_16(offsetof(pqisrc_raid_req_t,
138                                                         sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH);
139         request->buffer_length = LE_32(datasize);
140         memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
141         request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
142         request->additional_cdb_bytes_usage = PQI_ADDITIONAL_CDB_BYTES_0;
143
144         cdb = request->cdb;
145
146         switch (cmd) {
147         case SA_INQUIRY:
148                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
149                 cdb[0] = SA_INQUIRY;
150                 if (vpd_page & VPD_PAGE) {
151                         cdb[1] = 0x1;
152                         cdb[2] = (uint8_t)vpd_page;
153                 }
154                 cdb[4] = (uint8_t)datasize;
155                 break;
156         case SA_REPORT_LOG:
157         case SA_REPORT_PHYS:
158                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
159                 cdb[0] = cmd;
160                 if (cmd == SA_REPORT_PHYS)
161                         cdb[1] = SA_REPORT_PHYS_EXTENDED;
162                 else
163                         cdb[1] = SA_REPORT_LOG_EXTENDED;
164                 cdb[8] = (uint8_t)((datasize) >> 8);
165                 cdb[9] = (uint8_t)datasize;
166                 break;
167         case TEST_UNIT_READY:
168                 request->data_direction = SOP_DATA_DIR_NONE;
169                 break;
170         case SA_GET_RAID_MAP:
171                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
172                 cdb[0] = SA_CISS_READ;
173                 cdb[1] = cmd;
174                 cdb[8] = (uint8_t)((datasize) >> 8);
175                 cdb[9] = (uint8_t)datasize;
176                 break;
177         case SA_CACHE_FLUSH:
178                 request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
179                 cdb[0] = BMIC_WRITE;
180                 cdb[6] = BMIC_CACHE_FLUSH;
181                 cdb[7] = (uint8_t)((datasize)  << 8);
182                 cdb[8] = (uint8_t)((datasize)  >> 8);
183                 break;
184         case BMIC_IDENTIFY_CONTROLLER:
185         case BMIC_IDENTIFY_PHYSICAL_DEVICE:
186                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
187                 cdb[0] = BMIC_READ;
188                 cdb[6] = cmd;
189                 cdb[7] = (uint8_t)((datasize)  << 8);
190                 cdb[8] = (uint8_t)((datasize)  >> 8);
191                 break;
192         case BMIC_WRITE_HOST_WELLNESS:
193                 request->data_direction = SOP_DATA_DIR_FROM_DEVICE;
194                 memcpy(device_mem.virt_addr, buff, datasize);
195                 cdb[0] = BMIC_WRITE;
196                 cdb[6] = cmd;
197                 cdb[7] = (uint8_t)((datasize)  << 8);
198                 cdb[8] = (uint8_t)((datasize)  >> 8);
199                 break;
200         case BMIC_SENSE_SUBSYSTEM_INFORMATION:
201                 request->data_direction = SOP_DATA_DIR_TO_DEVICE;
202                 cdb[0] = BMIC_READ;
203                 cdb[6] = cmd;
204                 cdb[7] = (uint8_t)((datasize)  << 8);
205                 cdb[8] = (uint8_t)((datasize)  >> 8);
206                 break;  
207         default:
208                 DBG_ERR("unknown command 0x%x\n", cmd);
209                 break;
210         }
211
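        /* Allocate a request tag; it doubles as the rcb index and the error table index for this command. */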
212         tag = pqisrc_get_tag(&softs->taglist);
213         if (INVALID_ELEM == tag) {
214                 DBG_ERR("Tag not available\n");
215                 ret = PQI_STATUS_FAILURE;
216                 goto err_notag;
217         }
218
219         ((pqisrc_raid_req_t *)request)->request_id = tag;
220         ((pqisrc_raid_req_t *)request)->error_index = ((pqisrc_raid_req_t *)request)->request_id;
221         ((pqisrc_raid_req_t *)request)->response_queue_id = ob_q->q_id;
222         rcb = &softs->rcb[tag];
223         rcb->success_cmp_callback = pqisrc_process_internal_raid_response_success;
224         rcb->error_cmp_callback = pqisrc_process_internal_raid_response_error;
225
226         rcb->req_pending = true;
227         rcb->tag = tag;
228         /* Submit Command */
229         ret = pqisrc_submit_cmnd(softs, ib_q, request);
230
231         if (ret != PQI_STATUS_SUCCESS) {
232                 DBG_ERR("Unable to submit command\n");
233                 goto err_out;
234         }
235
236         ret = pqisrc_wait_on_condition(softs, rcb);
237         if (ret != PQI_STATUS_SUCCESS) {
238                 DBG_ERR("Internal RAID request timed out: cmd : 0x%x\n", cmd);
239                 goto err_out;
240         }
241
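        /* Copy any returned data back into the caller's buffer and release the DMA region. */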
242         if (datasize) {
243                 if (buff) {
244                         memcpy(buff, device_mem.virt_addr, datasize);
245                 }
246                 os_dma_mem_free(softs, &device_mem);
247         }
248         
249         ret = rcb->status;
250         if (ret) {
251                 if(error_info) {
252                         memcpy(error_info, 
253                                rcb->error_info,
254                                sizeof(*error_info));
255
256                         if (error_info->data_out_result ==
257                             PQI_RAID_DATA_IN_OUT_UNDERFLOW) {
258                                 ret = PQI_STATUS_SUCCESS;
259                         }
260                         else{
261                                 DBG_INFO("Error!! Bus=%u Target=%u, Cmd=0x%x," 
262                                         "Ret=%d\n", BMIC_GET_LEVEL_2_BUS(scsi3addr), 
263                                         BMIC_GET_LEVEL_TWO_TARGET(scsi3addr), 
264                                         cmd, ret);
265                                 ret = PQI_STATUS_FAILURE;
266                         }
267                 }
268         } else {
269                 if(error_info) {
270                         ret = PQI_STATUS_SUCCESS;
271                         memset(error_info, 0, sizeof(*error_info));
272                 }
273         }
274
275         os_reset_rcb(rcb);
276         pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
277         DBG_FUNC("OUT\n");
278         return ret;
279
280 err_out:
281         DBG_ERR("Error!! Bus=%u Target=%u, Cmd=0x%x, Ret=%d\n", 
282                 BMIC_GET_LEVEL_2_BUS(scsi3addr), BMIC_GET_LEVEL_TWO_TARGET(scsi3addr), 
283                 cmd, ret);
284         os_reset_rcb(rcb);
285         pqisrc_put_tag(&softs->taglist, ((pqisrc_raid_req_t *)request)->request_id);
286 err_notag:
287         if (datasize)
288                 os_dma_mem_free(softs, &device_mem);
289         DBG_FUNC("FAILED \n");
290         return ret;
291 }
292
293 /* Common helper used to send the REPORT PHYSICAL LUNS and REPORT LOGICAL LUNS commands */
294 static int pqisrc_report_luns(pqisrc_softstate_t *softs, uint8_t cmd,
295         void *buff, size_t buf_len)
296 {
297         int ret;
298         pqisrc_raid_req_t request;
299
300         DBG_FUNC("IN\n");
301
302         memset(&request, 0, sizeof(request));
303         ret =  pqisrc_build_send_raid_request(softs, &request, buff, 
304                                 buf_len, cmd, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
305
306         DBG_FUNC("OUT\n");
307
308         return ret;
309 }
310
311 /* Subroutine used to retrieve the physical or logical LUN list from the controller */
312 static int pqisrc_get_physical_logical_luns(pqisrc_softstate_t *softs, uint8_t cmd,
313                 reportlun_data_ext_t **buff, size_t *data_length)
314 {
315         int ret;
316         size_t list_len;
317         size_t data_len;
318         size_t new_lun_list_length;
319         reportlun_data_ext_t *lun_data;
320         reportlun_header_t report_lun_header;
321
322         DBG_FUNC("IN\n");
323
324         ret = pqisrc_report_luns(softs, cmd, &report_lun_header,
325                 sizeof(report_lun_header));
326
327         if (ret) {
328                 DBG_ERR("failed return code: %d\n", ret);
329                 return ret;
330         }
331         list_len = BE_32(report_lun_header.list_length);
332
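        /*
         * Allocate a buffer sized from the reported list length and reissue
         * the command; if the controller reports a larger list the second
         * time, retry with the bigger size.
         */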
333 retry:
334         data_len = sizeof(reportlun_header_t) + list_len;
335         *data_length = data_len;
336
337         lun_data = os_mem_alloc(softs, data_len);
338
339         if (!lun_data) {
340                 DBG_ERR("failed to allocate memory for lun_data\n");
341                 return PQI_STATUS_FAILURE;
342         }
343                 
344         if (list_len == 0) {
345                 DBG_INFO("list_len is 0\n");
346                 memcpy(lun_data, &report_lun_header, sizeof(report_lun_header));
347                 goto out;
348         }
349
350         ret = pqisrc_report_luns(softs, cmd, lun_data, data_len);
351
352         if (ret) {
353                 DBG_ERR("error\n");
354                 goto error;
355         }
356
357         new_lun_list_length = BE_32(lun_data->header.list_length);
358
359         if (new_lun_list_length > list_len) {
360                 list_len = new_lun_list_length;
361                 os_mem_free(softs, (void *)lun_data, data_len);
362                 goto retry;
363         }
364
365 out:
366         *buff = lun_data;
367         DBG_FUNC("OUT\n");
368         return 0;
369
370 error:
371         os_mem_free(softs, (void *)lun_data, data_len);
372         DBG_ERR("FAILED\n");
373         return ret;
374 }
375
376 /*
377  * Function used to get physical and logical device list
378  */
379 static int pqisrc_get_phys_log_device_list(pqisrc_softstate_t *softs,
380         reportlun_data_ext_t **physical_dev_list,
381         reportlun_data_ext_t **logical_dev_list, 
382         size_t *phys_data_length,
383         size_t *log_data_length)
384 {
385         int ret = PQI_STATUS_SUCCESS;
386         size_t logical_list_length;
387         size_t logdev_data_length;
388         size_t data_length;
389         reportlun_data_ext_t *local_logdev_list;
390         reportlun_data_ext_t *logdev_data;
391         reportlun_header_t report_lun_header;
392         
393
394         DBG_FUNC("IN\n");
395
396         ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_PHYS, physical_dev_list, phys_data_length);
397         if (ret) {
398                 DBG_ERR("report physical LUNs failed");
399                 return ret;
400         }
401
402         ret = pqisrc_get_physical_logical_luns(softs, SA_REPORT_LOG, logical_dev_list, log_data_length);
403         if (ret) {
404                 DBG_ERR("report logical LUNs failed");
405                 return ret;
406         }
407
408
409         logdev_data = *logical_dev_list;
410
411         if (logdev_data) {
412                 logical_list_length =
413                         BE_32(logdev_data->header.list_length);
414         } else {
415                 memset(&report_lun_header, 0, sizeof(report_lun_header));
416                 logdev_data =
417                         (reportlun_data_ext_t *)&report_lun_header;
418                 logical_list_length = 0;
419         }
420
421         logdev_data_length = sizeof(reportlun_header_t) +
422                 logical_list_length;
423
424         /* Adding LOGICAL device entry for controller */
425         local_logdev_list = os_mem_alloc(softs,
426                                             logdev_data_length + sizeof(reportlun_ext_entry_t));
427         if (!local_logdev_list) {
428                 data_length = *log_data_length;
429                 os_mem_free(softs, (char *)*logical_dev_list, data_length);
430                 *logical_dev_list = NULL;
431                 return PQI_STATUS_FAILURE;
432         }
433
434         memcpy(local_logdev_list, logdev_data, logdev_data_length);
435         memset((uint8_t *)local_logdev_list + logdev_data_length, 0,
436                 sizeof(reportlun_ext_entry_t));
437         local_logdev_list->header.list_length = BE_32(logical_list_length +
438                                                         sizeof(reportlun_ext_entry_t));
439         data_length = *log_data_length;
440         os_mem_free(softs, (char *)*logical_dev_list, data_length);
441         *log_data_length = logdev_data_length + sizeof(reportlun_ext_entry_t);
442         *logical_dev_list = local_logdev_list;
443
444         DBG_FUNC("OUT\n");
445
446         return ret;
447 }
448
449 /* Subroutine used to set Bus-Target-Lun for the requested device */
450 static inline void pqisrc_set_btl(pqi_scsi_dev_t *device,
451         int bus, int target, int lun)
452 {
453         DBG_FUNC("IN\n");
454
455         device->bus = bus;
456         device->target = target;
457         device->lun = lun;
458
459         DBG_FUNC("OUT\n");
460 }
461
462 inline boolean_t pqisrc_is_external_raid_device(pqi_scsi_dev_t *device)
463 {
464         return device->is_external_raid_device;
465 }
466
467 static inline boolean_t pqisrc_is_external_raid_addr(uint8_t *scsi3addr)
468 {
469         return scsi3addr[2] != 0;
470 }
471
472 /* Function used to assign Bus-Target-Lun for the requested device */
473 static void pqisrc_assign_btl(pqi_scsi_dev_t *device)
474 {
475         uint8_t *scsi3addr;
476         uint32_t lunid;
477         uint32_t bus;
478         uint32_t target;
479         uint32_t lun;
480         DBG_FUNC("IN\n");
481
482         scsi3addr = device->scsi3addr;
483         lunid = GET_LE32(scsi3addr);
484
485         if (pqisrc_is_hba_lunid(scsi3addr)) {
486                 /* The specified device is the controller. */
487                 pqisrc_set_btl(device, PQI_HBA_BUS, PQI_CTLR_INDEX, lunid & 0x3fff);
488                 device->target_lun_valid = true;
489                 return;
490         }
491
492         if (pqisrc_is_logical_device(device)) {
493                 if (pqisrc_is_external_raid_device(device)) {
494                         DBG_INFO("External Raid Device!!!");
495                         bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
496                         target = (lunid >> 16) & 0x3fff;
497                         lun = lunid & 0xff;
498                 } else {
499                         bus = PQI_RAID_VOLUME_BUS;
500                         lun = 0;
501                         target = lunid & 0x3fff;
502                 }
503                 pqisrc_set_btl(device, bus, target, lun);
504                 device->target_lun_valid = true;
505                 return;
506         }
507
508         /* physical device */
509         pqisrc_set_btl(device, PQI_PHYSICAL_DEVICE_BUS, PQI_PD_INDEX(scsi3addr[6]), 0);
510
511         DBG_FUNC("OUT\n");
512 }
513
514 /* Build and send an internal INQUIRY command to a particular device */
515 static int pqisrc_send_scsi_inquiry(pqisrc_softstate_t *softs,
516         uint8_t *scsi3addr, uint16_t vpd_page, uint8_t *buff, int buf_len)
517 {
518         int ret = PQI_STATUS_SUCCESS;
519         pqisrc_raid_req_t request;
520         raid_path_error_info_elem_t error_info;
521
522         DBG_FUNC("IN\n");
523
524         memset(&request, 0, sizeof(request));
525         ret =  pqisrc_build_send_raid_request(softs, &request, buff, buf_len, 
526                                                                 SA_INQUIRY, vpd_page, scsi3addr, &error_info);
527
528         DBG_FUNC("OUT\n");
529         return ret;
530 }
531
532 /* Function used to parse the sense information from the response */
533 static void pqisrc_fetch_sense_info(const uint8_t *sense_data,
534         unsigned sense_data_length, uint8_t *sense_key, uint8_t *asc, uint8_t *ascq)
535 {
536         struct sense_header_scsi header;
537
538         DBG_FUNC("IN\n");
539
540         *sense_key = 0;
541         *ascq = 0;
542         *asc = 0;
543
544         if (pqisrc_update_scsi_sense(sense_data, sense_data_length, &header)) {
545                 *sense_key = header.sense_key;
546                 *asc = header.asc;
547                 *ascq = header.ascq;
548         }
549
550         DBG_INFO("sense_key: %x asc: %x ascq: %x\n", *sense_key, *asc, *ascq);
551
552         DBG_FUNC("OUT\n");
553 }
554
555 /* Function used to retrieve the volume offline status from the LV status VPD page */
556 static uint8_t pqisrc_get_volume_offline_status(pqisrc_softstate_t *softs,
557         uint8_t *scsi3addr)
558 {
559         int ret = PQI_STATUS_SUCCESS;
560         uint8_t status = SA_LV_STATUS_VPD_UNSUPPORTED;
561         uint8_t size;
562         uint8_t *buff = NULL;
563
564         DBG_FUNC("IN\n");
565         
566         buff = os_mem_alloc(softs, 64);
567         if (!buff)
568                 return PQI_STATUS_FAILURE;
569
570         /* Get the size of the VPD return buff. */
571         ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
572                 buff, SCSI_VPD_HEADER_LENGTH);
573
574         if (ret)
575                 goto out;
576
577         size = buff[3];
578
579         /* Now get the whole VPD buff. */
580         ret = pqisrc_send_scsi_inquiry(softs, scsi3addr, VPD_PAGE | SA_VPD_LV_STATUS,
581                 buff, size + SCSI_VPD_HEADER_LENGTH);
582         if (ret)
583                 goto out;
584
585         status = buff[4];
586
587 out:
588         os_mem_free(softs, (char *)buff, 64);
589         DBG_FUNC("OUT\n");
590
591         return status;
592 }
593
594
595 /* Determine the offline status of a volume. Returns the appropriate SA_LV_* status. */
596 static uint8_t pqisrc_get_dev_vol_status(pqisrc_softstate_t *softs,
597         uint8_t *scsi3addr)
598 {
599         int ret = PQI_STATUS_SUCCESS;
600         uint8_t *sense_data;
601         unsigned sense_data_len;
602         uint8_t sense_key;
603         uint8_t asc;
604         uint8_t ascq;
605         uint8_t off_status;
606         uint8_t scsi_status;
607         pqisrc_raid_req_t request;
608         raid_path_error_info_elem_t error_info;
609
610         DBG_FUNC("IN\n");
611
612         memset(&request, 0, sizeof(request));   
613         ret =  pqisrc_build_send_raid_request(softs, &request, NULL, 0, 
614                                 TEST_UNIT_READY, 0, scsi3addr, &error_info);
615         
616         if (ret)
617                 goto error;
618         sense_data = error_info.data;
619         sense_data_len = LE_16(error_info.sense_data_len);
620
621         if (sense_data_len > sizeof(error_info.data))
622                 sense_data_len = sizeof(error_info.data);
623
624         pqisrc_fetch_sense_info(sense_data, sense_data_len, &sense_key, &asc,
625                 &ascq);
626
627         scsi_status = error_info.status;
628
629         /* SCSI status CHECK CONDITION with sense key NOT READY? */
630         if (scsi_status != 2 ||
631             sense_key != 2 ||
632             asc != ASC_LUN_NOT_READY) {
633                 return SA_LV_OK;
634         }
635
636         /* Determine the reason for not ready state. */
637         off_status = pqisrc_get_volume_offline_status(softs, scsi3addr);
638
639         DBG_INFO("offline_status 0x%x\n", off_status);
640
641         /* Keep volume offline in certain cases. */
642         switch (off_status) {
643         case SA_LV_UNDERGOING_ERASE:
644         case SA_LV_NOT_AVAILABLE:
645         case SA_LV_UNDERGOING_RPI:
646         case SA_LV_PENDING_RPI:
647         case SA_LV_ENCRYPTED_NO_KEY:
648         case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
649         case SA_LV_UNDERGOING_ENCRYPTION:
650         case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
651         case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
652                 return off_status;
653         case SA_LV_STATUS_VPD_UNSUPPORTED:
654                 /*
655                  * If the VPD status page isn't available,
656                  * use ASC/ASCQ to determine state.
657                  */
658                 if (ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS ||
659                     ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)
660                         return off_status;
661                 break;
662         }
663
664         DBG_FUNC("OUT\n");
665
666         return SA_LV_OK;
667
668 error:
669         return SA_LV_STATUS_VPD_UNSUPPORTED;
670 }
671
672 /* Validate the RAID map parameters */
673 static int pqisrc_raid_map_validation(pqisrc_softstate_t *softs,
674         pqi_scsi_dev_t *device, pqisrc_raid_map_t *raid_map)
675 {
676         char *error_msg;
677         uint32_t raidmap_size;
678         uint32_t r5or6_blocks_per_row;
679         unsigned phys_dev_num;
680         unsigned num_raidmap_entries;
681
682         DBG_FUNC("IN\n");
683
684         raidmap_size = LE_32(raid_map->structure_size);
685         if (raidmap_size < offsetof(pqisrc_raid_map_t, dev_data)) {
686                 error_msg = "RAID map too small\n";
687                 goto error;
688         }
689
690         if (raidmap_size > sizeof(*raid_map)) {
691                 error_msg = "RAID map too large\n";
692                 goto error;
693         }
694
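        /*
         * Total number of physical drives referenced by the map:
         * layout map count * (data disks + metadata disks per row).
         */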
695         phys_dev_num = LE_16(raid_map->layout_map_count) *
696                 (LE_16(raid_map->data_disks_per_row) +
697                 LE_16(raid_map->metadata_disks_per_row));
698         num_raidmap_entries = phys_dev_num *
699                 LE_16(raid_map->row_cnt);
700
701         if (num_raidmap_entries > RAID_MAP_MAX_ENTRIES) {
702                 error_msg = "invalid number of map entries in RAID map\n";
703                 goto error;
704         }
705
706         if (device->raid_level == SA_RAID_1) {
707                 if (LE_16(raid_map->layout_map_count) != 2) {
708                         error_msg = "invalid RAID-1 map\n";
709                         goto error;
710                 }
711         } else if (device->raid_level == SA_RAID_ADM) {
712                 if (LE_16(raid_map->layout_map_count) != 3) {
713                         error_msg = "invalid RAID-1(ADM) map\n";
714                         goto error;
715                 }
716         } else if ((device->raid_level == SA_RAID_5 ||
717                 device->raid_level == SA_RAID_6) &&
718                 LE_16(raid_map->layout_map_count) > 1) {
719                 /* RAID 50/60 */
720                 r5or6_blocks_per_row =
721                         LE_16(raid_map->strip_size) *
722                         LE_16(raid_map->data_disks_per_row);
723                 if (r5or6_blocks_per_row == 0) {
724                         error_msg = "invalid RAID-5 or RAID-6 map\n";
725                         goto error;
726                 }
727         }
728
729         DBG_FUNC("OUT\n");
730
731         return 0;
732
733 error:
734         DBG_ERR("%s\n", error_msg);
735         return PQI_STATUS_FAILURE;
736 }
737
738 /* Get the RAID map for the requested device */
739 static int pqisrc_get_device_raidmap(pqisrc_softstate_t *softs,
740         pqi_scsi_dev_t *device)
741 {
742         int ret = PQI_STATUS_SUCCESS;
743         pqisrc_raid_req_t request;
744         pqisrc_raid_map_t *raid_map;
745
746         DBG_FUNC("IN\n");
747
748         raid_map = os_mem_alloc(softs, sizeof(*raid_map));
749         if (!raid_map)
750                 return PQI_STATUS_FAILURE;
751
752         memset(&request, 0, sizeof(request));
753         ret =  pqisrc_build_send_raid_request(softs, &request, raid_map, sizeof(*raid_map), 
754                                         SA_GET_RAID_MAP, 0, device->scsi3addr, NULL);
755
756         if (ret) {
757                 DBG_ERR("error in build send raid req ret=%d\n", ret);
758                 goto err_out;
759         }
760
761         ret = pqisrc_raid_map_validation(softs, device, raid_map);
762         if (ret) {
763                 DBG_ERR("error in raid map validation ret=%d\n", ret);
764                 goto err_out;
765         }
766
767         device->raid_map = raid_map;
768         DBG_FUNC("OUT\n");
769         return 0;
770
771 err_out:
772         os_mem_free(softs, (char*)raid_map, sizeof(*raid_map));
773         DBG_FUNC("FAILED \n");
774         return ret;
775 }
776
777 /* Get the device ioaccel_status to determine whether ioaccel offload can be used */
778 static void pqisrc_get_dev_ioaccel_status(pqisrc_softstate_t *softs,
779         pqi_scsi_dev_t *device)
780 {
781         int ret = PQI_STATUS_SUCCESS;
782         uint8_t *buff;
783         uint8_t ioaccel_status;
784
785         DBG_FUNC("IN\n");
786
787         buff = os_mem_alloc(softs, 64);
788         if (!buff)
789                 return;
790
791         ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
792                                         VPD_PAGE | SA_VPD_LV_IOACCEL_STATUS, buff, 64);
793         if (ret) {
794                 DBG_ERR("error in send scsi inquiry ret=%d\n", ret);
795                 goto err_out;
796         }
797         
798         ioaccel_status = buff[IOACCEL_STATUS_BYTE];
799         device->offload_config =
800                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
801
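        /*
         * Offload stays pending until the RAID map has been fetched and
         * validated; it is committed to offload_enabled later, during the
         * device list update.
         */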
802         if (device->offload_config) {
803                 device->offload_enabled_pending =
804                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
805                 if (pqisrc_get_device_raidmap(softs, device))
806                         device->offload_enabled_pending = false;
807         }
808         
809         DBG_INFO("offload_config: 0x%x offload_enabled_pending: 0x%x \n", 
810                         device->offload_config, device->offload_enabled_pending);
811
812 err_out:
813         os_mem_free(softs, (char*)buff, 64);
814         DBG_FUNC("OUT\n");
815 }
816
817 /* Get RAID level of requested device */
818 static void pqisrc_get_dev_raid_level(pqisrc_softstate_t *softs,
819         pqi_scsi_dev_t *device)
820 {
821         uint8_t raid_level;
822         uint8_t *buff;
823
824         DBG_FUNC("IN\n");
825
826         raid_level = SA_RAID_UNKNOWN;
827
828         buff = os_mem_alloc(softs, 64);
829         if (buff) {
830                 int ret;
831                 ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr,
832                         VPD_PAGE | SA_VPD_LV_DEVICE_GEOMETRY, buff, 64);
833                 if (ret == 0) {
834                         raid_level = buff[8];
835                         if (raid_level > SA_RAID_MAX)
836                                 raid_level = SA_RAID_UNKNOWN;
837                 }
838                 os_mem_free(softs, (char*)buff, 64);
839         }
840
841         device->raid_level = raid_level;
842         DBG_INFO("RAID LEVEL: %x \n",  raid_level);
843         DBG_FUNC("OUT\n");
844 }
845
846 /* Parse the inquiry response and determine the type of device */
847 static int pqisrc_get_dev_data(pqisrc_softstate_t *softs,
848         pqi_scsi_dev_t *device)
849 {
850         int ret = PQI_STATUS_SUCCESS;
851         uint8_t *inq_buff;
852
853         DBG_FUNC("IN\n");
854
855         inq_buff = os_mem_alloc(softs, OBDR_TAPE_INQ_SIZE);
856         if (!inq_buff)
857                 return PQI_STATUS_FAILURE;
858
859         /* Send an inquiry to the device to see what it is. */
860         ret = pqisrc_send_scsi_inquiry(softs, device->scsi3addr, 0, inq_buff,
861                 OBDR_TAPE_INQ_SIZE);
862         if (ret)
863                 goto err_out;
864         pqisrc_sanitize_inquiry_string(&inq_buff[8], 8);
865         pqisrc_sanitize_inquiry_string(&inq_buff[16], 16);
866
867         device->devtype = inq_buff[0] & 0x1f;
868         memcpy(device->vendor, &inq_buff[8],
869                 sizeof(device->vendor));
870         memcpy(device->model, &inq_buff[16],
871                 sizeof(device->model));
872         DBG_INFO("DEV_TYPE: %x VENDOR: %s MODEL: %s\n",  device->devtype, device->vendor, device->model);
873
874         if (pqisrc_is_logical_device(device) && device->devtype == DISK_DEVICE) {
875                 if (pqisrc_is_external_raid_device(device)) {
876                         device->raid_level = SA_RAID_UNKNOWN;
877                         device->volume_status = SA_LV_OK;
878                         device->volume_offline = false;
879                 } 
880                 else {
881                         pqisrc_get_dev_raid_level(softs, device);
882                         pqisrc_get_dev_ioaccel_status(softs, device);
883                         device->volume_status = pqisrc_get_dev_vol_status(softs,
884                                                 device->scsi3addr);
885                         device->volume_offline = device->volume_status != SA_LV_OK;
886                 }
887         }
888
889         /*
890          * Check if this is a One-Button-Disaster-Recovery device
891          * by looking for "$DR-10" at offset 43 in the inquiry data.
892          */
893         device->is_obdr_device = (device->devtype == ROM_DEVICE &&
894                 memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
895                         OBDR_SIG_LEN) == 0);
896 err_out:
897         os_mem_free(softs, (char*)inq_buff, OBDR_TAPE_INQ_SIZE);
898
899         DBG_FUNC("OUT\n");
900         return ret;
901 }
902
903 /*
904  * BMIC (Basic Management And Interface Commands) command
905  * to get the controller identify params
906  */
907 static int pqisrc_identify_ctrl(pqisrc_softstate_t *softs,
908         bmic_ident_ctrl_t *buff)
909 {
910         int ret = PQI_STATUS_SUCCESS;
911         pqisrc_raid_req_t request;
912
913         DBG_FUNC("IN\n");
914
915         memset(&request, 0, sizeof(request));   
916         ret =  pqisrc_build_send_raid_request(softs, &request, buff, sizeof(*buff), 
917                                 BMIC_IDENTIFY_CONTROLLER, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
918         DBG_FUNC("OUT\n");
919
920         return ret;
921 }
922
923 /* Get the adapter FW version using BMIC_IDENTIFY_CONTROLLER */
924 int pqisrc_get_ctrl_fw_version(pqisrc_softstate_t *softs)
925 {
926         int ret = PQI_STATUS_SUCCESS;
927         bmic_ident_ctrl_t *identify_ctrl;
928
929         DBG_FUNC("IN\n");
930
931         identify_ctrl = os_mem_alloc(softs, sizeof(*identify_ctrl));
932         if (!identify_ctrl) {
933                 DBG_ERR("failed to allocate memory for identify_ctrl\n");
934                 return PQI_STATUS_FAILURE;
935         }
936
937         memset(identify_ctrl, 0, sizeof(*identify_ctrl));
938
939         ret = pqisrc_identify_ctrl(softs, identify_ctrl);
940         if (ret)
941                 goto out;
942      
943         softs->fw_build_number = identify_ctrl->fw_build_number;
944         memcpy(softs->fw_version, identify_ctrl->fw_version,
945                 sizeof(identify_ctrl->fw_version));
946         softs->fw_version[sizeof(identify_ctrl->fw_version)] = '\0';
947         snprintf(softs->fw_version +
948                 strlen(softs->fw_version),
949                 sizeof(softs->fw_version),
950                 "-%u", identify_ctrl->fw_build_number);
951 out:
952         os_mem_free(softs, (char *)identify_ctrl, sizeof(*identify_ctrl));
953         DBG_INFO("Firmware version: %s Firmware build number: %d\n", softs->fw_version, softs->fw_build_number);
954         DBG_FUNC("OUT\n");
955         return ret;
956 }
957
958 /* BMIC command to get the SCSI device identify parameters */
959 static int pqisrc_identify_physical_disk(pqisrc_softstate_t *softs,
960         pqi_scsi_dev_t *device,
961         bmic_ident_physdev_t *buff,
962         int buf_len)
963 {
964         int ret = PQI_STATUS_SUCCESS;
965         uint16_t bmic_device_index;
966         pqisrc_raid_req_t request;
967
968
969         DBG_FUNC("IN\n");
970
971         memset(&request, 0, sizeof(request));   
972         bmic_device_index = BMIC_GET_DRIVE_NUMBER(device->scsi3addr);
973         request.cdb[2] = (uint8_t)bmic_device_index;
974         request.cdb[9] = (uint8_t)(bmic_device_index >> 8);
975
976         ret =  pqisrc_build_send_raid_request(softs, &request, buff, buf_len, 
977                                 BMIC_IDENTIFY_PHYSICAL_DEVICE, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
978         DBG_FUNC("OUT\n");
979         return ret;
980 }
981
982 /*
983  * Function used to get the SCSI device information using the
984  * BMIC_IDENTIFY_PHYSICAL_DEVICE command
985  */
986 static void pqisrc_get_physical_device_info(pqisrc_softstate_t *softs,
987         pqi_scsi_dev_t *device,
988         bmic_ident_physdev_t *id_phys)
989 {
990         int ret = PQI_STATUS_SUCCESS;
991
992         DBG_FUNC("IN\n");
993         memset(id_phys, 0, sizeof(*id_phys));
994
995         ret= pqisrc_identify_physical_disk(softs, device,
996                 id_phys, sizeof(*id_phys));
997         if (ret) {
998                 device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
999                 return;
1000         }
1001
1002         device->queue_depth =
1003                 LE_16(id_phys->current_queue_depth_limit);
1004         device->device_type = id_phys->device_type;
1005         device->active_path_index = id_phys->active_path_number;
1006         device->path_map = id_phys->redundant_path_present_map;
1007         memcpy(&device->box,
1008                 &id_phys->alternate_paths_phys_box_on_port,
1009                 sizeof(device->box));
1010         memcpy(&device->phys_connector,
1011                 &id_phys->alternate_paths_phys_connector,
1012                 sizeof(device->phys_connector));
1013         device->bay = id_phys->phys_bay_in_box;
1014
1015         DBG_INFO("BMIC DEV_TYPE: %x QUEUE DEPTH: 0x%x \n",  device->device_type, device->queue_depth);
1016         DBG_FUNC("OUT\n");
1017 }
1018
1019
1020 /* Function used to find the entry for a device in the device list */
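/*
 * Returns DEVICE_UNCHANGED, DEVICE_CHANGED, or DEVICE_NOT_FOUND; when a
 * matching address is found, *same_device points at the existing entry.
 */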
1021 static device_status_t pqisrc_scsi_find_entry(pqisrc_softstate_t *softs,
1022         pqi_scsi_dev_t *device_to_find,
1023         pqi_scsi_dev_t **same_device)
1024 {
1025         pqi_scsi_dev_t *device;
1026         int i,j;
1027         DBG_FUNC("IN\n");
1028         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1029                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1030                         if(softs->device_list[i][j] == NULL)
1031                                 continue;
1032                         device = softs->device_list[i][j];
1033                         if (pqisrc_scsi3addr_equal(device_to_find->scsi3addr,
1034                                 device->scsi3addr)) {
1035                                 *same_device = device;
1036                                 if (pqisrc_device_equal(device_to_find, device)) {
1037                                         if (device_to_find->volume_offline)
1038                                                 return DEVICE_CHANGED;
1039                                         return DEVICE_UNCHANGED;
1040                                 }
1041                                 return DEVICE_CHANGED;
1042                         }
1043                 }
1044         }
1045         DBG_FUNC("OUT\n");
1046
1047         return DEVICE_NOT_FOUND;
1048 }
1049
1050
1051 /* Copy the attributes of a newly reported device into its existing device entry */
1052 static void pqisrc_exist_device_update(pqisrc_softstate_t *softs,
1053         pqi_scsi_dev_t *device_exist,
1054         pqi_scsi_dev_t *new_device)
1055 {
1056         DBG_FUNC("IN\n");
1057         device_exist->expose_device = new_device->expose_device;
1058         memcpy(device_exist->vendor, new_device->vendor,
1059                 sizeof(device_exist->vendor));
1060         memcpy(device_exist->model, new_device->model,
1061                 sizeof(device_exist->model));
1062         device_exist->is_physical_device = new_device->is_physical_device;
1063         device_exist->is_external_raid_device =
1064                 new_device->is_external_raid_device;
1065         device_exist->sas_address = new_device->sas_address;
1066         device_exist->raid_level = new_device->raid_level;
1067         device_exist->queue_depth = new_device->queue_depth;
1068         device_exist->ioaccel_handle = new_device->ioaccel_handle;
1069         device_exist->volume_status = new_device->volume_status;
1070         device_exist->active_path_index = new_device->active_path_index;
1071         device_exist->path_map = new_device->path_map;
1072         device_exist->bay = new_device->bay;
1073         memcpy(device_exist->box, new_device->box,
1074                 sizeof(device_exist->box));
1075         memcpy(device_exist->phys_connector, new_device->phys_connector,
1076                 sizeof(device_exist->phys_connector));
1077         device_exist->offload_config = new_device->offload_config;
1078         device_exist->offload_enabled = false;
1079         device_exist->offload_enabled_pending =
1080                 new_device->offload_enabled_pending;
1081         device_exist->offload_to_mirror = 0;
1082         if (device_exist->raid_map)
1083                 os_mem_free(softs,
1084                             (char *)device_exist->raid_map,
1085                             sizeof(*device_exist->raid_map));
1086         device_exist->raid_map = new_device->raid_map;
1087         /* To prevent this from being freed later. */
1088         new_device->raid_map = NULL;
1089         DBG_FUNC("OUT\n");
1090 }
1091
1092 /* Find the physical disk that owns the given ioaccel_handle */
1093 static pqi_scsi_dev_t *pqisrc_identify_device_via_ioaccel(
1094         pqisrc_softstate_t *softs, uint32_t ioaccel_handle)
1095 {
1096         pqi_scsi_dev_t *device;
1097         int i,j;
1098         DBG_FUNC("IN\n");       
1099         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1100                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1101                         if(softs->device_list[i][j] == NULL)
1102                                 continue;
1103                         device = softs->device_list[i][j];
1104                         if (device->devtype != DISK_DEVICE)
1105                                 continue;
1106                         if (pqisrc_is_logical_device(device))
1107                                 continue;
1108                         if (device->ioaccel_handle == ioaccel_handle)
1109                                 return device;
1110                 }
1111         }
1112         DBG_FUNC("OUT\n");
1113
1114         return NULL;
1115 }
1116
1117 /* Compute and update the queue depth of each logical volume from its member physical disks */
1118 static void pqisrc_update_log_dev_qdepth(pqisrc_softstate_t *softs)
1119 {
1120         unsigned i;
1121         unsigned phys_dev_num;
1122         unsigned num_raidmap_entries;
1123         unsigned queue_depth;
1124         pqisrc_raid_map_t *raid_map;
1125         pqi_scsi_dev_t *device;
1126         raidmap_data_t *dev_data;
1127         pqi_scsi_dev_t *phys_disk;
1128         unsigned j, k;
1129         
1130         DBG_FUNC("IN\n");
1131
1132         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1133                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1134                         if(softs->device_list[i][j] == NULL)
1135                                 continue;
1136                         device = softs->device_list[i][j];
1137                         if (device->devtype != DISK_DEVICE)
1138                                 continue;
1139                         if (!pqisrc_is_logical_device(device))
1140                                 continue;
1141                         if (pqisrc_is_external_raid_device(device))
1142                                 continue;
1143                         device->queue_depth = PQI_LOGICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
1144                         raid_map = device->raid_map;
1145                         if (!raid_map)
1146                                 return;
1147                         dev_data = raid_map->dev_data;
1148                         phys_dev_num = LE_16(raid_map->layout_map_count) *
1149                                         (LE_16(raid_map->data_disks_per_row) +
1150                                         LE_16(raid_map->metadata_disks_per_row));
1151                         num_raidmap_entries = phys_dev_num *
1152                                                 LE_16(raid_map->row_cnt);
1153
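                        /*
                         * A logical volume's queue depth is the sum of the queue
                         * depths of the member physical disks referenced by its
                         * RAID map.
                         */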
1154                         queue_depth = 0;
1155                         for (k = 0; k < num_raidmap_entries; k++) {
1156                                 phys_disk = pqisrc_identify_device_via_ioaccel(softs,
1157                                                 dev_data[k].ioaccel_handle);
1158
1159                                 if (!phys_disk) {
1160                                         DBG_WARN(
1161                                         "Failed to find physical disk handle for logical drive %016llx\n",
1162                                                 (unsigned long long)BE_64(device->scsi3addr[0]));
1163                                         device->offload_enabled = false;
1164                                         device->offload_enabled_pending = false;
1165                                         if (raid_map)
1166                                                 os_mem_free(softs, (char *)raid_map, sizeof(*raid_map));
1167                                         device->raid_map = NULL;
1168                                         return;
1169                                 }
1170
1171                                 queue_depth += phys_disk->queue_depth;
1172                         }
1173
1174                         device->queue_depth = queue_depth;
1175                 } /* end inner loop */
1176         }/* end outer loop */
1177         DBG_FUNC("OUT\n");
1178 }
1179
1180 /* Function used to add a SCSI device to the OS SCSI subsystem */
1181 static int pqisrc_add_device(pqisrc_softstate_t *softs,
1182         pqi_scsi_dev_t *device)
1183 {
1184         DBG_FUNC("IN\n");
1185         DBG_INFO("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1186                 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1187
1188         device->invalid = false;
1189
1190         if(device->expose_device) {
1191                 /* TBD: Call OS upper layer function to add the device entry */
1192                 os_add_device(softs,device);
1193         }
1194         DBG_FUNC("OUT\n");
1195         return PQI_STATUS_SUCCESS;
1196
1197 }
1198
1199 /* Function used to remove a SCSI device from the OS SCSI subsystem */
1200 void pqisrc_remove_device(pqisrc_softstate_t *softs,
1201         pqi_scsi_dev_t *device)
1202 {
1203         DBG_FUNC("IN\n");
1204         DBG_INFO("vendor: %s model: %s bus:%d target:%d lun:%d is_physical_device:0x%x expose_device:0x%x volume_offline 0x%x volume_status 0x%x \n",
1205                 device->vendor, device->model, device->bus, device->target, device->lun, device->is_physical_device, device->expose_device, device->volume_offline, device->volume_status);
1206
1207         /* TBD: Call OS upper layer function to remove the device entry */
1208         device->invalid = true;
1209         os_remove_device(softs,device);
1210         DBG_FUNC("OUT\n");
1211 }
1212
1213
1214 /*
1215  * When exposing a new device to the OS fails, adjust the driver's device
1216  * list to stay consistent with the OS SCSI mid-layer list
1217  */
1218 static void pqisrc_adjust_list(pqisrc_softstate_t *softs,
1219         pqi_scsi_dev_t *device)
1220 {
1221         DBG_FUNC("IN\n");
1222
1223         if (!device) {
1224                 DBG_ERR("softs = %p: device is NULL !!!\n", softs);
1225                 return;
1226         }
1227
1228         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1229         softs->device_list[device->target][device->lun] = NULL;
1230         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1231         pqisrc_device_mem_free(softs, device);
1232
1233         DBG_FUNC("OUT\n");
1234 }
1235
1236 /* Debug routine used to display the RAID volume status of the device */
1237 static void pqisrc_display_volume_status(pqisrc_softstate_t *softs,
1238         pqi_scsi_dev_t *device)
1239 {
1240         char *status;
1241
1242         DBG_FUNC("IN\n");
1243         switch (device->volume_status) {
1244         case SA_LV_OK:
1245                 status = "Volume is online.";
1246                 break;
1247         case SA_LV_UNDERGOING_ERASE:
1248                 status = "Volume is undergoing background erase process.";
1249                 break;
1250         case SA_LV_NOT_AVAILABLE:
1251                 status = "Volume is waiting for transforming volume.";
1252                 break;
1253         case SA_LV_UNDERGOING_RPI:
1254                 status = "Volume is undergoing rapid parity initialization process.";
1255                 break;
1256         case SA_LV_PENDING_RPI:
1257                 status = "Volume is queued for rapid parity initialization process.";
1258                 break;
1259         case SA_LV_ENCRYPTED_NO_KEY:
1260                 status = "Volume is encrypted and cannot be accessed because key is not present.";
1261                 break;
1262         case SA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1263                 status = "Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.";
1264                 break;
1265         case SA_LV_UNDERGOING_ENCRYPTION:
1266                 status = "Volume is undergoing encryption process.";
1267                 break;
1268         case SA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1269                 status = "Volume is undergoing encryption re-keying process.";
1270                 break;
1271         case SA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1272                 status = "Volume is encrypted and cannot be accessed because controller does not have encryption enabled.";
1273                 break;
1274         case SA_LV_PENDING_ENCRYPTION:
1275                 status = "Volume is pending migration to encrypted state, but process has not started.";
1276                 break;
1277         case SA_LV_PENDING_ENCRYPTION_REKEYING:
1278                 status = "Volume is encrypted and is pending encryption rekeying.";
1279                 break;
1280         case SA_LV_STATUS_VPD_UNSUPPORTED:
1281                 status = "Volume status is not available through vital product data pages.";
1282                 break;
1283         default:
1284                 status = "Volume is in an unknown state.";
1285                 break;
1286         }
1287
1288         DBG_INFO("scsi BTL %d:%d:%d %s\n",
1289                 device->bus, device->target, device->lun, status);
1290         DBG_FUNC("OUT\n");
1291 }
1292
1293 void pqisrc_device_mem_free(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1294 {
1295         DBG_INFO("IN\n");
1296         if (!device)
1297                 return;
1298         if (device->raid_map) {
1299                 os_mem_free(softs, (char *)device->raid_map, sizeof(pqisrc_raid_map_t));
1300         }
1301         os_mem_free(softs, (char *)device, sizeof(*device));
1302         DBG_INFO("OUT\n");
1303         
1304 }
1305
1306 /* OS should call this function to free the scsi device */
1307 void pqisrc_free_device(pqisrc_softstate_t *softs, pqi_scsi_dev_t *device)
1308 {
1309
1310         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1311         pqisrc_device_mem_free(softs, device);
1312         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1313
1314 }
1315
1316
1317 /* Reconcile the newly reported devices with the driver's device list */
1318 static void pqisrc_update_device_list(pqisrc_softstate_t *softs,
1319         pqi_scsi_dev_t *new_device_list[], int num_new_devices)
1320 {
1321         int ret;
1322         int i;
1323         device_status_t dev_status;
1324         pqi_scsi_dev_t *device;
1325         pqi_scsi_dev_t *same_device;
1326         pqi_scsi_dev_t **added = NULL;
1327         pqi_scsi_dev_t **removed = NULL;
1328         int nadded = 0, nremoved = 0;
1329         int j;
1330         DBG_INFO("IN\n");
1331
1332         added = os_mem_alloc(softs, sizeof(*added) * PQI_MAX_DEVICES);
1333         removed = os_mem_alloc(softs, sizeof(*removed) * PQI_MAX_DEVICES);
1334
1335         if (!added || !removed) {
1336                 DBG_WARN("Out of memory \n");
1337                 goto free_and_out;
1338         }
1339         
1340         OS_ACQUIRE_SPINLOCK(&softs->devlist_lock);
1341         
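        /*
         * Mark every device in the current list as gone; entries that are
         * still present in the new report are unmarked below, and the rest
         * are removed.
         */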
1342         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1343                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1344                         if(softs->device_list[i][j] == NULL)
1345                                 continue;
1346                         device = softs->device_list[i][j];
1347                         device->device_gone = true;
1348                 }
1349         }
1350         DBG_IO("Device list used an array\n");
1351         for (i = 0; i < num_new_devices; i++) {
1352                 device = new_device_list[i];
1353
1354                 dev_status = pqisrc_scsi_find_entry(softs, device,
1355                         &same_device);
1356
1357                 switch (dev_status) {
1358                 case DEVICE_UNCHANGED:
1359                         /* Reported device already present and unchanged in the existing device list */
1360                         device->new_device = false;
1361                         same_device->device_gone = false;
1362                         pqisrc_exist_device_update(softs, same_device, device);
1363                         break;
1364                 case DEVICE_NOT_FOUND:
1365                         /* Device not found in existing list */
1366                         device->new_device = true;
1367                         break;
1368                 case DEVICE_CHANGED:
1369                         /* Device at this address has changed; treat it as new and add it to the list */
1370                         device->new_device = true;
1371                         break;
1372                 default:
1373                         break;
1374                 }
1375         }
1376         /* Process all devices that have gone away. */
1377         for(i = 0, nremoved = 0; i < PQI_MAX_DEVICES; i++) {
1378                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1379                         if(softs->device_list[i][j] == NULL)
1380                                 continue;
1381                         device = softs->device_list[i][j];
1382                         if (device->device_gone) {
1383                                 softs->device_list[device->target][device->lun] = NULL;
1384                                 removed[nremoved] = device;
1385                                 nremoved++;
1386                         }
1387                 }
1388         }
1389
1390         /* Process all new devices. */
1391         for (i = 0, nadded = 0; i < num_new_devices; i++) {
1392                 device = new_device_list[i];
1393                 if (!device->new_device)
1394                         continue;
1395                 if (device->volume_offline)
1396                         continue;
1397                 
1398                 softs->device_list[device->target][device->lun] = device;
1399                 DBG_INFO("Added device %p at B : %d T : %d L : %d\n",device,
1400                         device->bus,device->target,device->lun);
1401                 /* To prevent this entry from being freed later. */
1402                 new_device_list[i] = NULL;
1403                 added[nadded] = device;
1404                 nadded++;
1405         }
1406
1407         pqisrc_update_log_dev_qdepth(softs);
1408         
1409         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1410                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1411                         if(softs->device_list[i][j] == NULL)
1412                                 continue;
1413                         device = softs->device_list[i][j];
1414                         device->offload_enabled = device->offload_enabled_pending;
1415                 }
1416         }
1417
1418         OS_RELEASE_SPINLOCK(&softs->devlist_lock);
1419
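        /*
         * The OS-facing add/remove notifications below are issued only after
         * devlist_lock has been dropped, presumably because
         * pqisrc_remove_device()/pqisrc_add_device() may block in the OS
         * storage stack.
         */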
1420         for(i = 0; i < nremoved; i++) {
1421                 device = removed[i];
1422                 if (device == NULL)
1423                         continue;
1424                 pqisrc_remove_device(softs, device);
1425                 pqisrc_display_device_info(softs, "removed", device);
1426                 
1427         }
1428
1429         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1430                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1431                         if(softs->device_list[i][j] == NULL)
1432                                 continue;
1433                         device = softs->device_list[i][j];
1434                         /*
1435                          * Notify the OS upper layer if the queue depth of any
1436                          * existing device has changed.
1437                          */
1438                         if (device->queue_depth !=
1439                                 device->advertised_queue_depth) {
1440                                 device->advertised_queue_depth = device->queue_depth;
1441                                 /* TBD: Call OS upper layer function to change device Q depth */
1442                         }
1443                 }
1444         }
1445         for(i = 0; i < nadded; i++) {
1446                 device = added[i];
1447                 if (device->expose_device) {
1448                         ret = pqisrc_add_device(softs, device);
1449                         if (ret) {
1450                                 DBG_WARN("scsi %d:%d:%d addition failed, device not added\n",
1451                                         device->bus, device->target,
1452                                         device->lun);
1453                                 pqisrc_adjust_list(softs, device);
1454                                 continue;
1455                         }
1456                 }
1457
1458                 pqisrc_display_device_info(softs, "added", device);
1459         }
1460
1461         /* Process all volumes that are offline. */
1462         for (i = 0; i < num_new_devices; i++) {
1463                 device = new_device_list[i];
1464                 if (!device)
1465                         continue;
1466                 if (!device->new_device)
1467                         continue;
1468                 if (device->volume_offline) {
1469                         pqisrc_display_volume_status(softs, device);
1470                         pqisrc_display_device_info(softs, "offline", device);
1471                 }
1472         }
1473
1474 free_and_out:
1475         if (added)
1476                 os_mem_free(softs, (char *)added,
1477                             sizeof(*added) * PQI_MAX_DEVICES); 
1478         if (removed)
1479                 os_mem_free(softs, (char *)removed,
1480                             sizeof(*removed) * PQI_MAX_DEVICES); 
1481
1482         DBG_INFO("OUT\n");
1483 }
1484
1485 /*
1486  * Let the adapter know the driver version using the
1487  * BMIC_WRITE_HOST_WELLNESS command.
1488  */
1489 int pqisrc_write_driver_version_to_host_wellness(pqisrc_softstate_t *softs)
1490 {
1491         int rval = PQI_STATUS_SUCCESS;
1492         struct bmic_host_wellness_driver_version *host_wellness_driver_ver;
1493         size_t data_length;
1494         pqisrc_raid_req_t request;
1495
1496         DBG_FUNC("IN\n");
1497
1498         memset(&request, 0, sizeof(request));   
1499         data_length = sizeof(*host_wellness_driver_ver);
1500
1501         host_wellness_driver_ver = os_mem_alloc(softs, data_length);
1502         if (!host_wellness_driver_ver) {
1503                 DBG_ERR("failed to allocate memory for host wellness driver_version\n");
1504                 return PQI_STATUS_FAILURE;
1505         }
1506
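        /*
         * Fill in the host wellness buffer. Assuming the structure is packed
         * in declaration order, the data handed to the controller looks
         * roughly like:
         *
         *   '<','H','W','>'  'D','V'  <length, LE16>  "<os_name><driver version>"  'Z','Z'
         */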
1507         host_wellness_driver_ver->start_tag[0] = '<';
1508         host_wellness_driver_ver->start_tag[1] = 'H';
1509         host_wellness_driver_ver->start_tag[2] = 'W';
1510         host_wellness_driver_ver->start_tag[3] = '>';
1511         host_wellness_driver_ver->driver_version_tag[0] = 'D';
1512         host_wellness_driver_ver->driver_version_tag[1] = 'V';
1513         host_wellness_driver_ver->driver_version_length = LE_16(sizeof(host_wellness_driver_ver->driver_version));
1514         strncpy(host_wellness_driver_ver->driver_version, softs->os_name,
1515                 sizeof(host_wellness_driver_ver->driver_version));
1516         if (strlen(softs->os_name) < sizeof(host_wellness_driver_ver->driver_version)) {
1517                 strncpy(host_wellness_driver_ver->driver_version + strlen(softs->os_name), PQISRC_DRIVER_VERSION,
1518                         sizeof(host_wellness_driver_ver->driver_version) - strlen(softs->os_name));
1519         } else {
1520                 DBG_INFO("OS name length (%lu) exceeds the driver_version buffer; driver version not appended\n",
1521                         strlen(softs->os_name));
1522         }
1523         host_wellness_driver_ver->driver_version[sizeof(host_wellness_driver_ver->driver_version) - 1] = '\0';
1524         host_wellness_driver_ver->end_tag[0] = 'Z';
1525         host_wellness_driver_ver->end_tag[1] = 'Z';
1526
1527         rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_driver_ver, data_length,
1528                                         BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1529
1530         os_mem_free(softs, (char *)host_wellness_driver_ver, data_length);
1531         
1532         DBG_FUNC("OUT\n");
1533         return rval;
1534 }
1535
1536 /* 
1537  * Write current RTC time from host to the adapter using
1538  * BMIC_WRITE_HOST_WELLNESS
1539  */
1540 int pqisrc_write_current_time_to_host_wellness(pqisrc_softstate_t *softs)
1541 {
1542         int rval = PQI_STATUS_SUCCESS;
1543         struct bmic_host_wellness_time *host_wellness_time;
1544         size_t data_length;
1545         pqisrc_raid_req_t request;
1546
1547         DBG_FUNC("IN\n");
1548
1549         memset(&request, 0, sizeof(request));   
1550         data_length = sizeof(*host_wellness_time);
1551
1552         host_wellness_time = os_mem_alloc(softs, data_length);
1553         if (!host_wellness_time) {
1554                 DBG_ERR("failed to allocate memory for host wellness time structure\n");
1555                 return PQI_STATUS_FAILURE;
1556         }
1557
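        /*
         * Fill in the host wellness time buffer. Assuming the structure is
         * packed in declaration order, the payload looks roughly like:
         *
         *   '<','H','W','>'  'T','D'  <length, LE16>  <RTC time fields>  'D','W'  'Z','Z'
         */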
1558         host_wellness_time->start_tag[0] = '<';
1559         host_wellness_time->start_tag[1] = 'H';
1560         host_wellness_time->start_tag[2] = 'W';
1561         host_wellness_time->start_tag[3] = '>';
1562         host_wellness_time->time_tag[0] = 'T';
1563         host_wellness_time->time_tag[1] = 'D';
1564         host_wellness_time->time_length = LE_16(offsetof(struct bmic_host_wellness_time, time_length) - 
1565                                                                                         offsetof(struct bmic_host_wellness_time, century));
1566
1567         os_get_time(host_wellness_time);
1568
1569         host_wellness_time->dont_write_tag[0] = 'D';
1570         host_wellness_time->dont_write_tag[1] = 'W';
1571         host_wellness_time->end_tag[0] = 'Z';
1572         host_wellness_time->end_tag[1] = 'Z';
1573         
1574         rval = pqisrc_build_send_raid_request(softs, &request, host_wellness_time, data_length,
1575                                         BMIC_WRITE_HOST_WELLNESS, 0, (uint8_t *)RAID_CTLR_LUNID, NULL);
1576         
1577         os_mem_free(softs, (char *)host_wellness_time, data_length);
1578
1579         DBG_FUNC("OUT\n");
1580         return rval;
1581 }
1582
1583 /*
1584  * Perform a rescan of SCSI devices to pick up any
1585  * configuration change events.
1586  */
1587 int pqisrc_scan_devices(pqisrc_softstate_t *softs)
1588 {
1589         boolean_t is_physical_device;
1590         int ret = PQI_STATUS_FAILURE;
1591         int i;
1592         int new_dev_cnt;
1593         int phy_log_dev_cnt;
1594         uint8_t *scsi3addr;
1595         uint32_t physical_cnt;
1596         uint32_t logical_cnt;
1597         uint32_t ndev_allocated = 0;
1598         size_t phys_data_length, log_data_length;
1599         reportlun_data_ext_t *physical_dev_list = NULL;
1600         reportlun_data_ext_t *logical_dev_list = NULL;
1601         reportlun_ext_entry_t *lun_ext_entry = NULL;
1602         bmic_ident_physdev_t *bmic_phy_info = NULL;
1603         pqi_scsi_dev_t **new_device_list = NULL;
1604         pqi_scsi_dev_t *device = NULL;
1605         
1606
1607         DBG_FUNC("IN\n");
1608
1609         ret = pqisrc_get_phys_log_device_list(softs, &physical_dev_list, &logical_dev_list,
1610                                       &phys_data_length, &log_data_length);
1611
1612         if (ret)
1613                 goto err_out;
1614
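        /*
         * The extended REPORT LUNS headers carry the list length in bytes,
         * big-endian; convert it to an entry count for each list.
         */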
1615         physical_cnt = BE_32(physical_dev_list->header.list_length) 
1616                 / sizeof(physical_dev_list->lun_entries[0]);
1617         
1618         logical_cnt = BE_32(logical_dev_list->header.list_length)
1619                 / sizeof(logical_dev_list->lun_entries[0]);
1620
1621         DBG_INFO("physical_cnt %d logical_cnt %d\n", physical_cnt, logical_cnt);
1622
1623         if (physical_cnt) {
1624                 bmic_phy_info = os_mem_alloc(softs, sizeof(*bmic_phy_info));
1625                 if (bmic_phy_info == NULL) {
1626                         ret = PQI_STATUS_FAILURE;
1627                         DBG_ERR("failed to allocate memory for BMIC ID PHYS Device : %d\n", ret);
1628                         goto err_out;
1629                 }
1630         }
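        /*
         * A single BMIC identify-physical-device buffer is allocated up front
         * and reused for every physical disk device in the loop below.
         */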
1631         phy_log_dev_cnt = physical_cnt + logical_cnt;
1632         new_device_list = os_mem_alloc(softs,
1633                                 sizeof(*new_device_list) * phy_log_dev_cnt);
1634
1635         if (new_device_list == NULL) {
1636                 ret = PQI_STATUS_FAILURE;
1637                 DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1638                 goto err_out;
1639         }
1640
1641         for (i = 0; i < phy_log_dev_cnt; i++) {
1642                 new_device_list[i] = os_mem_alloc(softs,
1643                                                 sizeof(*new_device_list[i]));
1644                 if (new_device_list[i] == NULL) {
1645                         ret = PQI_STATUS_FAILURE;
1646                         DBG_ERR("failed to allocate memory for device list : %d\n", ret);
1647                         ndev_allocated = i;
1648                         goto err_out;
1649                 }
1650         }
1651
1652         ndev_allocated = phy_log_dev_cnt;
1653         new_dev_cnt = 0;
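        /*
         * Walk the combined list: physical LUN entries first, then logical
         * ones. new_dev_cnt only counts the devices that will actually be
         * handed to pqisrc_update_device_list().
         */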
1654         for (i = 0; i < phy_log_dev_cnt; i++) {
1655
1656                 if (i < physical_cnt) {
1657                         is_physical_device = true;
1658                         lun_ext_entry = &physical_dev_list->lun_entries[i];
1659                 } else {
1660                         is_physical_device = false;
1661                         lun_ext_entry =
1662                                 &logical_dev_list->lun_entries[i - physical_cnt];
1663                 }
1664
1665                 scsi3addr = lun_ext_entry->lunid;
1666
1667                 /* Skip physical devices that are masked from the host. */
1668                 if (MASKED_DEVICE(scsi3addr) && is_physical_device)
1669                         continue;
1670
1671                 device = new_device_list[new_dev_cnt];
1672                 memset(device, 0, sizeof(*device));
1673                 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1674                 device->wwid = lun_ext_entry->wwid;
1675                 device->is_physical_device = is_physical_device;
1676                 if (!is_physical_device)
1677                         device->is_external_raid_device =
1678                                 pqisrc_is_external_raid_addr(scsi3addr);
1679                 
1680
1681                 /* Get device type, vendor, model, device ID. */
1682                 ret = pqisrc_get_dev_data(softs, device);
1683                 if (ret) {
1684                         DBG_WARN("Inquiry failed, skipping device %016llx\n",
1685                                  (unsigned long long)BE_64(*((uint64_t *)device->scsi3addr)));
1686                         DBG_INFO("INQUIRY FAILED\n");
1687                         continue;
1688                 }
1689                 pqisrc_assign_btl(device);
1690
1691                 /*
1692                  * Expose all devices except for physical devices that
1693                  * are masked.
1694                  */
1695                 if (device->is_physical_device &&
1696                         MASKED_DEVICE(scsi3addr))
1697                         device->expose_device = false;
1698                 else
1699                         device->expose_device = true;
1700
1701                 if (device->is_physical_device &&
1702                     (lun_ext_entry->device_flags &
1703                      REPORT_LUN_DEV_FLAG_AIO_ENABLED) &&
1704                      lun_ext_entry->ioaccel_handle) {
1705                         device->aio_enabled = true;
1706                 }
1707                 switch (device->devtype) {
1708                 case ROM_DEVICE:
1709                         /*
1710                          * We don't *really* support actual CD-ROM devices,
1711                          * but we do support the HP "One Button Disaster
1712                          * Recovery" tape drive which temporarily pretends to
1713                          * be a CD-ROM drive.
1714                          */
1715                         if (device->is_obdr_device)
1716                                 new_dev_cnt++;
1717                         break;
1718                 case DISK_DEVICE:
1719                 case ZBC_DEVICE:
1720                         if (device->is_physical_device) {
1721                                 device->ioaccel_handle =
1722                                         lun_ext_entry->ioaccel_handle;
1723                                 device->sas_address = BE_64(lun_ext_entry->wwid);
1724                                 pqisrc_get_physical_device_info(softs, device,
1725                                         bmic_phy_info);
1726                         }
1727                         /* A logical device has no SAS address of its own, so for
1728                          * external RAID (MSA) devices the LUN ID is used as the
1729                          * target SAS address. */
1730                         if(device->is_external_raid_device)
1731                                 device->sas_address = BE_64((uint64_t)lun_ext_entry->lunid);
1732                         new_dev_cnt++;
1733                         break;
1734                 case ENCLOSURE_DEVICE:
1735                         if (device->is_physical_device) {
1736                                 device->sas_address = BE_64(lun_ext_entry->wwid);
1737                         }
1738                         new_dev_cnt++;
1739                         break;  
1740                 case TAPE_DEVICE:
1741                 case MEDIUM_CHANGER_DEVICE:
1742                         new_dev_cnt++;
1743                         break;
1744                 case RAID_DEVICE:
1745                         /*
1746                          * Only present the HBA controller itself as a RAID
1747                          * controller.  If it's a RAID controller other than
1748                          * the HBA itself (an external RAID controller, MSA500
1749                          * or similar), don't present it.
1750                          */
1751                         if (pqisrc_is_hba_lunid(scsi3addr))
1752                                 new_dev_cnt++;
1753                         break;
1754                 }
1755         }
1756         DBG_INFO("new_dev_cnt %d\n", new_dev_cnt);
1757
1758         pqisrc_update_device_list(softs, new_device_list, new_dev_cnt);
1759         
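        /*
         * On both the success and error paths, free every scan-time device
         * object that was not transferred into softs->device_list (the update
         * routine NULLs out the entries it keeps).
         */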
1760 err_out:
1761         if (new_device_list) {
1762                 for (i = 0; i < ndev_allocated; i++) {
1763                         if (new_device_list[i]) {
1764                                 if(new_device_list[i]->raid_map)
1765                                         os_mem_free(softs, (char *)new_device_list[i]->raid_map,
1766                                                                                 sizeof(pqisrc_raid_map_t));
1767                                 os_mem_free(softs, (char*)new_device_list[i],
1768                                                                 sizeof(*new_device_list[i]));
1769                         }
1770                 }
1771                 os_mem_free(softs, (char *)new_device_list,
1772                                         sizeof(*new_device_list) * ndev_allocated); 
1773         }
1774         if(physical_dev_list)
1775                 os_mem_free(softs, (char *)physical_dev_list, phys_data_length);
1776         if(logical_dev_list)
1777                 os_mem_free(softs, (char *)logical_dev_list, log_data_length);
1778         if (bmic_phy_info)
1779                 os_mem_free(softs, (char *)bmic_phy_info, sizeof(*bmic_phy_info));
1780         
1781         DBG_FUNC("OUT\n");
1782
1783         return ret;
1784 }
1785
1786 /*
1787  * Clean up memory allocated for devices.
1788  */
1789 void pqisrc_cleanup_devices(pqisrc_softstate_t *softs)
1790 {
1791
1792         int i = 0, j = 0;
1793         pqi_scsi_dev_t *dvp = NULL;
1794         DBG_FUNC("IN\n");
1795         
1796         for(i = 0; i < PQI_MAX_DEVICES; i++) {
1797                 for(j = 0; j < PQI_MAX_MULTILUN; j++) {
1798                         if (softs->device_list[i][j] == NULL) 
1799                                 continue;
1800                         dvp = softs->device_list[i][j];
1801                         pqisrc_device_mem_free(softs, dvp);
1802                 }
1803         }
1804         DBG_FUNC("OUT\n");
1805 }
1806