/*-
 * Copyright 2016-2021 Microchip Technology, Inc. and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "smartpqi_includes.h"
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */

#define HPSA_RAID_1		2	/* also used for RAID 10 */
#define HPSA_RAID_5		3	/* also used for RAID 50 */
#define HPSA_RAID_51		4
#define HPSA_RAID_6		5	/* also used for RAID 60 */
#define HPSA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define HPSA_RAID_MAX		HPSA_RAID_ADM
#define HPSA_RAID_UNKNOWN	0xff

#define SG_FLAG_LAST	0x40000000
#define SG_FLAG_CHAIN	0x80000000
/* Subroutine to find out embedded sgl count in IU */
static inline uint32_t
pqisrc_embedded_sgl_count(uint32_t elem_alloted)
{
	uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;

	/* Calculate the embedded sgl count using num_elem_alloted for the IO */
	embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
	DBG_IO("embedded_sgl_count :%d\n", embedded_sgl_count);

	return embedded_sgl_count;
}
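/*
 * Worked example for the calculation above (the macro values used here are
 * illustrative, not taken from the headers): with
 * MAX_EMBEDDED_SG_IN_FIRST_IU == 4 and MAX_EMBEDDED_SG_IN_IU == 8, an
 * allotment of 3 queue elements yields 4 + (3 - 1) * 8 = 20 embedded SG
 * descriptors.
 */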
/* Subroutine to find out contiguous free elem in IU */
static inline uint32_t
pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
{
	uint32_t contiguous_free_elem = 0;

	if (pi >= ci) {
		contiguous_free_elem = (elem_in_q - pi);
		if (ci == 0)
			contiguous_free_elem -= 1;
	} else {
		contiguous_free_elem = (ci - pi - 1);
	}

	return contiguous_free_elem;
}
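/*
 * Note on the free-element math above: pi is the driver's producer index and
 * ci the firmware's consumer index into a circular IB queue. Free space is
 * counted only up to the wrap point, since an IU must occupy contiguous
 * elements. For example (illustrative numbers), with elem_in_q = 32, pi = 30
 * and ci = 4, only elements 30 and 31 are usable before the wrap, so the
 * routine reports 2 free elements.
 */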
/* Subroutine to find out the number of elements needed for the request */
static uint32_t
pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
{
	uint32_t num_sg;
	uint32_t num_elem_required = 1;

	DBG_IO("SGL_Count :%d", SG_Count);
	/*
	 * If SG_Count is greater than the max SG per IU, i.e. 4 or 68
	 * (4 without spanning, 68 with spanning), chaining is required.
	 * Alternatively, if SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU, the
	 * list fits in the first IU. In both cases one element is enough.
	 */
	if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU)
		return num_elem_required;

	/* SG entries beyond those embedded in the first IU element */
	num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU;
	num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);

	return num_elem_required;
}
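/*
 * Worked example for the element count above (values are illustrative): with
 * MAX_EMBEDDED_SG_IN_FIRST_IU == 4, MAX_EMBEDDED_SG_IN_IU == 8 and
 * softs->max_sg_per_iu == 68, an I/O with SG_Count = 21 needs
 * 1 + roundup((21 - 4) / 8) = 4 elements, while SG_Count = 70 (> 68) returns
 * 1 because the SG list will be chained rather than spanned.
 */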
/* Subroutine to build SG list for the IU submission */
static boolean_t
pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
			uint32_t num_elem_alloted)
{
	uint32_t i;
	uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
	sgt_t *sgt = sg_array;
	sgt_t *sg_chain = NULL;
	boolean_t partial = false;

	DBG_IO("SGL_Count :%d", num_sg);

	if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) {
		/* The whole SG list fits in the IU itself */
		for (i = 0; i < num_sg; i++, sgt++) {
			sgt->addr = OS_GET_IO_SG_ADDR(rcb, i);
			sgt->len = OS_GET_IO_SG_LEN(rcb, i);
			sgt->flags = 0;
		}
		sg_array[num_sg - 1].flags = SG_FLAG_LAST;
	} else {
		/* SGL chaining: the IU carries a single chain descriptor */
		sg_chain = rcb->sg_chain_virt;
		sgt->addr = rcb->sg_chain_dma;
		sgt->len = num_sg * sizeof(sgt_t);
		sgt->flags = SG_FLAG_CHAIN;

		sgt = sg_chain;
		for (i = 0; i < num_sg; i++, sgt++) {
			sgt->addr = OS_GET_IO_SG_ADDR(rcb, i);
			sgt->len = OS_GET_IO_SG_LEN(rcb, i);
			sgt->flags = 0;
		}
		sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
		num_sg = 1;
		partial = true;
	}

	iu_hdr->iu_length = num_sg * sizeof(sgt_t);

	return partial;
}
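/*
 * Summary of the two layouts produced above: when the OS SG list fits in the
 * embedded descriptors it is copied directly into the IU and the last entry
 * is tagged SG_FLAG_LAST; otherwise the IU carries a single SG_FLAG_CHAIN
 * descriptor pointing at the rcb's pre-allocated chain buffer
 * (sg_chain_virt/sg_chain_dma) and the full list is written there instead.
 */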
/* Subroutine used to build the RAID request */
static void
pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
	pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
{
	uint32_t i;

	raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	raid_req->header.comp_feature = 0;
	raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
	raid_req->work_area[0] = 0;
	raid_req->work_area[1] = 0;
	raid_req->request_id = rcb->tag;
	raid_req->nexus_id = 0;
	raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
	memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
		sizeof(raid_req->lun_number));
	raid_req->protocol_spec = 0;
	raid_req->data_direction = rcb->data_dir;
	raid_req->reserved1 = 0;
	raid_req->error_index = raid_req->request_id;
	raid_req->reserved2 = 0;
	raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
	raid_req->command_priority = 0;
	raid_req->reserved3 = 0;
	raid_req->reserved4 = 0;
	raid_req->reserved5 = 0;

	/* As cdb and additional_cdb_bytes are contiguous,
	   update them in a single statement */
	memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);

	for (i = 0; i < rcb->cmdlen; i++)
		DBG_IO(" 0x%x \n ", raid_req->cdb[i]);

	switch (rcb->cmdlen) {
	case 6:
	case 10:
	case 12:
	case 16:
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default: /* todo:review again */
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	/* Frame SGL Descriptor */
	raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
		&raid_req->header, num_elem_alloted);

	raid_req->header.iu_length +=
		offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);

	DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
	DBG_IO("raid_req->response_queue_id :%d\n", raid_req->response_queue_id);
	DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id);
	DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length);
	DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute);
	DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number);
	DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index);
	DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void *)raid_req->sg_descriptors[0].addr);
	DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
	DBG_IO("raid_req->sg_descriptors[0].flags : 0x%x", raid_req->sg_descriptors[0].flags);

	rcb->success_cmp_callback = pqisrc_process_io_response_success;
	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
	rcb->resp_qid = raid_req->response_queue_id;
}
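/*
 * A note on the IU length accounting above: header.iu_length ends up counting
 * everything that follows the fixed iu_header_t, i.e. the SG descriptor bytes
 * set by pqisrc_build_sgl() plus the request body up to the first SG
 * descriptor (offsetof(pqisrc_raid_req_t, sg_descriptors) -
 * sizeof(iu_header_t)).
 */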
/* Subroutine used to build the AIO request */
static void
pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
	pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
{
	aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
	aio_req->header.comp_feature = 0;
	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
	aio_req->work_area[0] = 0;
	aio_req->work_area[1] = 0;
	aio_req->req_id = rcb->tag;
	aio_req->res1[0] = 0;
	aio_req->res1[1] = 0;
	aio_req->nexus = rcb->ioaccel_handle;
	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
	aio_req->data_dir = rcb->data_dir;
	aio_req->mem_type = 0;
	aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
	aio_req->cmd_prio = 0;
	aio_req->err_idx = aio_req->req_id;
	aio_req->cdb_len = rcb->cmdlen;

	/* Never copy more CDB bytes than the request can hold */
	if (rcb->cmdlen > sizeof(aio_req->cdb))
		rcb->cmdlen = sizeof(aio_req->cdb);
	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);

	for (int i = 0; i < rcb->cmdlen; i++)
		DBG_IO(" 0x%x \n", aio_req->cdb[i]);

	memset(aio_req->lun, 0, sizeof(aio_req->lun));
	memset(aio_req->res4, 0, sizeof(aio_req->res4));

	if (rcb->encrypt_enable == true) {
		aio_req->encrypt_enable = true;
		aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
		aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower);
		aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper);
	} else {
		aio_req->encrypt_enable = 0;
		aio_req->encrypt_key_index = 0;
		aio_req->encrypt_twk_high = 0;
		aio_req->encrypt_twk_low = 0;
	}

	/* Frame SGL Descriptor */
	aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
		&aio_req->header, num_elem_alloted);

	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);

	DBG_INFO("aio_req->num_sg :%d", aio_req->num_sg);

	aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
		sizeof(iu_header_t);

	DBG_IO("aio_req->header.iu_type : 0x%x \n", aio_req->header.iu_type);
	DBG_IO("aio_req->resp_qid :0x%x", aio_req->response_queue_id);
	DBG_IO("aio_req->req_id : 0x%x \n", aio_req->req_id);
	DBG_IO("aio_req->nexus : 0x%x \n", aio_req->nexus);
	DBG_IO("aio_req->buf_len : 0x%x \n", aio_req->buf_len);
	DBG_IO("aio_req->data_dir : 0x%x \n", aio_req->data_dir);
	DBG_IO("aio_req->task_attr : 0x%x \n", aio_req->task_attr);
	DBG_IO("aio_req->err_idx : 0x%x \n", aio_req->err_idx);
	DBG_IO("aio_req->num_sg :%d", aio_req->num_sg);
	DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void *)aio_req->sg_desc[0].addr);
	DBG_IO("aio_req->sg_desc[0].len : 0x%x \n", aio_req->sg_desc[0].len);
	DBG_IO("aio_req->sg_desc[0].flags : 0x%x \n", aio_req->sg_desc[0].flags);

	rcb->success_cmp_callback = pqisrc_process_io_response_success;
	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
	rcb->resp_qid = aio_req->response_queue_id;
}
/* Function used to build and send RAID/AIO */
int
pqisrc_build_send_io(pqisrc_softstate_t *softs, rcb_t *rcb)
{
	ib_queue_t *ib_q_array = softs->op_aio_ib_q;
	ib_queue_t *ib_q = NULL;
	char *ib_iu = NULL;
	IO_PATH_T io_path = AIO_PATH;
	uint32_t TraverseCount = 0;
	int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
	int qindex = first_qindex;
	uint32_t num_op_ib_q = softs->num_op_aio_ibq;
	uint32_t num_elem_needed;
	uint32_t num_elem_alloted = 0;
	pqi_scsi_dev_t *devp = rcb->dvp;
	uint8_t raidbypass_cdb[16];

	if (!rcb->aio_retry) {
		rcb->cdbp = OS_GET_CDBP(rcb);
		if (IS_AIO_PATH(devp)) {
			/** IO for Physical Drive **/
			/** Send in AIO PATH **/
			rcb->ioaccel_handle = devp->ioaccel_handle;
		} else {
			int ret = PQI_STATUS_FAILURE;
			/** IO for RAID Volume **/
			if (devp->offload_enabled) {
				/** Bypass IO, send in AIO PATH **/
				ret = pqisrc_send_scsi_cmd_raidbypass(softs,
					devp, rcb, raidbypass_cdb);
			}
			if (PQI_STATUS_FAILURE == ret) {
				/** Send in RAID PATH **/
				io_path = RAID_PATH;
				num_op_ib_q = softs->num_op_raid_ibq;
				ib_q_array = softs->op_raid_ib_q;
			} else {
				rcb->cdbp = raidbypass_cdb;
			}
		}
	} else {
		/* Retrying failed AIO IO */
		io_path = RAID_PATH;
		rcb->cdbp = OS_GET_CDBP(rcb);
		num_op_ib_q = softs->num_op_raid_ibq;
		ib_q_array = softs->op_raid_ib_q;
	}

	num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
	DBG_IO("num_elem_needed :%d", num_elem_needed);

	do {
		uint32_t num_elem_available;
		ib_q = (ib_q_array + qindex);
		PQI_LOCK(&ib_q->lock);
		num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
			*(ib_q->ci_virt_addr), ib_q->num_elem);

		DBG_IO("num_elem_available :%d\n", num_elem_available);
		if (num_elem_available >= num_elem_needed) {
			num_elem_alloted = num_elem_needed;
			break;
		}
		DBG_IO("Current queue is busy! Hop to next queue\n");

		PQI_UNLOCK(&ib_q->lock);
		qindex = (qindex + 1) % num_op_ib_q;
		if (qindex == first_qindex) {
			if (num_elem_needed == 1)
				break;
			TraverseCount += 1;
			num_elem_needed = 1;
		}
	} while (TraverseCount < 2);

	DBG_IO("num_elem_alloted :%d", num_elem_alloted);
	if (num_elem_alloted == 0) {
		DBG_WARN("OUT: IB Queues were full\n");
		return PQI_STATUS_QFULL;
	}

	pqisrc_increment_device_active_io(softs, devp);

	/* Get IB Queue Slot address to build IU */
	ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);

	if (io_path == AIO_PATH) {
		/** Build AIO structure **/
		pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t *)ib_iu,
			num_elem_alloted);
	} else {
		/** Build RAID structure **/
		pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t *)ib_iu,
			num_elem_alloted);
	}

	rcb->req_pending = true;
	rcb->path = io_path;

	/* Update the local PI */
	ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;

	DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local);
	DBG_INFO("*ib_q->ci_virt_addr: %x\n", *(ib_q->ci_virt_addr));

	/* Inform the fw about the new IU */
	PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);

	PQI_UNLOCK(&ib_q->lock);

	return PQI_STATUS_SUCCESS;
}
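/*
 * Illustration of the producer-index update above (numbers are made up): for
 * a queue with num_elem = 32 and pi_local = 30, an IU that was allotted 2
 * elements advances pi_local to (30 + 2) % 32 = 0, and the new value is
 * written to the queue's PI register so the firmware starts fetching the IU.
 */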
/* Subroutine used to set encryption info as part of RAID bypass IO */
static void
pqisrc_set_enc_info(struct pqi_enc_info *enc_info,
	struct raid_map *raid_map, uint64_t first_block)
{
	uint32_t volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	enc_info->data_enc_key_index =
		GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
	enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
	enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
}
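/*
 * Worked example for the tweak computation above: with
 * volume_blk_size = 4096 and first_block = 100, the tweak becomes
 * (100 * 4096) / 512 = 800; encrypt_tweak_lower receives the low 32 bits
 * (800) and encrypt_tweak_upper the high 32 bits (0).
 */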
/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass */
static int
check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
	uint32_t *blk_cnt)
{
	switch (cdb[0]) {
	case SCMD_WRITE_6:
		*is_write = true;
		/* FALLTHROUGH */
	case SCMD_READ_6:
		*fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) |
			(cdb[2] << 8) | cdb[3]);
		*blk_cnt = (uint32_t)cdb[4];
		if (*blk_cnt == 0)
			*blk_cnt = 256;
		break;
	case SCMD_WRITE_10:
		*is_write = true;
		/* FALLTHROUGH */
	case SCMD_READ_10:
		*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
		*blk_cnt = (uint32_t)GET_BE16(&cdb[7]);
		break;
	case SCMD_WRITE_12:
		*is_write = true;
		/* FALLTHROUGH */
	case SCMD_READ_12:
		*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
		*blk_cnt = GET_BE32(&cdb[6]);
		break;
	case SCMD_WRITE_16:
		*is_write = true;
		/* FALLTHROUGH */
	case SCMD_READ_16:
		*fst_blk = GET_BE64(&cdb[2]);
		*blk_cnt = GET_BE32(&cdb[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_STATUS_FAILURE;
	}
	return PQI_STATUS_SUCCESS;
}
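/*
 * CDB fields decoded above (standard SCSI layouts): READ(6)/WRITE(6) carry a
 * 21-bit LBA in bytes 1-3 and an 8-bit transfer length in byte 4;
 * READ(10)/WRITE(10) a 32-bit big-endian LBA in bytes 2-5 and a 16-bit length
 * in bytes 7-8; the 12-byte variants a 32-bit length in bytes 6-9; and the
 * 16-byte variants a 64-bit LBA in bytes 2-9 with a 32-bit length in
 * bytes 10-13.
 */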
/* Print any arbitrary buffer of length total_len */
void
pqisrc_print_buffer(pqisrc_softstate_t *softs, char *msg, void *user_buf,
	uint32_t total_len, uint32_t flags)
{
#define LINE_BUF_LEN 60
#define INDEX_PER_LINE 16
	uint32_t buf_consumed = 0;
	uint32_t ii;
	char line_buf[LINE_BUF_LEN];
	int line_len; /* written length per line */
	char this_char;

	if (user_buf == NULL)
		return;

	/* Print index columns */
	if (flags & PRINT_FLAG_HDR_COLUMN) {
		for (ii = 0, line_len = 0; ii < MIN(total_len, 16); ii++) {
			line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02d ", ii);
			if ((line_len + 4) >= LINE_BUF_LEN)
				break;
		}
		DBG_NOTE("%15.15s:[ %s ]\n", "header", line_buf);
	}

	/* Print the buffer contents in hex, INDEX_PER_LINE bytes per line */
	while (buf_consumed < total_len) {
		memset(line_buf, 0, LINE_BUF_LEN);

		for (ii = 0, line_len = 0; ii < INDEX_PER_LINE; ii++) {
			this_char = *((char *)(user_buf) + buf_consumed);
			line_len += snprintf(line_buf + line_len, (LINE_BUF_LEN - line_len), "%02x ", this_char);

			buf_consumed++;
			if (buf_consumed >= total_len || (line_len + 4) >= LINE_BUF_LEN)
				break;
		}
		DBG_NOTE("%15.15s:[ %s ]\n", msg, line_buf);
	}
}
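/*
 * Example of the output produced above: a 20-byte buffer dumped with
 * PRINT_FLAG_HDR_COLUMN set prints one "header" line with the column indices
 * 00..15, followed by two data lines, the first holding 16 hex bytes and the
 * second the remaining 4.
 */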
/*
 * Function used to build and send RAID bypass request to the adapter
 */
int
pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
{
	struct raid_map *raid_map;
	boolean_t is_write = false;
	uint32_t map_idx;
	uint64_t fst_blk, lst_blk;
	uint32_t blk_cnt, blks_per_row;
	uint64_t fst_row, lst_row;
	uint32_t fst_row_offset, lst_row_offset;
	uint32_t fst_col, lst_col;
	uint32_t r5or6_blks_per_row;
	uint64_t r5or6_fst_row, r5or6_lst_row;
	uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset;
	uint32_t r5or6_fst_col, r5or6_lst_col;
	uint16_t data_disks_per_row, total_disks_per_row;
	uint16_t layout_map_count;
	uint32_t stripesz;
	uint16_t strip_sz;
	uint32_t fst_grp, lst_grp, cur_grp;
	uint32_t map_row;
	uint64_t disk_block;
	uint32_t disk_blk_cnt;
	uint8_t cdb_length;
	int offload_to_mirror;
	int i;
	/* Check for eligible opcode, get LBA and block count. */
	memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);
	for (i = 0; i < rcb->cmdlen; i++)
		DBG_IO(" CDB [ %d ] : %x\n", i, cdb[i]);
	if (check_for_scsi_opcode(cdb, &is_write,
			&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
		return PQI_STATUS_FAILURE;
	/* Check for write to non-RAID-0. */
	if (is_write && device->raid_level != SA_RAID_0)
		return PQI_STATUS_FAILURE;

	if (blk_cnt == 0)
		return PQI_STATUS_FAILURE;

	lst_blk = fst_blk + blk_cnt - 1;
	raid_map = device->raid_map;

	/* Check for invalid block or wraparound. */
	if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) ||
		lst_blk < fst_blk)
		return PQI_STATUS_FAILURE;

	data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row);
	strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size));
	layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count));
	/* Calculate stripe information for the request. */
	blks_per_row = data_disks_per_row * strip_sz;
	if (!blks_per_row)
		return PQI_STATUS_FAILURE; /* Send the IO in the RAID path itself, not AIO or RAID bypass */

	/* use __udivdi3 ? */
	fst_row = fst_blk / blks_per_row;
	lst_row = lst_blk / blks_per_row;
	fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row));
	lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row));
	fst_col = fst_row_offset / strip_sz;
	lst_col = lst_row_offset / strip_sz;

	/* If this isn't a single row/column then give to the controller. */
	if (fst_row != lst_row || fst_col != lst_col)
		return PQI_STATUS_FAILURE;
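	/*
	 * Worked example for the stripe math above (illustrative geometry):
	 * with strip_sz = 128 blocks and data_disks_per_row = 3, blks_per_row
	 * is 384. A request with fst_blk = 1000 and blk_cnt = 16
	 * (lst_blk = 1015) maps to fst_row = lst_row = 1000 / 384 = 2,
	 * fst_row_offset = 232, lst_row_offset = 247, and
	 * fst_col = lst_col = 232 / 128 = 1, so the request stays within one
	 * strip and remains bypass-eligible.
	 */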
	/* Proceeding with driver mapping. */
	total_disks_per_row = data_disks_per_row +
		GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row));
	map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
		GET_LE16((uint8_t *)(&raid_map->row_cnt));
	map_idx = (map_row * total_disks_per_row) + fst_col;

	/* RAID 1 */
	if (device->raid_level == SA_RAID_1) {
		if (device->offload_to_mirror)
			map_idx += data_disks_per_row;
		device->offload_to_mirror = !device->offload_to_mirror;
	} else if (device->raid_level == SA_RAID_ADM) {
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
		 * divisible by 3.
		 */
		offload_to_mirror = device->offload_to_mirror;
		if (offload_to_mirror == 0) {
			/* use physical disk in the first mirrored group. */
			map_idx %= data_disks_per_row;
		} else {
			do {
				/* Determine mirror group that map_idx indicates. */
				cur_grp = map_idx / data_disks_per_row;

				if (offload_to_mirror != cur_grp) {
					if (cur_grp <
						layout_map_count - 1) {
						/* Select raid index from next group. */
						map_idx += data_disks_per_row;
						cur_grp++;
					} else {
						/* Select raid index from first group. */
						map_idx %= data_disks_per_row;
						cur_grp = 0;
					}
				}
			} while (offload_to_mirror != cur_grp);
		}

		/* Set mirror group to use next time. */
		offload_to_mirror =
			(offload_to_mirror >= layout_map_count - 1) ?
				0 : offload_to_mirror + 1;
		if (offload_to_mirror >= layout_map_count)
			return PQI_STATUS_FAILURE;

		device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of device->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of device->layout_map_count - 1.
		 */
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) && layout_map_count > 1) {

		/* Verify first and last block are in same RAID group */
		r5or6_blks_per_row = strip_sz * data_disks_per_row;
		stripesz = r5or6_blks_per_row * layout_map_count;

		fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row;
		lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row;

		if (fst_grp != lst_grp)
			return PQI_STATUS_FAILURE;

		/* Verify request is in a single row of RAID 5/6 */
		fst_row = r5or6_fst_row =
			fst_blk / stripesz;
		r5or6_lst_row = lst_blk / stripesz;

		if (r5or6_fst_row != r5or6_lst_row)
			return PQI_STATUS_FAILURE;

		/* Verify request is in a single column */
		fst_row_offset = r5or6_fst_row_offset =
			(uint32_t)((fst_blk % stripesz) %
			r5or6_blks_per_row);

		r5or6_lst_row_offset =
			(uint32_t)((lst_blk % stripesz) %
			r5or6_blks_per_row);

		fst_col = r5or6_fst_row_offset / strip_sz;
		r5or6_fst_col = fst_col;
		r5or6_lst_col = r5or6_lst_row_offset / strip_sz;

		if (r5or6_fst_col != r5or6_lst_col)
			return PQI_STATUS_FAILURE;

		/* Request is eligible */
		map_row =
			((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
			GET_LE16((uint8_t *)(&raid_map->row_cnt));

		map_idx = (fst_grp *
			(GET_LE16((uint8_t *)(&raid_map->row_cnt)) *
			total_disks_per_row)) +
			(map_row * total_disks_per_row) + fst_col;
	}
	rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
	disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
		(fst_row * strip_sz) +
		(fst_row_offset - fst_col * strip_sz);
	disk_blk_cnt = blk_cnt;

	/* Handle differing logical/physical block sizes. */
	if (raid_map->phys_blk_shift) {
		disk_block <<= raid_map->phys_blk_shift;
		disk_blk_cnt <<= raid_map->phys_blk_shift;
	}

	if (disk_blk_cnt > 0xffff)
		return PQI_STATUS_FAILURE;

	/* Build the new CDB for the physical disk I/O. */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16;
		cdb[1] = 0;
		PUT_BE64(disk_block, &cdb[2]);
		PUT_BE32(disk_blk_cnt, &cdb[10]);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_length = 16;
	} else {
		cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10;
		cdb[1] = 0;
		PUT_BE32(disk_block, &cdb[2]);
		cdb[6] = 0;
		PUT_BE16(disk_blk_cnt, &cdb[7]);
		cdb[9] = 0;
		cdb_length = 10;
	}

	if (GET_LE16((uint8_t *)(&raid_map->flags)) &
		RAID_MAP_ENCRYPTION_ENABLED) {
		pqisrc_set_enc_info(&rcb->enc_info, raid_map,
			fst_blk);
		rcb->encrypt_enable = true;
	} else {
		rcb->encrypt_enable = false;
	}

	rcb->cmdlen = cdb_length;

	return PQI_STATUS_SUCCESS;
}
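/*
 * Tail end of the bypass mapping above, as I read it: disk_block is the
 * starting block on the selected physical drive; when the RAID map sets
 * phys_blk_shift the volume block units are converted to physical drive
 * blocks by the shift (e.g. a shift of 3 turns block 100 into 800 and a count
 * of 16 into 128), and the rebuilt CDB is a 10-byte READ/WRITE when the
 * result still fits in 32 bits, or a 16-byte CDB otherwise.
 */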
/* Function used to submit an AIO TMF to the adapter
 * DEVICE_RESET is not supported.
 */
static int
pqisrc_send_aio_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
	rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
	int rval = PQI_STATUS_SUCCESS;
	pqi_aio_tmf_req_t tmf_req;
	ib_queue_t *op_ib_q = NULL;

	memset(&tmf_req, 0, sizeof(pqi_aio_tmf_req_t));

	tmf_req.header.iu_type = PQI_REQUEST_IU_AIO_TASK_MANAGEMENT;
	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
	tmf_req.req_id = rcb->tag;
	tmf_req.error_idx = rcb->tag;
	tmf_req.nexus = devp->ioaccel_handle;
	//memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
	tmf_req.tmf = tmf_type;
	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
	op_ib_q = &softs->op_aio_ib_q[0];

	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
		tmf_req.req_id_to_manage = rcb_to_manage->tag;
		tmf_req.nexus = rcb_to_manage->ioaccel_handle;
	}

	DBG_INFO("tmf_req.header.iu_type : %x tmf_req.req_id_to_manage :%d \n", tmf_req.header.iu_type, tmf_req.req_id_to_manage);
	DBG_INFO("tmf_req.req_id : %d tmf_req.nexus : %x tmf_req.tmf %x QID : %d\n", tmf_req.req_id, tmf_req.nexus, tmf_req.tmf, op_ib_q->q_id);

	rcb->req_pending = true;
	/* Timedout tmf response goes here */
	rcb->error_cmp_callback = pqisrc_process_aio_response_error;

	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command rval=%d\n", rval);
		return rval;
	}

	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
		rcb->status = rval;
	}

	if (rcb->status != REQUEST_SUCCESS) {
		DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
			"stat:0x%x\n", tmf_type, rcb->status);
		rval = PQI_STATUS_FAILURE;
	}

	return rval;
}
/* Function used to submit a RAID TMF to the adapter */
static int
pqisrc_send_raid_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
	rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
	int rval = PQI_STATUS_SUCCESS;
	pqi_raid_tmf_req_t tmf_req;
	ib_queue_t *op_ib_q = NULL;

	memset(&tmf_req, 0, sizeof(pqi_raid_tmf_req_t));

	tmf_req.header.iu_type = PQI_REQUEST_IU_RAID_TASK_MANAGEMENT;
	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
	tmf_req.req_id = rcb->tag;

	memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
	tmf_req.tmf = tmf_type;
	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);

	/* Decide the queue where the tmf request should be submitted */
	if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
		tmf_req.obq_id_to_manage = rcb_to_manage->resp_qid;
		tmf_req.req_id_to_manage = rcb_to_manage->tag;
	}

	if (softs->timeout_in_tmf &&
		tmf_type == SOP_TASK_MANAGEMENT_LUN_RESET) {
		/* OS_TMF_TIMEOUT_SEC - 1 to accommodate driver processing */
		tmf_req.timeout_in_sec = OS_TMF_TIMEOUT_SEC - 1;
		/* if OS tmf timeout is 0, set minimum value for timeout */
		if (!tmf_req.timeout_in_sec)
			tmf_req.timeout_in_sec = 1;
	}

	op_ib_q = &softs->op_raid_ib_q[0];
	rcb->req_pending = true;
	/* Timedout tmf response goes here */
	rcb->error_cmp_callback = pqisrc_process_raid_response_error;

	rval = pqisrc_submit_cmnd(softs, op_ib_q, &tmf_req);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command rval=%d\n", rval);
		return rval;
	}

	rval = pqisrc_wait_on_condition(softs, rcb, PQISRC_TMF_TIMEOUT);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
		rcb->status = rval;
	}

	if (rcb->status != REQUEST_SUCCESS) {
		DBG_NOTE("Task Management failed tmf_type:%d "
			"stat:0x%x\n", tmf_type, rcb->status);
		rval = PQI_STATUS_FAILURE;
	}

	return rval;
}
int
pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
	rcb_t *rcb, rcb_t *rcb_to_manage, int tmf_type)
{
	int ret = PQI_STATUS_SUCCESS;

	if (!devp->is_physical_device) {
		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK) {
			if (rcb_to_manage->path == AIO_PATH) {
				if (devp->offload_enabled)
					ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
			} else {
				DBG_INFO("TASK ABORT not supported in raid\n");
				ret = PQI_STATUS_FAILURE;
			}
		} else {
			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
		}
	} else {
		if (tmf_type == SOP_TASK_MANAGEMENT_FUNCTION_ABORT_TASK)
			ret = pqisrc_send_aio_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
		else
			ret = pqisrc_send_raid_tmf(softs, devp, rcb, rcb_to_manage, tmf_type);
	}

	return ret;
}
/*
 * Function used to build and send the vendor general request.
 * Used for configuring PQI feature bits between firmware and driver.
 */
int
pqisrc_build_send_vendor_request(
	pqisrc_softstate_t *softs,
	pqi_vendor_general_request_t *request,
	raid_path_error_info_elem_t *error_info)
{
	int ret = PQI_STATUS_SUCCESS;
	ib_queue_t *op_ib_q = &softs->op_raid_ib_q[PQI_DEFAULT_IB_QUEUE];
	ob_queue_t *ob_q = &softs->op_ob_q[PQI_DEFAULT_IB_QUEUE];
	rcb_t *rcb = NULL;
	uint16_t request_id = 0;

	request_id = pqisrc_get_tag(&softs->taglist);
	if (INVALID_ELEM == request_id) {
		DBG_ERR("Tag not available\n");
		ret = PQI_STATUS_FAILURE;
		goto err_notag;
	}

	((pqi_vendor_general_request_t *)request)->request_id = request_id;
	((pqi_vendor_general_request_t *)request)->response_queue_id = ob_q->q_id;

	rcb = &softs->rcb[request_id];

	rcb->req_pending = true;
	rcb->tag = request_id;

	ret = pqisrc_submit_cmnd(softs, op_ib_q, request);

	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command\n");
		goto err_out;
	}

	ret = pqisrc_wait_on_condition(softs, rcb, PQISRC_CMD_TIMEOUT);
	if (ret != PQI_STATUS_SUCCESS) {
		DBG_ERR("Management request timed out!\n");
		goto err_out;
	}

	ret = rcb->status;
	if (ret) {
		ret = PQI_STATUS_FAILURE;
		if (error_info) {
			// TODO: config table err handling.
		}
	} else {
		if (error_info) {
			ret = PQI_STATUS_SUCCESS;
			memset(error_info, 0, sizeof(*error_info));
		}
	}

	pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
	return ret;

err_out:
	DBG_ERR("Vendor general request submission failed.\n");
	pqisrc_put_tag(&softs->taglist, ((pqi_vendor_general_request_t *)request)->request_id);
err_notag:
	DBG_FUNC("FAILED \n");
	return ret;
}
/* Return the path as an ASCII string */
char *
io_path_to_ascii(IO_PATH_T path)
{
	switch (path) {
	case AIO_PATH:		return "Aio";
	case RAID_PATH:		return "Raid";
	default:		return "Unknown";
	}
}