/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "smartpqi_includes.h"

#define SG_FLAG_LAST	0x40000000
#define SG_FLAG_CHAIN	0x80000000

/* Subroutine to find out embedded sgl count in IU */
static inline uint32_t
pqisrc_embedded_sgl_count(uint32_t elem_alloted)
{
	uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;

	/*
	 * Calculate the embedded sgl count using the number of queue
	 * elements allotted for this IO.
	 */
	embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
	DBG_IO("embedded_sgl_count :%d\n", embedded_sgl_count);

	return embedded_sgl_count;
}

/* Subroutine to find out contiguous free elem in IU */
static inline uint32_t
pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
{
	uint32_t contiguous_free_elem = 0;

	if (pi >= ci) {
		contiguous_free_elem = (elem_in_q - pi);
		if (ci == 0)
			contiguous_free_elem -= 1;
	} else {
		contiguous_free_elem = (ci - pi - 1);
	}

	return contiguous_free_elem;
}
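
/*
 * Illustrative example (values assumed, not from the source): with a
 * 32-element queue, pi = 30 and ci = 5, the producer is ahead of the
 * consumer, so only the 2 slots up to the end of the ring (indices 30-31)
 * count as contiguous free elements; the wrapped slots before index 5 are
 * not used until a later submission re-reads the consumer index.
 */
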
/* Subroutine to find out num of elements needed for the request */
static uint32_t
pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
{
	uint32_t num_sg;
	uint32_t num_elem_required = 1;

	DBG_IO("SGL_Count :%d", SG_Count);
	/*
	 * If SG_Count is greater than the max sg per IU, i.e. 4 or 68
	 * (4 without spanning, 68 with spanning), chaining is required and
	 * only a chain descriptor goes in the IU.  Likewise, if
	 * SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU, the SG entries fit in the
	 * first IU.  In both cases one element is enough.
	 */
	if (SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU)
		return num_elem_required;

	/* SG entries beyond those embedded in the first IU element */
	num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU;
	num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);

	return num_elem_required;
}
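
/*
 * Worked example (assumed macro values, for illustration only): with
 * MAX_EMBEDDED_SG_IN_FIRST_IU = 4 and MAX_EMBEDDED_SG_IN_IU = 8, a request
 * carrying 20 SG entries that does not exceed softs->max_sg_per_iu needs
 * num_sg = 20 - 4 = 16 extra descriptors, i.e.
 * 1 + PQISRC_DIV_ROUND_UP(16, 8) = 3 queue elements.
 */
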
/* Subroutine to build SG list for the IU submission */
static boolean_t
pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
			uint32_t num_elem_alloted)
{
	uint32_t i;
	uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
	sgt_t *sgt = sg_array;
	sgt_t *sg_chain = NULL;
	boolean_t partial = false;

	DBG_IO("SGL_Count :%d", num_sg);
	if (num_sg == 0)
		goto out;

	if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) {
		/* All SG entries fit in the allotted IU element(s). */
		for (i = 0; i < num_sg; i++, sgt++) {
			sgt->addr = OS_GET_IO_SG_ADDR(rcb, i);
			sgt->len = OS_GET_IO_SG_LEN(rcb, i);
			sgt->flags = 0;
		}
		sg_array[num_sg - 1].flags = SG_FLAG_LAST;
	} else {
		/* SGL chaining: the IU carries a single chain descriptor. */
		sg_chain = rcb->sg_chain_virt;
		sgt->addr = rcb->sg_chain_dma;
		sgt->len = num_sg * sizeof(sgt_t);
		sgt->flags = SG_FLAG_CHAIN;

		sgt = sg_chain;
		for (i = 0; i < num_sg; i++, sgt++) {
			sgt->addr = OS_GET_IO_SG_ADDR(rcb, i);
			sgt->len = OS_GET_IO_SG_LEN(rcb, i);
			sgt->flags = 0;
		}
		sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
		num_sg = 1;
		partial = true;
	}

out:
	iu_hdr->iu_length = num_sg * sizeof(sgt_t);
	return partial;
}
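
/*
 * Illustrative layouts produced above (example values, not from the source):
 * a request with 3 SG entries that fit in the allotted elements is written
 * inline, SG_FLAG_LAST is set on the third descriptor and iu_length becomes
 * 3 * sizeof(sgt_t).  A request with 100 SG entries instead gets one chain
 * descriptor in the IU (SG_FLAG_CHAIN, addr = rcb->sg_chain_dma), the 100
 * real descriptors land in rcb->sg_chain_virt, and iu_length counts only the
 * single chain descriptor because num_sg is reset to 1 before the final
 * assignment.
 */
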
/* Subroutine used to build the RAID request */
static void
pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
	pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
{
	int i;

	raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
	raid_req->header.comp_feature = 0;
	raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
	raid_req->work_area[0] = 0;
	raid_req->work_area[1] = 0;
	raid_req->request_id = rcb->tag;
	raid_req->nexus_id = 0;
	raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
	memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
		sizeof(raid_req->lun_number));
	raid_req->protocol_spec = 0;
	raid_req->data_direction = rcb->data_dir;
	raid_req->reserved1 = 0;
	raid_req->error_index = raid_req->request_id;
	raid_req->reserved2 = 0;
	raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
	raid_req->command_priority = 0;
	raid_req->reserved3 = 0;
	raid_req->reserved4 = 0;
	raid_req->reserved5 = 0;

	/*
	 * As cdb and additional_cdb_bytes are contiguous,
	 * update them in a single statement.
	 */
	memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);

	for (i = 0; i < rcb->cmdlen; i++)
		DBG_IO(" 0x%x \n", raid_req->cdb[i]);

	switch (rcb->cmdlen) {
	case 6:
	case 10:
	case 12:
	case 16:
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_12;
		break;
	default: /* todo:review again */
		raid_req->additional_cdb_bytes_usage =
			PQI_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	/* Frame SGL Descriptor */
	raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
		&raid_req->header, num_elem_alloted);

	raid_req->header.iu_length +=
		offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);

226 DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
227 DBG_IO("raid_req->response_queue_id :%d\n"raid_req->response_queue_id);
228 DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id);
229 DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length);
230 DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute);
231 DBG_IO("raid_req->lun_number : 0x%x", raid_req->lun_number);
232 DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index);
233 DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void*)raid_req->sg_descriptors[0].addr);
234 DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
235 DBG_IO("raid_req->sg_descriptors[0].flags : 0%x", raid_req->sg_descriptors[0].flags);
	rcb->success_cmp_callback = pqisrc_process_io_response_success;
	rcb->error_cmp_callback = pqisrc_process_raid_response_error;
	rcb->resp_qid = raid_req->response_queue_id;
}
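
/*
 * Note on the IU length math above: pqisrc_build_sgl() first stores only the
 * SGL byte count in header.iu_length, and the builder then adds the fixed
 * request size up to sg_descriptors minus the IU header.  For example
 * (illustration only), a single embedded descriptor yields
 * iu_length = sizeof(sgt_t) + offsetof(pqisrc_raid_req_t, sg_descriptors) -
 * sizeof(iu_header_t).
 */
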
/* Subroutine used to build the AIO request */
static void
pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
	pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
{
	aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
	aio_req->header.comp_feature = 0;
	aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
	aio_req->work_area[0] = 0;
	aio_req->work_area[1] = 0;
	aio_req->req_id = rcb->tag;
	aio_req->res1[0] = 0;
	aio_req->res1[1] = 0;
	aio_req->nexus = rcb->ioaccel_handle;
	aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
	aio_req->data_dir = rcb->data_dir;
	aio_req->mem_type = 0;
	aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
	aio_req->cmd_prio = 0;
	aio_req->err_idx = aio_req->req_id;
	aio_req->cdb_len = rcb->cmdlen;
	memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);

	for (int i = 0; i < rcb->cmdlen; i++)
		DBG_IO(" 0x%x \n", aio_req->cdb[i]);

	memset(aio_req->lun, 0, sizeof(aio_req->lun));
	memset(aio_req->res4, 0, sizeof(aio_req->res4));

	if (rcb->encrypt_enable == true) {
		aio_req->encrypt_enable = true;
		aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
		aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower);
		aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper);
	} else {
		aio_req->encrypt_enable = 0;
		aio_req->encrypt_key_index = 0;
		aio_req->encrypt_twk_high = 0;
		aio_req->encrypt_twk_low = 0;
	}

	/* Frame SGL Descriptor */
	aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
		&aio_req->header, num_elem_alloted);

	aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);

	DBG_INFO("aio_req->num_sg :%d", aio_req->num_sg);

	aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
		sizeof(iu_header_t);

303 DBG_IO("aio_req->header.iu_type : 0x%x \n",aio_req->header.iu_type);
304 DBG_IO("aio_req->resp_qid :0x%x",aio_req->resp_qid);
305 DBG_IO("aio_req->req_id : 0x%x \n",aio_req->req_id);
306 DBG_IO("aio_req->nexus : 0x%x \n",aio_req->nexus);
307 DBG_IO("aio_req->buf_len : 0x%x \n",aio_req->buf_len);
308 DBG_IO("aio_req->data_dir : 0x%x \n",aio_req->data_dir);
309 DBG_IO("aio_req->task_attr : 0x%x \n",aio_req->task_attr);
310 DBG_IO("aio_req->err_idx : 0x%x \n",aio_req->err_idx);
311 DBG_IO("aio_req->num_sg :%d",aio_req->num_sg);
312 DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void*)aio_req->sg_desc[0].addr);
313 DBG_IO("aio_req->sg_desc[0].len : 0%x \n", aio_req->sg_desc[0].len);
314 DBG_IO("aio_req->sg_desc[0].flags : 0%x \n", aio_req->sg_desc[0].flags);
	rcb->success_cmp_callback = pqisrc_process_io_response_success;
	rcb->error_cmp_callback = pqisrc_process_aio_response_error;
	rcb->resp_qid = aio_req->response_queue_id;
}
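
/*
 * Ordering note: num_sg above is derived while header.iu_length still holds
 * only the SGL byte count written by pqisrc_build_sgl(), i.e.
 * num_sg = iu_length / sizeof(sgt_t); the fixed
 * offsetof(pqi_aio_req_t, sg_desc) - sizeof(iu_header_t) part is added only
 * afterwards, so computing num_sg after that adjustment would over-count.
 */
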
/* Function used to build and send RAID/AIO IO */
int pqisrc_build_send_io(pqisrc_softstate_t *softs, rcb_t *rcb)
{
	ib_queue_t *ib_q_array = softs->op_aio_ib_q;
	ib_queue_t *ib_q = NULL;
	char *ib_iu = NULL;
	IO_PATH_T io_path = AIO_PATH;
	uint32_t TraverseCount = 0;
	int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
	int qindex = first_qindex;
	uint32_t num_op_ib_q = softs->num_op_aio_ibq;
	uint32_t num_elem_needed;
	uint32_t num_elem_alloted = 0;
	pqi_scsi_dev_t *devp = rcb->dvp;
	uint8_t raidbypass_cdb[16];

	rcb->cdbp = OS_GET_CDBP(rcb);

	if (IS_AIO_PATH(devp)) {
		/** IO for Physical Drive **/
		/** Send in AIO PATH **/
		rcb->ioaccel_handle = devp->ioaccel_handle;
	} else {
		int ret = PQI_STATUS_FAILURE;
		/** IO for RAID Volume **/
		if (devp->offload_enabled) {
			/** Bypass IO, Send in AIO PATH **/
			ret = pqisrc_send_scsi_cmd_raidbypass(softs,
				devp, rcb, raidbypass_cdb);
		}

		if (PQI_STATUS_FAILURE == ret) {
			/** Send in RAID PATH **/
			io_path = RAID_PATH;
			num_op_ib_q = softs->num_op_raid_ibq;
			ib_q_array = softs->op_raid_ib_q;
		} else {
			/** Bypass succeeded, use the rebuilt physical-disk CDB **/
			rcb->cdbp = raidbypass_cdb;
		}
	}

	num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
	DBG_IO("num_elem_needed :%d", num_elem_needed);
	do {
		uint32_t num_elem_available;
		ib_q = (ib_q_array + qindex);
		PQI_LOCK(&ib_q->lock);
		num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
			*(ib_q->ci_virt_addr), ib_q->num_elem);

		DBG_IO("num_elem_available :%d\n", num_elem_available);
		if (num_elem_available >= num_elem_needed) {
			num_elem_alloted = num_elem_needed;
			break;
		}
		DBG_IO("Current queue is busy! Hop to next queue\n");

		PQI_UNLOCK(&ib_q->lock);
		qindex = (qindex + 1) % num_op_ib_q;
		if (qindex == first_qindex) {
			if (num_elem_needed == 1)
				break;
			TraverseCount += 1;
			num_elem_needed = 1;
		}
	} while (TraverseCount < 2);

396 DBG_IO("num_elem_alloted :%d",num_elem_alloted);
397 if (num_elem_alloted == 0) {
398 DBG_WARN("OUT: IB Queues were full\n");
399 return PQI_STATUS_QFULL;
	/* Get IB Queue Slot address to build IU */
	ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);

	if (io_path == AIO_PATH) {
		/** Build AIO structure **/
		pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t *)ib_iu,
			num_elem_alloted);
	} else {
		/** Build RAID structure **/
		pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t *)ib_iu,
			num_elem_alloted);
	}

	rcb->req_pending = true;

	/* Update the local PI */
	ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;

	DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local);
	DBG_INFO("*ib_q->ci_virt_addr: %x\n", *(ib_q->ci_virt_addr));

	/* Inform the fw about the new IU */
	PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);

	PQI_UNLOCK(&ib_q->lock);

	return PQI_STATUS_SUCCESS;
}

/* Subroutine used to set encryption info as part of RAID bypass IO */
static inline void pqisrc_set_enc_info(
	struct pqi_enc_info *enc_info, struct raid_map *raid_map,
	uint64_t first_block)
{
	uint32_t volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	enc_info->data_enc_key_index =
		GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
	enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
	enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
}
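
/*
 * Worked example (illustrative values): for a volume with a 4 KiB block size,
 * a request starting at LBA 100 is scaled to first_block = (100 * 4096) / 512
 * = 800, giving encrypt_tweak_lower = 800 and encrypt_tweak_upper = 0; with a
 * 512-byte block size the tweak is simply the LBA itself.
 */
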
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */

#define HPSA_RAID_0		0
#define HPSA_RAID_4		1
#define HPSA_RAID_1		2	/* also used for RAID 10 */
#define HPSA_RAID_5		3	/* also used for RAID 50 */
#define HPSA_RAID_51		4
#define HPSA_RAID_6		5	/* also used for RAID 60 */
#define HPSA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define HPSA_RAID_MAX		HPSA_RAID_ADM
#define HPSA_RAID_UNKNOWN	0xff

/* Subroutine used to parse the scsi opcode and build the CDB for RAID bypass */
int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
	uint32_t *blk_cnt)
{
	switch (cdb[0]) {
	case SCMD_WRITE_6:
		*is_write = true;
	case SCMD_READ_6:
		*fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) |
			(cdb[2] << 8) | cdb[3]);
		*blk_cnt = (uint32_t)cdb[4];
		break;
	case SCMD_WRITE_10:
		*is_write = true;
	case SCMD_READ_10:
		*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
		*blk_cnt = (uint32_t)GET_BE16(&cdb[7]);
		break;
	case SCMD_WRITE_12:
		*is_write = true;
	case SCMD_READ_12:
		*fst_blk = (uint64_t)GET_BE32(&cdb[2]);
		*blk_cnt = GET_BE32(&cdb[6]);
		break;
	case SCMD_WRITE_16:
		*is_write = true;
	case SCMD_READ_16:
		*fst_blk = GET_BE64(&cdb[2]);
		*blk_cnt = GET_BE32(&cdb[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_STATUS_FAILURE;
	}
	return PQI_STATUS_SUCCESS;
}
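
/*
 * Example decode (illustrative only): a READ(10) CDB of
 * 28 00 00 00 10 00 00 00 08 00 yields fst_blk = 0x1000 from the big-endian
 * bytes 2-5 and blk_cnt = 8 from bytes 7-8, with *is_write left false.
 */
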
/*
 * Function used to build and send RAID bypass request to the adapter
 */
int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
	pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
{
	struct raid_map *raid_map;
	boolean_t is_write = false;
	uint64_t fst_blk, lst_blk;
	uint32_t blk_cnt, blks_per_row;
	uint64_t fst_row, lst_row;
	uint32_t fst_row_offset, lst_row_offset;
	uint32_t fst_col, lst_col;
	uint32_t r5or6_blks_per_row;
	uint64_t r5or6_fst_row, r5or6_lst_row;
	uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset;
	uint32_t r5or6_fst_col, r5or6_lst_col;
	uint16_t data_disks_per_row, total_disks_per_row;
	uint16_t layout_map_count;
	uint16_t strip_sz;
	uint32_t fst_grp, lst_grp, cur_grp;
	uint32_t map_row;
	uint64_t map_idx;
	uint32_t disk_blk_cnt;
	uint64_t disk_block;
	uint32_t stripesz;
	uint32_t cdb_length;
	int offload_to_mirror;
	int i;

	/* Check for eligible opcode, get LBA and block count. */
	memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);

	for (i = 0; i < rcb->cmdlen; i++)
		DBG_IO(" CDB [ %d ] : %x\n", i, cdb[i]);

	if (check_for_scsi_opcode(cdb, &is_write,
			&fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
		return PQI_STATUS_FAILURE;

	/* Check for write to non-RAID-0. */
	if (is_write && device->raid_level != SA_RAID_0)
		return PQI_STATUS_FAILURE;

	if (blk_cnt == 0)
		return PQI_STATUS_FAILURE;

	lst_blk = fst_blk + blk_cnt - 1;
	raid_map = device->raid_map;

	/* Check for invalid block or wraparound. */
	if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) ||
		lst_blk < fst_blk)
		return PQI_STATUS_FAILURE;

	data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row);
	strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size));
	layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count));

	/* Calculate stripe information for the request. */
	blks_per_row = data_disks_per_row * strip_sz;

	/* use __udivdi3 ? */
	fst_row = fst_blk / blks_per_row;
	lst_row = lst_blk / blks_per_row;
	fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row));
	lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row));
	fst_col = fst_row_offset / strip_sz;
	lst_col = lst_row_offset / strip_sz;

	/* If this isn't a single row/column then give to the controller. */
	if (fst_row != lst_row || fst_col != lst_col)
		return PQI_STATUS_FAILURE;

	/* Proceeding with driver mapping. */
	total_disks_per_row = data_disks_per_row +
		GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row));
	map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
		GET_LE16((uint8_t *)(&raid_map->row_cnt));
	map_idx = (map_row * total_disks_per_row) + fst_col;
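
	/*
	 * Worked example (illustrative values): with strip_sz = 128,
	 * data_disks_per_row = 3 and fst_blk = 1000, blks_per_row = 384,
	 * so fst_row = 2, fst_row_offset = 232 and fst_col = 1; with no
	 * parity rotation and row_cnt > 2, map_row = 2 and
	 * map_idx = 2 * total_disks_per_row + 1.
	 */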
	if (device->raid_level == SA_RAID_1) {
		if (device->offload_to_mirror)
			map_idx += data_disks_per_row;
		device->offload_to_mirror = !device->offload_to_mirror;
	} else if (device->raid_level == SA_RAID_ADM) {
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
		 * divisible by 3.
		 */
		offload_to_mirror = device->offload_to_mirror;
		if (offload_to_mirror == 0) {
			/* use physical disk in the first mirrored group. */
			map_idx %= data_disks_per_row;
		} else {
			do {
				/*
				 * Determine mirror group that map_idx
				 * indicates.
				 */
				cur_grp = map_idx / data_disks_per_row;

				if (offload_to_mirror != cur_grp) {
					if (cur_grp <
						layout_map_count - 1) {
						/*
						 * Select raid index from
						 * next group.
						 */
						map_idx += data_disks_per_row;
						cur_grp++;
					} else {
						/*
						 * Select raid index from first
						 * group.
						 */
						map_idx %= data_disks_per_row;
						cur_grp = 0;
					}
				}
			} while (offload_to_mirror != cur_grp);
		}

		/* Set mirror group to use next time. */
		offload_to_mirror =
			(offload_to_mirror >= layout_map_count - 1) ?
			0 : offload_to_mirror + 1;
		if (offload_to_mirror >= layout_map_count)
			return PQI_STATUS_FAILURE;

		device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of device->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of device->layout_map_count - 1.
		 */
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) && layout_map_count > 1) {

		/* Verify first and last block are in same RAID group */
		r5or6_blks_per_row = strip_sz * data_disks_per_row;
		stripesz = r5or6_blks_per_row * layout_map_count;

		fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row;
		lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row;

		if (fst_grp != lst_grp)
			return PQI_STATUS_FAILURE;

		/* Verify request is in a single row of RAID 5/6 */
		fst_row = r5or6_fst_row =
			fst_blk / stripesz;
		r5or6_lst_row = lst_blk / stripesz;

		if (r5or6_fst_row != r5or6_lst_row)
			return PQI_STATUS_FAILURE;

		/* Verify request is in a single column */
		fst_row_offset = r5or6_fst_row_offset =
			(uint32_t)((fst_blk % stripesz) %
			r5or6_blks_per_row);

		r5or6_lst_row_offset =
			(uint32_t)((lst_blk % stripesz) %
			r5or6_blks_per_row);

		fst_col = r5or6_fst_row_offset / strip_sz;
		r5or6_fst_col = fst_col;
		r5or6_lst_col = r5or6_lst_row_offset / strip_sz;

		if (r5or6_fst_col != r5or6_lst_col)
			return PQI_STATUS_FAILURE;

		/* Request is eligible */
		map_row =
			((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
			GET_LE16((uint8_t *)(&raid_map->row_cnt));

		map_idx = (fst_grp *
			(GET_LE16((uint8_t *)(&raid_map->row_cnt)) *
			total_disks_per_row)) +
			(map_row * total_disks_per_row) + fst_col;
	}

	if (map_idx >= RAID_MAP_MAX_ENTRIES)
		return PQI_STATUS_FAILURE;

	rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
	disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
		(fst_row * strip_sz) +
		(fst_row_offset - fst_col * strip_sz);
	disk_blk_cnt = blk_cnt;

	/* Handle differing logical/physical block sizes. */
	if (raid_map->phys_blk_shift) {
		disk_block <<= raid_map->phys_blk_shift;
		disk_blk_cnt <<= raid_map->phys_blk_shift;
	}

	if (disk_blk_cnt > 0xffff)
		return PQI_STATUS_FAILURE;

	/* Build the new CDB for the physical disk I/O. */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16;
		cdb[1] = 0;
		PUT_BE64(disk_block, &cdb[2]);
		PUT_BE32(disk_blk_cnt, &cdb[10]);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_length = 16;
	} else {
		cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10;
		cdb[1] = 0;
		PUT_BE32(disk_block, &cdb[2]);
		cdb[6] = 0;
		PUT_BE16(disk_blk_cnt, &cdb[7]);
		cdb[9] = 0;
		cdb_length = 10;
	}

	if (GET_LE16((uint8_t *)(&raid_map->flags)) &
		RAID_MAP_ENCRYPTION_ENABLED) {
		pqisrc_set_enc_info(&rcb->enc_info, raid_map,
			fst_blk);
		rcb->encrypt_enable = true;
	} else {
		rcb->encrypt_enable = false;
	}

	rcb->cmdlen = cdb_length;

	return PQI_STATUS_SUCCESS;
}

/* Function used to submit a TMF to the adapter */
int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
	rcb_t *rcb, int req_id, int tmf_type)
{
	int rval = PQI_STATUS_SUCCESS;
	pqi_tmf_req_t tmf_req;

	memset(&tmf_req, 0, sizeof(pqi_tmf_req_t));

	tmf_req.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
	tmf_req.req_id = rcb->tag;

	memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
	tmf_req.tmf = tmf_type;
	tmf_req.req_id_to_manage = req_id;
	tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
	tmf_req.obq_id_to_manage = rcb->resp_qid;

	rcb->req_pending = true;

	rval = pqisrc_submit_cmnd(softs,
		&softs->op_raid_ib_q[OS_GET_TMF_REQ_QINDEX(softs, rcb)], &tmf_req);

	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Unable to submit command rval=%d\n", rval);
		return rval;
	}

	rval = pqisrc_wait_on_condition(softs, rcb);
	if (rval != PQI_STATUS_SUCCESS) {
		DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
		rcb->status = REQUEST_FAILED;
	}

	if (rcb->status != REQUEST_SUCCESS) {
		DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
			"stat:0x%x\n", tmf_type, rcb->status);
		rval = PQI_STATUS_FAILURE;