/*-
 * Copyright (c) 2018 Microsemi Corporation.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* $FreeBSD$ */

#include "smartpqi_includes.h"

#define SG_FLAG_LAST    0x40000000
#define SG_FLAG_CHAIN   0x80000000

/* Subroutine to determine the embedded SGL count in an IU */
static inline
uint32_t pqisrc_embedded_sgl_count(uint32_t elem_alloted)
{
        uint32_t embedded_sgl_count = MAX_EMBEDDED_SG_IN_FIRST_IU;
        DBG_FUNC(" IN ");
        /*
         * Calculate the embedded SGL count from the number of queue
         * elements allotted to this I/O.
         */
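        /*
         * Worked example (illustrative limits, assuming
         * MAX_EMBEDDED_SG_IN_FIRST_IU = 4 and MAX_EMBEDDED_SG_IN_IU = 8):
         * a request allotted 3 queue elements can embed
         * 4 + 2 * 8 = 20 SG entries before chaining is needed.
         */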
        if(elem_alloted - 1)
                embedded_sgl_count += ((elem_alloted - 1) * MAX_EMBEDDED_SG_IN_IU);
        DBG_IO("embedded_sgl_count :%d\n", embedded_sgl_count);

        DBG_FUNC(" OUT ");

        return embedded_sgl_count;
}

/* Subroutine to determine the number of contiguous free elements in an IB queue */
static inline
uint32_t pqisrc_contiguous_free_elem(uint32_t pi, uint32_t ci, uint32_t elem_in_q)
{
        uint32_t contiguous_free_elem = 0;

        DBG_FUNC(" IN ");

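        /*
         * The ring holds elem_in_q slots.  When the producer index has not
         * wrapped past the consumer index, only the slots up to the end of
         * the ring are contiguous, and one slot is held back when ci is 0
         * so the producer never fully catches up with the consumer.
         * Otherwise the usable run is the gap between pi and ci, less the
         * one reserved slot.
         */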
        if(pi >= ci) {
                contiguous_free_elem = (elem_in_q - pi);
                if(ci == 0)
                        contiguous_free_elem -= 1;
        } else {
                contiguous_free_elem = (ci - pi - 1);
        }

        DBG_FUNC(" OUT ");

        return contiguous_free_elem;
}

/* Subroutine to determine the number of queue elements needed for the request */
static uint32_t
pqisrc_num_elem_needed(pqisrc_softstate_t *softs, uint32_t SG_Count)
{
        uint32_t num_sg;
        uint32_t num_elem_required = 1;
        DBG_FUNC(" IN ");
        DBG_IO("SGL_Count :%d", SG_Count);
        /*
         * If SG_Count is greater than the max SG entries per IU (i.e. 4
         * without spanning or 68 with spanning), SG chaining is used, or
         * if SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU everything fits in
         * the first IU; in either case a single element is enough.
         */
        if(SG_Count > softs->max_sg_per_iu || SG_Count <= MAX_EMBEDDED_SG_IN_FIRST_IU)
                return num_elem_required;
        /* SG entries beyond those embedded in the first IU element */
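        /*
         * The first element holds MAX_EMBEDDED_SG_IN_FIRST_IU entries and
         * each additional element holds up to MAX_EMBEDDED_SG_IN_IU more,
         * so round the remainder up to whole elements.
         */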
        num_sg = SG_Count - MAX_EMBEDDED_SG_IN_FIRST_IU;
        num_elem_required += PQISRC_DIV_ROUND_UP(num_sg, MAX_EMBEDDED_SG_IN_IU);
        DBG_FUNC(" OUT ");
        return num_elem_required;
}

/* Subroutine to build SG list for the IU submission */
static
boolean_t pqisrc_build_sgl(sgt_t *sg_array, rcb_t *rcb, iu_header_t *iu_hdr,
                        uint32_t num_elem_alloted)
{
        uint32_t i;
        uint32_t num_sg = OS_GET_IO_SG_COUNT(rcb);
        sgt_t *sgt = sg_array;
        sgt_t *sg_chain = NULL;
        boolean_t partial = false;

        DBG_FUNC(" IN ");

        DBG_IO("SGL_Count :%d", num_sg);
        if (0 == num_sg) {
                goto out;
        }

        if (num_sg <= pqisrc_embedded_sgl_count(num_elem_alloted)) {
                for (i = 0; i < num_sg; i++, sgt++) {
                        sgt->addr = OS_GET_IO_SG_ADDR(rcb, i);
                        sgt->len = OS_GET_IO_SG_LEN(rcb, i);
                        sgt->flags = 0;
                }

                sg_array[num_sg - 1].flags = SG_FLAG_LAST;
        } else {
                /* SGL chaining */
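                /*
                 * All SG entries are written to the pre-allocated chain
                 * buffer, and the IU itself carries a single descriptor
                 * that points at that buffer, so iu_length below accounts
                 * for exactly one sgt_t.
                 */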
                sg_chain = rcb->sg_chain_virt;
                sgt->addr = rcb->sg_chain_dma;
                sgt->len = num_sg * sizeof(sgt_t);
                sgt->flags = SG_FLAG_CHAIN;

                sgt = sg_chain;
                for (i = 0; i < num_sg; i++, sgt++) {
                        sgt->addr = OS_GET_IO_SG_ADDR(rcb, i);
                        sgt->len = OS_GET_IO_SG_LEN(rcb, i);
                        sgt->flags = 0;
                }

                sg_chain[num_sg - 1].flags = SG_FLAG_LAST;
                num_sg = 1;
                partial = true;
        }
out:
        iu_hdr->iu_length = num_sg * sizeof(sgt_t);
        DBG_FUNC(" OUT ");
        return partial;
}

/* Subroutine used to build the RAID request */
static void
pqisrc_build_raid_io(pqisrc_softstate_t *softs, rcb_t *rcb,
        pqisrc_raid_req_t *raid_req, uint32_t num_elem_alloted)
{
        DBG_FUNC(" IN ");

        raid_req->header.iu_type = PQI_IU_TYPE_RAID_PATH_IO_REQUEST;
        raid_req->header.comp_feature = 0;
        raid_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
        raid_req->work_area[0] = 0;
        raid_req->work_area[1] = 0;
        raid_req->request_id = rcb->tag;
        raid_req->nexus_id = 0;
        raid_req->buffer_length = GET_SCSI_BUFFLEN(rcb);
        memcpy(raid_req->lun_number, rcb->dvp->scsi3addr,
                sizeof(raid_req->lun_number));
        raid_req->protocol_spec = 0;
        raid_req->data_direction = rcb->data_dir;
        raid_req->reserved1 = 0;
        raid_req->fence = 0;
        raid_req->error_index = raid_req->request_id;
        raid_req->reserved2 = 0;
        raid_req->task_attribute = OS_GET_TASK_ATTR(rcb);
        raid_req->command_priority = 0;
        raid_req->reserved3 = 0;
        raid_req->reserved4 = 0;
        raid_req->reserved5 = 0;

        /* As cdb and additional_cdb_bytes are contiguous,
           update them in a single statement */
        memcpy(raid_req->cdb, rcb->cdbp, rcb->cmdlen);
#if 0
        DBG_IO("CDB :");
        for(int i = 0; i < rcb->cmdlen; i++)
                DBG_IO(" 0x%x \n", raid_req->cdb[i]);
#endif

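        /*
         * The first 16 CDB bytes live in raid_req->cdb; anything longer
         * spills into the adjacent additional_cdb_bytes area in 4-byte
         * steps, which is what the usage field set below encodes.
         */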
        switch (rcb->cmdlen) {
                case 6:
                case 10:
                case 12:
                case 16:
                        raid_req->additional_cdb_bytes_usage =
                                PQI_ADDITIONAL_CDB_BYTES_0;
                        break;
                case 20:
                        raid_req->additional_cdb_bytes_usage =
                                PQI_ADDITIONAL_CDB_BYTES_4;
                        break;
                case 24:
                        raid_req->additional_cdb_bytes_usage =
                                PQI_ADDITIONAL_CDB_BYTES_8;
                        break;
                case 28:
                        raid_req->additional_cdb_bytes_usage =
                                PQI_ADDITIONAL_CDB_BYTES_12;
                        break;
                case 32:
                default: /* todo:review again */
                        raid_req->additional_cdb_bytes_usage =
                                PQI_ADDITIONAL_CDB_BYTES_16;
                        break;
        }

        /* Frame SGL Descriptor */
        raid_req->partial = pqisrc_build_sgl(&raid_req->sg_descriptors[0], rcb,
                &raid_req->header, num_elem_alloted);

        raid_req->header.iu_length +=
                offsetof(pqisrc_raid_req_t, sg_descriptors) - sizeof(iu_header_t);

#if 0
        DBG_IO("raid_req->header.iu_type : 0x%x", raid_req->header.iu_type);
        DBG_IO("raid_req->response_queue_id :%d\n", raid_req->response_queue_id);
        DBG_IO("raid_req->request_id : 0x%x", raid_req->request_id);
        DBG_IO("raid_req->buffer_length : 0x%x", raid_req->buffer_length);
        DBG_IO("raid_req->task_attribute : 0x%x", raid_req->task_attribute);
        DBG_IO("raid_req->lun_number[0] : 0x%x", raid_req->lun_number[0]);
        DBG_IO("raid_req->error_index : 0x%x", raid_req->error_index);
        DBG_IO("raid_req->sg_descriptors[0].addr : %p", (void *)raid_req->sg_descriptors[0].addr);
        DBG_IO("raid_req->sg_descriptors[0].len : 0x%x", raid_req->sg_descriptors[0].len);
        DBG_IO("raid_req->sg_descriptors[0].flags : 0x%x", raid_req->sg_descriptors[0].flags);
#endif
        rcb->success_cmp_callback = pqisrc_process_io_response_success;
        rcb->error_cmp_callback = pqisrc_process_raid_response_error;
        rcb->resp_qid = raid_req->response_queue_id;

        DBG_FUNC(" OUT ");
}

/* Subroutine used to build the AIO request */
static void
pqisrc_build_aio_io(pqisrc_softstate_t *softs, rcb_t *rcb,
                                pqi_aio_req_t *aio_req, uint32_t num_elem_alloted)
{
        DBG_FUNC(" IN ");

        aio_req->header.iu_type = PQI_IU_TYPE_AIO_PATH_IO_REQUEST;
        aio_req->header.comp_feature = 0;
        aio_req->response_queue_id = OS_GET_IO_RESP_QID(softs, rcb);
        aio_req->work_area[0] = 0;
        aio_req->work_area[1] = 0;
        aio_req->req_id = rcb->tag;
        aio_req->res1[0] = 0;
        aio_req->res1[1] = 0;
        aio_req->nexus = rcb->ioaccel_handle;
        aio_req->buf_len = GET_SCSI_BUFFLEN(rcb);
        aio_req->data_dir = rcb->data_dir;
        aio_req->mem_type = 0;
        aio_req->fence = 0;
        aio_req->res2 = 0;
        aio_req->task_attr = OS_GET_TASK_ATTR(rcb);
        aio_req->cmd_prio = 0;
        aio_req->res3 = 0;
        aio_req->err_idx = aio_req->req_id;
        /* Clamp the CDB length before recording it so cdb_len matches what is copied. */
        if(rcb->cmdlen > sizeof(aio_req->cdb))
                rcb->cmdlen = sizeof(aio_req->cdb);
        aio_req->cdb_len = rcb->cmdlen;
        memcpy(aio_req->cdb, rcb->cdbp, rcb->cmdlen);
#if 0
        DBG_IO("CDB : \n");
        for(int i = 0; i < rcb->cmdlen; i++)
                DBG_IO(" 0x%x \n", aio_req->cdb[i]);
#endif
        memset(aio_req->lun, 0, sizeof(aio_req->lun));
        memset(aio_req->res4, 0, sizeof(aio_req->res4));

        if(rcb->encrypt_enable == true) {
                aio_req->encrypt_enable = true;
                aio_req->encrypt_key_index = LE_16(rcb->enc_info.data_enc_key_index);
                aio_req->encrypt_twk_low = LE_32(rcb->enc_info.encrypt_tweak_lower);
                aio_req->encrypt_twk_high = LE_32(rcb->enc_info.encrypt_tweak_upper);
        } else {
                aio_req->encrypt_enable = 0;
                aio_req->encrypt_key_index = 0;
                aio_req->encrypt_twk_high = 0;
                aio_req->encrypt_twk_low = 0;
        }

        /* Frame SGL Descriptor */
        aio_req->partial = pqisrc_build_sgl(&aio_req->sg_desc[0], rcb,
                &aio_req->header, num_elem_alloted);

        aio_req->num_sg = aio_req->header.iu_length / sizeof(sgt_t);

        DBG_INFO("aio_req->num_sg :%d", aio_req->num_sg);

        aio_req->header.iu_length += offsetof(pqi_aio_req_t, sg_desc) -
                sizeof(iu_header_t);
#if 0
        DBG_IO("aio_req->header.iu_type : 0x%x \n", aio_req->header.iu_type);
        DBG_IO("aio_req->response_queue_id : 0x%x \n", aio_req->response_queue_id);
        DBG_IO("aio_req->req_id : 0x%x \n", aio_req->req_id);
        DBG_IO("aio_req->nexus : 0x%x \n", aio_req->nexus);
        DBG_IO("aio_req->buf_len : 0x%x \n", aio_req->buf_len);
        DBG_IO("aio_req->data_dir : 0x%x \n", aio_req->data_dir);
        DBG_IO("aio_req->task_attr : 0x%x \n", aio_req->task_attr);
        DBG_IO("aio_req->err_idx : 0x%x \n", aio_req->err_idx);
        DBG_IO("aio_req->num_sg :%d", aio_req->num_sg);
        DBG_IO("aio_req->sg_desc[0].addr : %p \n", (void *)aio_req->sg_desc[0].addr);
        DBG_IO("aio_req->sg_desc[0].len : 0x%x \n", aio_req->sg_desc[0].len);
        DBG_IO("aio_req->sg_desc[0].flags : 0x%x \n", aio_req->sg_desc[0].flags);
#endif

        rcb->success_cmp_callback = pqisrc_process_io_response_success;
        rcb->error_cmp_callback = pqisrc_process_aio_response_error;
        rcb->resp_qid = aio_req->response_queue_id;

        DBG_FUNC(" OUT ");
}

/* Function used to build and send a RAID/AIO request */
int pqisrc_build_send_io(pqisrc_softstate_t *softs, rcb_t *rcb)
{
        ib_queue_t *ib_q_array = softs->op_aio_ib_q;
        ib_queue_t *ib_q = NULL;
        char *ib_iu = NULL;
        IO_PATH_T io_path = AIO_PATH;
        uint32_t TraverseCount = 0;
        int first_qindex = OS_GET_IO_REQ_QINDEX(softs, rcb);
        int qindex = first_qindex;
        uint32_t num_op_ib_q = softs->num_op_aio_ibq;
        uint32_t num_elem_needed;
        uint32_t num_elem_alloted = 0;
        pqi_scsi_dev_t *devp = rcb->dvp;
        uint8_t raidbypass_cdb[16];

        DBG_FUNC(" IN ");

        rcb->cdbp = OS_GET_CDBP(rcb);

        if(IS_AIO_PATH(devp)) {
                /* I/O for a physical drive, send on the AIO path */
                rcb->ioaccel_handle = devp->ioaccel_handle;
        } else {
                int ret = PQI_STATUS_FAILURE;
                /* I/O for a RAID volume */
                if (devp->offload_enabled) {
                        /* Bypass I/O, send on the AIO path */
                        ret = pqisrc_send_scsi_cmd_raidbypass(softs,
                                devp, rcb, raidbypass_cdb);
                }

                if (PQI_STATUS_FAILURE == ret) {
                        /* Send on the RAID path */
                        io_path = RAID_PATH;
                        num_op_ib_q = softs->num_op_raid_ibq;
                        ib_q_array = softs->op_raid_ib_q;
                } else {
                        rcb->cdbp = raidbypass_cdb;
                }
        }

        num_elem_needed = pqisrc_num_elem_needed(softs, OS_GET_IO_SG_COUNT(rcb));
        DBG_IO("num_elem_needed :%d", num_elem_needed);

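        /*
         * Sweep the operational IB queues starting from the preferred index.
         * The first pass looks for enough contiguous slots to hold the full
         * spanned IU; if every queue has been tried without success, a
         * second pass asks for a single slot and lets pqisrc_build_sgl()
         * fall back to the SG chain buffer.
         */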
        do {
                uint32_t num_elem_available;
                ib_q = (ib_q_array + qindex);
                PQI_LOCK(&ib_q->lock);
                num_elem_available = pqisrc_contiguous_free_elem(ib_q->pi_local,
                                        *(ib_q->ci_virt_addr), ib_q->num_elem);

                DBG_IO("num_elem_available :%d\n", num_elem_available);
                if(num_elem_available >= num_elem_needed) {
                        num_elem_alloted = num_elem_needed;
                        break;
                }
                DBG_IO("Current queue is busy! Hop to next queue\n");

                PQI_UNLOCK(&ib_q->lock);
                qindex = (qindex + 1) % num_op_ib_q;
                if(qindex == first_qindex) {
                        if (num_elem_needed == 1)
                                break;
                        TraverseCount += 1;
                        num_elem_needed = 1;
                }
        } while(TraverseCount < 2);

        DBG_IO("num_elem_alloted :%d", num_elem_alloted);
        if (num_elem_alloted == 0) {
                DBG_WARN("OUT: IB Queues were full\n");
                return PQI_STATUS_QFULL;
        }

        /* Get IB Queue Slot address to build IU */
        ib_iu = ib_q->array_virt_addr + (ib_q->pi_local * ib_q->elem_size);

        if(io_path == AIO_PATH) {
                /* Build AIO structure */
                pqisrc_build_aio_io(softs, rcb, (pqi_aio_req_t *)ib_iu,
                        num_elem_alloted);
        } else {
                /* Build RAID structure */
                pqisrc_build_raid_io(softs, rcb, (pqisrc_raid_req_t *)ib_iu,
                        num_elem_alloted);
        }

        rcb->req_pending = true;

        /* Update the local PI */
        ib_q->pi_local = (ib_q->pi_local + num_elem_alloted) % ib_q->num_elem;

        DBG_INFO("ib_q->pi_local : %x\n", ib_q->pi_local);
        DBG_INFO("*ib_q->ci_virt_addr: %x\n", *(ib_q->ci_virt_addr));

        /* Inform the fw about the new IU */
        PCI_MEM_PUT32(softs, ib_q->pi_register_abs, ib_q->pi_register_offset, ib_q->pi_local);

        PQI_UNLOCK(&ib_q->lock);
        DBG_FUNC(" OUT ");
        return PQI_STATUS_SUCCESS;
}

/* Subroutine used to set encryption info as part of RAID bypass I/O */
static inline void pqisrc_set_enc_info(
        struct pqi_enc_info *enc_info, struct raid_map *raid_map,
        uint64_t first_block)
{
        uint32_t volume_blk_size;

        /*
         * Set the encryption tweak values based on logical block address.
         * If the block size is 512, the tweak value is equal to the LBA.
         * For other block sizes, the tweak value is (LBA * block size) / 512.
         */
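        /*
         * For example, with a 4096-byte volume block size an I/O starting
         * at LBA 100 gets a tweak of (100 * 4096) / 512 = 800.
         */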
        volume_blk_size = GET_LE32((uint8_t *)&raid_map->volume_blk_size);
        if (volume_blk_size != 512)
                first_block = (first_block * volume_blk_size) / 512;

        enc_info->data_enc_key_index =
                GET_LE16((uint8_t *)&raid_map->data_encryption_key_index);
        enc_info->encrypt_tweak_upper = ((uint32_t)(((first_block) >> 16) >> 16));
        enc_info->encrypt_tweak_lower = ((uint32_t)(first_block));
}

/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */

#define HPSA_RAID_0             0
#define HPSA_RAID_4             1
#define HPSA_RAID_1             2       /* also used for RAID 10 */
#define HPSA_RAID_5             3       /* also used for RAID 50 */
#define HPSA_RAID_51            4
#define HPSA_RAID_6             5       /* also used for RAID 60 */
#define HPSA_RAID_ADM           6       /* also used for RAID 1+0 ADM */
#define HPSA_RAID_MAX           HPSA_RAID_ADM
#define HPSA_RAID_UNKNOWN       0xff

/*
 * Subroutine used to parse the SCSI opcode and extract the starting LBA and
 * block count needed for a RAID bypass request.
 */
int check_for_scsi_opcode(uint8_t *cdb, boolean_t *is_write, uint64_t *fst_blk,
                                uint32_t *blk_cnt) {
        switch (cdb[0]) {
        case SCMD_WRITE_6:
                *is_write = true;
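                /* FALLTHROUGH */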
        case SCMD_READ_6:
                *fst_blk = (uint64_t)(((cdb[1] & 0x1F) << 16) |
                                (cdb[2] << 8) | cdb[3]);
                *blk_cnt = (uint32_t)cdb[4];
                if (*blk_cnt == 0)
                        *blk_cnt = 256;
                break;
        case SCMD_WRITE_10:
                *is_write = true;
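                /* FALLTHROUGH */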
        case SCMD_READ_10:
                *fst_blk = (uint64_t)GET_BE32(&cdb[2]);
                *blk_cnt = (uint32_t)GET_BE16(&cdb[7]);
                break;
        case SCMD_WRITE_12:
                *is_write = true;
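                /* FALLTHROUGH */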
        case SCMD_READ_12:
                *fst_blk = (uint64_t)GET_BE32(&cdb[2]);
                *blk_cnt = GET_BE32(&cdb[6]);
                break;
        case SCMD_WRITE_16:
                *is_write = true;
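                /* FALLTHROUGH */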
        case SCMD_READ_16:
                *fst_blk = GET_BE64(&cdb[2]);
                *blk_cnt = GET_BE32(&cdb[10]);
                break;
        default:
                /* Process via normal I/O path. */
                return PQI_STATUS_FAILURE;
        }
        return PQI_STATUS_SUCCESS;
}

/*
 * Function used to build and send RAID bypass request to the adapter
 */
int pqisrc_send_scsi_cmd_raidbypass(pqisrc_softstate_t *softs,
                                pqi_scsi_dev_t *device, rcb_t *rcb, uint8_t *cdb)
{
        struct raid_map *raid_map;
        boolean_t is_write = false;
        uint32_t map_idx;
        uint64_t fst_blk, lst_blk;
        uint32_t blk_cnt, blks_per_row;
        uint64_t fst_row, lst_row;
        uint32_t fst_row_offset, lst_row_offset;
        uint32_t fst_col, lst_col;
        uint32_t r5or6_blks_per_row;
        uint64_t r5or6_fst_row, r5or6_lst_row;
        uint32_t r5or6_fst_row_offset, r5or6_lst_row_offset;
        uint32_t r5or6_fst_col, r5or6_lst_col;
        uint16_t data_disks_per_row, total_disks_per_row;
        uint16_t layout_map_count;
        uint32_t stripesz;
        uint16_t strip_sz;
        uint32_t fst_grp, lst_grp, cur_grp;
        uint32_t map_row;
        uint64_t disk_block;
        uint32_t disk_blk_cnt;
        uint8_t cdb_length;
        int offload_to_mirror;
        int i;

        DBG_FUNC(" IN \n");

        /* Check for eligible opcode, get LBA and block count. */
        memcpy(cdb, OS_GET_CDBP(rcb), rcb->cmdlen);

        for(i = 0; i < rcb->cmdlen; i++)
                DBG_IO(" CDB [ %d ] : %x\n", i, cdb[i]);
        if(check_for_scsi_opcode(cdb, &is_write,
                &fst_blk, &blk_cnt) == PQI_STATUS_FAILURE)
                return PQI_STATUS_FAILURE;
        /* Check for write to non-RAID-0. */
        if (is_write && device->raid_level != SA_RAID_0)
                return PQI_STATUS_FAILURE;

        if(blk_cnt == 0)
                return PQI_STATUS_FAILURE;

        lst_blk = fst_blk + blk_cnt - 1;
        raid_map = device->raid_map;

        /* Check for invalid block or wraparound. */
        if (lst_blk >= GET_LE64((uint8_t *)&raid_map->volume_blk_cnt) ||
                lst_blk < fst_blk)
                return PQI_STATUS_FAILURE;

        data_disks_per_row = GET_LE16((uint8_t *)&raid_map->data_disks_per_row);
        strip_sz = GET_LE16((uint8_t *)(&raid_map->strip_size));
        layout_map_count = GET_LE16((uint8_t *)(&raid_map->layout_map_count));

        /* Calculate stripe information for the request. */
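        /*
         * Illustrative example: with strip_sz = 128 blocks and 3 data disks
         * per row, blks_per_row = 384.  An I/O starting at LBA 500 falls in
         * row 1 (500 / 384) at offset 116 within that row, i.e. column 0.
         */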
        blks_per_row = data_disks_per_row * strip_sz;
        if (!blks_per_row)
                return PQI_STATUS_FAILURE;
        /* use __udivdi3 ? */
        fst_row = fst_blk / blks_per_row;
        lst_row = lst_blk / blks_per_row;
        fst_row_offset = (uint32_t)(fst_blk - (fst_row * blks_per_row));
        lst_row_offset = (uint32_t)(lst_blk - (lst_row * blks_per_row));
        fst_col = fst_row_offset / strip_sz;
        lst_col = lst_row_offset / strip_sz;

        /* If this isn't a single row/column then give to the controller. */
        if (fst_row != lst_row || fst_col != lst_col)
                return PQI_STATUS_FAILURE;

        /* Proceeding with driver mapping. */
        total_disks_per_row = data_disks_per_row +
                GET_LE16((uint8_t *)(&raid_map->metadata_disks_per_row));
        map_row = ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
                GET_LE16((uint8_t *)(&raid_map->row_cnt));
        map_idx = (map_row * total_disks_per_row) + fst_col;

        /* RAID 1 */
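        /*
         * Alternate between the primary and the mirror data disk on
         * successive bypass requests by toggling offload_to_mirror.
         */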
        if (device->raid_level == SA_RAID_1) {
                if (device->offload_to_mirror)
                        map_idx += data_disks_per_row;
                device->offload_to_mirror = !device->offload_to_mirror;
        } else if (device->raid_level == SA_RAID_ADM) {
                /* RAID ADM */
                /*
                 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
                 * divisible by 3.
                 */
                offload_to_mirror = device->offload_to_mirror;
                if (offload_to_mirror == 0) {
                        /* use physical disk in the first mirrored group. */
                        map_idx %= data_disks_per_row;
                } else {
                        do {
                                /*
                                 * Determine mirror group that map_idx
                                 * indicates.
                                 */
                                cur_grp = map_idx / data_disks_per_row;

                                if (offload_to_mirror != cur_grp) {
                                        if (cur_grp <
                                                layout_map_count - 1) {
                                                /*
                                                 * Select raid index from
                                                 * next group.
                                                 */
                                                map_idx += data_disks_per_row;
                                                cur_grp++;
                                        } else {
                                                /*
                                                 * Select raid index from first
                                                 * group.
                                                 */
                                                map_idx %= data_disks_per_row;
                                                cur_grp = 0;
                                        }
                                }
                        } while (offload_to_mirror != cur_grp);
                }

                /* Set mirror group to use next time. */
                offload_to_mirror =
                        (offload_to_mirror >= layout_map_count - 1) ?
                                0 : offload_to_mirror + 1;
                if(offload_to_mirror >= layout_map_count)
                        return PQI_STATUS_FAILURE;

                device->offload_to_mirror = offload_to_mirror;
                /*
                 * Avoid direct use of device->offload_to_mirror within this
                 * function since multiple threads might simultaneously
                 * increment it beyond the range of device->layout_map_count - 1.
                 */
        } else if ((device->raid_level == SA_RAID_5 ||
                device->raid_level == SA_RAID_6) && layout_map_count > 1) {
                /* RAID 50/60 */
                /* Verify first and last block are in same RAID group */
                r5or6_blks_per_row = strip_sz * data_disks_per_row;
                stripesz = r5or6_blks_per_row * layout_map_count;

                fst_grp = (fst_blk % stripesz) / r5or6_blks_per_row;
                lst_grp = (lst_blk % stripesz) / r5or6_blks_per_row;

                if (fst_grp != lst_grp)
                        return PQI_STATUS_FAILURE;

                /* Verify request is in a single row of RAID 5/6 */
                fst_row = r5or6_fst_row =
                        fst_blk / stripesz;
                r5or6_lst_row = lst_blk / stripesz;

                if (r5or6_fst_row != r5or6_lst_row)
                        return PQI_STATUS_FAILURE;

                /* Verify request is in a single column */
                fst_row_offset = r5or6_fst_row_offset =
                        (uint32_t)((fst_blk % stripesz) %
                        r5or6_blks_per_row);

                r5or6_lst_row_offset =
                        (uint32_t)((lst_blk % stripesz) %
                        r5or6_blks_per_row);

                fst_col = r5or6_fst_row_offset / strip_sz;
                r5or6_fst_col = fst_col;
                r5or6_lst_col = r5or6_lst_row_offset / strip_sz;

                if (r5or6_fst_col != r5or6_lst_col)
                        return PQI_STATUS_FAILURE;

                /* Request is eligible */
                map_row =
                        ((uint32_t)(fst_row >> raid_map->parity_rotation_shift)) %
                        GET_LE16((uint8_t *)(&raid_map->row_cnt));

                map_idx = (fst_grp *
                        (GET_LE16((uint8_t *)(&raid_map->row_cnt)) *
                        total_disks_per_row)) +
                        (map_row * total_disks_per_row) + fst_col;
        }

        if (map_idx >= RAID_MAP_MAX_ENTRIES)
                return PQI_STATUS_FAILURE;

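        /*
         * Translate the volume-relative LBA into a starting block on the
         * selected physical disk: whole rows advance by one strip on that
         * disk, and the offset within the row keeps its position inside
         * the strip.
         */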
        rcb->ioaccel_handle = raid_map->dev_data[map_idx].ioaccel_handle;
        disk_block = GET_LE64((uint8_t *)(&raid_map->disk_starting_blk)) +
                fst_row * strip_sz +
                (fst_row_offset - fst_col * strip_sz);
        disk_blk_cnt = blk_cnt;

        /* Handle differing logical/physical block sizes. */
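        /*
         * For example, a phys_blk_shift of 3 maps each 4 KiB logical block
         * onto eight 512-byte physical blocks.
         */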
        if (raid_map->phys_blk_shift) {
                disk_block <<= raid_map->phys_blk_shift;
                disk_blk_cnt <<= raid_map->phys_blk_shift;
        }

        if (disk_blk_cnt > 0xffff)
                return PQI_STATUS_FAILURE;

        /* Build the new CDB for the physical disk I/O. */
        if (disk_block > 0xffffffff) {
                cdb[0] = is_write ? SCMD_WRITE_16 : SCMD_READ_16;
                cdb[1] = 0;
                PUT_BE64(disk_block, &cdb[2]);
                PUT_BE32(disk_blk_cnt, &cdb[10]);
                cdb[14] = 0;
                cdb[15] = 0;
                cdb_length = 16;
        } else {
                cdb[0] = is_write ? SCMD_WRITE_10 : SCMD_READ_10;
                cdb[1] = 0;
                PUT_BE32(disk_block, &cdb[2]);
                cdb[6] = 0;
                PUT_BE16(disk_blk_cnt, &cdb[7]);
                cdb[9] = 0;
                cdb_length = 10;
        }

        if (GET_LE16((uint8_t *)(&raid_map->flags)) &
                RAID_MAP_ENCRYPTION_ENABLED) {
                pqisrc_set_enc_info(&rcb->enc_info, raid_map,
                        fst_blk);
                rcb->encrypt_enable = true;
        } else {
                rcb->encrypt_enable = false;
        }

        rcb->cmdlen = cdb_length;

        DBG_FUNC("OUT");

        return PQI_STATUS_SUCCESS;
}

/* Function used to submit a TMF to the adapter */
int pqisrc_send_tmf(pqisrc_softstate_t *softs, pqi_scsi_dev_t *devp,
                    rcb_t *rcb, int req_id, int tmf_type)
{
        int rval = PQI_STATUS_SUCCESS;
        pqi_tmf_req_t tmf_req;

        memset(&tmf_req, 0, sizeof(pqi_tmf_req_t));

        DBG_FUNC("IN");

        tmf_req.header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
        tmf_req.header.iu_length = sizeof(tmf_req) - sizeof(iu_header_t);
        tmf_req.req_id = rcb->tag;

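        /*
         * req_id identifies this TMF request itself; req_id_to_manage
         * (set below) is the tag of the outstanding request the TMF
         * operates on.
         */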
        memcpy(tmf_req.lun, devp->scsi3addr, sizeof(tmf_req.lun));
        tmf_req.tmf = tmf_type;
        tmf_req.req_id_to_manage = req_id;
        tmf_req.resp_qid = OS_GET_TMF_RESP_QID(softs, rcb);
        tmf_req.obq_id_to_manage = rcb->resp_qid;

        rcb->req_pending = true;

        rval = pqisrc_submit_cmnd(softs,
                &softs->op_raid_ib_q[OS_GET_TMF_REQ_QINDEX(softs, rcb)], &tmf_req);
        if (rval != PQI_STATUS_SUCCESS) {
                DBG_ERR("Unable to submit command rval=%d\n", rval);
                return rval;
        }

        rval = pqisrc_wait_on_condition(softs, rcb);
        if (rval != PQI_STATUS_SUCCESS) {
                DBG_ERR("Task Management tmf_type : %d timeout\n", tmf_type);
                rcb->status = REQUEST_FAILED;
        }

        if (rcb->status != REQUEST_SUCCESS) {
                DBG_ERR_BTL(devp, "Task Management failed tmf_type:%d "
                                "stat:0x%x\n", tmf_type, rcb->status);
                rval = PQI_STATUS_FAILURE;
        }

        DBG_FUNC("OUT");
        return rval;
}