2 * Copyright (c) 2014, LSI Corp.
5 * Support: freebsdraid@lsi.com
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies,either expressed or implied, of the FreeBSD Project.
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 * ATTN: MegaRaid FreeBSD
44 #include <sys/cdefs.h>
45 __FBSDID("$FreeBSD$");
47 #include <dev/mrsas/mrsas.h>
50 #include <cam/cam_ccb.h>
51 #include <cam/cam_sim.h>
52 #include <cam/cam_xpt_sim.h>
53 #include <cam/cam_debug.h>
54 #include <cam/cam_periph.h>
55 #include <cam/cam_xpt_periph.h>
61 u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
62 u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
63 u_int64_t block, u_int32_t count);
64 u_int8_t MR_BuildRaidContext(struct mrsas_softc *sc,
65 struct IO_REQUEST_INFO *io_info,
66 RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
67 u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
68 u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
69 RAID_CONTEXT *pRAID_Context,
70 MR_FW_RAID_MAP_ALL *map);
71 u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
72 u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map);
73 u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
74 u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
75 struct IO_REQUEST_INFO *io_info);
76 u_int32_t mega_mod64(u_int64_t dividend, u_int32_t divisor);
77 u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
78 MR_FW_RAID_MAP_ALL *map, int *div_error);
79 u_int64_t mega_div64_32(u_int64_t dividend, u_int32_t divisor);
80 void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
81 PLD_LOAD_BALANCE_INFO lbInfo);
82 void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request,
83 u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
84 MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
85 u_int32_t ld_block_size);
86 static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
87 MR_FW_RAID_MAP_ALL *map);
88 static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map);
89 static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm,
90 MR_FW_RAID_MAP_ALL *map);
91 static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span,
92 MR_FW_RAID_MAP_ALL *map);
93 static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx,
94 MR_FW_RAID_MAP_ALL *map);
95 static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld,
96 MR_FW_RAID_MAP_ALL *map);
97 MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
/*
 * Spanset related function prototypes
 * Added for PRL11 configuration (Uneven span support)
 */
103 void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo);
104 static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld,
105 u_int64_t stripRow, u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
106 RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map);
107 static u_int64_t get_row_from_strip(struct mrsas_softc *sc, u_int32_t ld,
108 u_int64_t strip, MR_FW_RAID_MAP_ALL *map);
109 static u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc,
110 u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
111 MR_FW_RAID_MAP_ALL *map, int *div_error);
112 static u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span,
113 u_int64_t stripe, MR_FW_RAID_MAP_ALL *map);
/*
 * Spanset related defines
 * Added for PRL11 configuration (Uneven span support)
 */
/* Strips per span row (including parity arms) for LD 'ld', span 'index_'. */
#define SPAN_ROW_SIZE(map, ld, index_) MR_LdSpanPtrGet(ld, index_, map)->spanRowSize
/*
 * Data strips per span row.  Fixed to expand the 'map_' macro parameter;
 * the previous definition expanded a literal 'map' and silently captured
 * whatever variable of that name was in the caller's scope.
 */
#define SPAN_ROW_DATA_SIZE(map_, ld, index_) MR_LdSpanPtrGet(ld, index_, map_)->spanRowDataSize
/* Sentinel span index meaning "no span matched". */
#define SPAN_INVALID 0xff
/* Region-lock key (start LBA) and length as programmed into the RAID context. */
typedef u_int64_t REGION_KEY;
typedef u_int32_t REGION_LEN;

/* FW logical-drive state value for an optimal LD (see mrsas_update_load_balance_params). */
#define MR_LD_STATE_OPTIMAL 3

/* Absolute difference; evaluates each argument twice, so avoid side effects. */
#define ABS_DIFF(a,b) ( ((a) > (b)) ? ((a) - (b)) : ((b) - (a)) )
/*
 * swap32: byte-swap a 32-bit value (endianness conversion); used below to
 * store the T10 PI reference tag in the CDB byte order.
 * The "#define swap32(x)" opener line was lost in this copy and is restored.
 */
#define swap32(x) ((unsigned int)( \
    (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \
    (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \
    (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \
    (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) ))
/*
 * In-line functions for mod and divide of 64-bit dividend and 32-bit divisor.
 * Assumes a check for a divisor of zero is not possible.
 *
 * @param dividend : Dividend
 * @param divisor  : Divisor
 * @return remainder / quotient
 *
 * Implemented as GCC statement expressions; the result-variable declaration
 * and final result expression were lost in this copy and are restored.
 */
#define mega_mod64(dividend, divisor) ({ \
u_int64_t remainder; \
remainder = ((u_int64_t) (dividend)) % (u_int32_t) (divisor); \
remainder; })

#define mega_div64_32(dividend, divisor) ({ \
u_int64_t quotient; \
quotient = ((u_int64_t) (dividend)) / (u_int32_t) (divisor); \
quotient; })
/*
 * Various RAID map access functions.  These functions access the various
 * parts of the RAID map and return the appropriate parameters.
 */
176 MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
178 return (&map->raidMap.ldSpanMap[ld].ldRaid);
181 u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
183 return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
186 static u_int16_t MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
188 return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef;
191 static u_int8_t MR_LdDataArmGet(u_int32_t ld, u_int32_t armIdx, MR_FW_RAID_MAP_ALL *map)
193 return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
196 static u_int16_t MR_PdDevHandleGet(u_int32_t pd, MR_FW_RAID_MAP_ALL *map)
198 return map->raidMap.devHndlInfo[pd].curDevHdl;
201 static u_int16_t MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_FW_RAID_MAP_ALL *map)
203 return map->raidMap.arMapInfo[ar].pd[arm];
206 static MR_LD_SPAN *MR_LdSpanPtrGet(u_int32_t ld, u_int32_t span, MR_FW_RAID_MAP_ALL *map)
208 return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
211 static MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
213 return &map->raidMap.ldSpanMap[ld].spanBlock[0];
216 u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
218 return map->raidMap.ldTgtIdToLd[ldTgtId];
221 u_int32_t MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_FW_RAID_MAP_ALL *map)
224 u_int32_t ld, ldBlockSize = MRSAS_SCSIBLOCKSIZE;
226 ld = MR_TargetIdToLdGet(ldTgtId, map);
229 * Check if logical drive was removed.
231 if (ld >= MAX_LOGICAL_DRIVES)
234 raid = MR_LdRaidGet(ld, map);
235 ldBlockSize = raid->logicalBlockLength;
237 ldBlockSize = MRSAS_SCSIBLOCKSIZE;
243 * MR_ValidateMapInfo: Validate RAID map
244 * input: Adapter instance soft state
246 * This function checks and validates the loaded RAID map. It returns 0 if
247 * successful, and 1 otherwise.
249 u_int8_t MR_ValidateMapInfo(struct mrsas_softc *sc)
254 uint32_t total_map_sz;
255 MR_FW_RAID_MAP_ALL *map = sc->raidmap_mem[(sc->map_id & 1)];
256 MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
257 PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) &sc->log_to_span;
259 total_map_sz = (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP) +
260 (sizeof(MR_LD_SPAN_MAP) * pFwRaidMap->ldCount));
262 if (pFwRaidMap->totalSize != total_map_sz) {
263 device_printf(sc->mrsas_dev, "map size %x not matching ld count\n", total_map_sz);
264 device_printf(sc->mrsas_dev, "span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
265 device_printf(sc->mrsas_dev, "pFwRaidMap->totalSize=%x\n", pFwRaidMap->totalSize);
269 if (sc->UnevenSpanSupport) {
270 mr_update_span_set(map, ldSpanInfo);
273 mrsas_update_load_balance_params(map, sc->load_balance_info);
/*
 * ******************************************************************************
 *
 * Function to print info about span set created in driver from FW raid map
 *
 * Inputs:
 *	map        - FW RAID map
 *	ldSpanInfo - ldSpanInfo per HBA instance
 *
 * NOTE(review): this copy of the function appears truncated (some local
 * declarations and closing braces are missing); confirm against upstream.
 */
static int getSpanInfo(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    /* Walk every target ID; slots with no backing LD are skipped. */
    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
        ld = MR_TargetIdToLdGet(ldCount, map);
        if (ld >= MAX_LOGICAL_DRIVES) {
        raid = MR_LdRaidGet(ld, map);
        printf("LD %x: span_depth=%x\n", ld, raid->spanDepth);
        for (span=0; span<raid->spanDepth; span++)
            printf("Span=%x, number of quads=%x\n", span,
                map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements);
        /* Dump each populated span set until an empty slot ends the list. */
        for (element=0; element < MAX_QUAD_DEPTH; element++) {
            span_set = &(ldSpanInfo[ld].span_set[element]);
            if (span_set->span_row_data_width == 0) break;

            printf(" Span Set %x: width=%x, diff=%x\n", element,
                (unsigned int)span_set->span_row_data_width,
                (unsigned int)span_set->diff);
            printf(" logical LBA start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->log_start_lba,
                (long unsigned int)span_set->log_end_lba);
            printf(" span row start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->span_row_start,
                (long unsigned int)span_set->span_row_end);
            printf(" data row start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->data_row_start,
                (long unsigned int)span_set->data_row_end);
            printf(" data strip start=0x%08lx, end=0x%08lx\n",
                (long unsigned int)span_set->data_strip_start,
                (long unsigned int)span_set->data_strip_end);

            /* For each span that holds this quad element, print the quad. */
            for (span=0; span<raid->spanDepth; span++) {
                if (map->raidMap.ldSpanMap[ld].spanBlock[span].
                    block_span_info.noElements >=element+1){
                    quad = &map->raidMap.ldSpanMap[ld].
                        spanBlock[span].block_span_info.
                    printf(" Span=%x, Quad=%x, diff=%x\n", span,
                        element, quad->diff);
                    printf(" offset_in_span=0x%08lx\n",
                        (long unsigned int)quad->offsetInSpan);
                    printf(" logical start=0x%08lx, end=0x%08lx\n",
                        (long unsigned int)quad->logStart,
                        (long unsigned int)quad->logEnd);
/*
 * ******************************************************************************
 *
 * This routine calculates the Span block for given row using spanset.
 *
 * Inputs:
 *	instance - HBA instance
 *	ld       - Logical drive number
 *
 * Outputs:
 *	block     - Absolute Block number in the physical disk
 *	div_error - Divide error code.
 *
 * NOTE(review): this copy appears truncated (return statements and some
 * braces are missing); confirm against upstream.
 */
u_int32_t mr_spanset_get_span_block(struct mrsas_softc *sc, u_int32_t ld, u_int64_t row,
    u_int64_t *span_blk, MR_FW_RAID_MAP_ALL *map, int *div_error)
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    u_int32_t span, info;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;

    /* Find the span set whose data-row range contains 'row'. */
    for (info=0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0) break;
        if (row > span_set->data_row_end) continue;

        for (span=0; span<raid->spanDepth; span++)
            if (map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements >= info+1) {
                quad = &map->raidMap.ldSpanMap[ld].
                    block_span_info.quad[info];
                /* Guard: a zero quad 'diff' would divide by zero below. */
                if (quad->diff == 0) {
                /* Row must fall inside the quad on a 'diff' stride. */
                if ( quad->logStart <= row &&
                    row <= quad->logEnd &&
                    (mega_mod64(row - quad->logStart,
                    quad->diff)) == 0 ) {
                    if (span_blk != NULL) {
                        ((row - quad->logStart),
                        /* Convert the quad-relative row to an absolute block. */
                        blk = (blk + quad->offsetInSpan)
                            << raid->stripeShift;
/*
 * ******************************************************************************
 *
 * This routine calculates the row for given strip using spanset.
 *
 * Inputs:
 *	instance - HBA instance
 *	ld       - Logical drive number
 *
 * Outputs:
 *	row - row associated with strip
 *
 * NOTE(review): this copy appears truncated (some statements and braces are
 * missing); confirm against upstream.
 */
static u_int64_t get_row_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t info, strip_offset, span, span_offset;
    u_int64_t span_set_Strip, span_set_Row;

    /* Find the span set whose data-strip range contains 'strip'. */
    for (info=0; info < MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0) break;
        if (strip > span_set->data_strip_end) continue;

        /*
         * Strip position inside the set: offset within a span row, and the
         * row index scaled by the quad 'diff'.
         */
        span_set_Strip = strip - span_set->data_strip_start;
        strip_offset = mega_mod64(span_set_Strip,
            span_set->span_row_data_width);
        span_set_Row = mega_div64_32(span_set_Strip,
            span_set->span_row_data_width) * span_set->diff;
        for (span=0,span_offset=0; span<raid->spanDepth; span++)
            if (map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements >=info+1) {
                span_set->strip_offset[span])
        mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : Strip 0x%llx, span_set_Strip 0x%llx, span_set_Row 0x%llx "
            "data width 0x%llx span offset 0x%llx\n", (unsigned long long)strip,
            (unsigned long long)span_set_Strip,
            (unsigned long long)span_set_Row,
            (unsigned long long)span_set->span_row_data_width, (unsigned long long)span_offset);
        mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : For strip 0x%llx row is 0x%llx\n", (unsigned long long)strip,
            (unsigned long long) span_set->data_row_start +
            (unsigned long long) span_set_Row + (span_offset - 1));
        return (span_set->data_row_start + span_set_Row + (span_offset - 1));
/*
 * ******************************************************************************
 *
 * This routine calculates the Start Strip for given row using spanset.
 *
 * Inputs:
 *	instance - HBA instance
 *	ld       - Logical drive number
 *
 * Outputs:
 *	Strip - Start strip associated with row
 *
 * NOTE(review): this copy appears truncated (some expressions, returns, and
 * braces are missing); confirm against upstream.
 */
static u_int64_t get_strip_from_row(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t row, MR_FW_RAID_MAP_ALL *map)
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    MR_QUAD_ELEMENT *quad;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t span, info;

    /* Find the span set whose data-row range contains 'row'. */
    for (info=0; info<MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0) break;
        if (row > span_set->data_row_end) continue;

        for (span=0; span<raid->spanDepth; span++)
            if (map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements >=info+1) {
                quad = &map->raidMap.ldSpanMap[ld].
                    spanBlock[span].block_span_info.quad[info];
                /* Row must fall inside the quad on a 'diff' stride. */
                if ( quad->logStart <= row &&
                    row <= quad->logEnd &&
                    mega_mod64((row - quad->logStart),
                    /* Convert the set-relative row to a strip number. */
                    strip = mega_div64_32
                        (((row - span_set->data_row_start)
                    strip *= span_set->span_row_data_width;
                    strip += span_set->data_strip_start;
                    strip += span_set->strip_offset[span];
    mrsas_dprint(sc, MRSAS_PRL11,"LSI Debug - get_strip_from_row: returns invalid "
        "strip for ld=%x, row=%lx\n", ld, (long unsigned int)row);
/*
 * ******************************************************************************
 *
 * This routine calculates the Physical Arm for given strip using spanset.
 *
 * Inputs:
 *	instance - HBA instance
 *	ld       - Logical drive number
 *
 * Outputs:
 *	Phys Arm - Phys Arm associated with strip
 *
 * NOTE(review): this copy appears truncated (some statements and braces are
 * missing); confirm against upstream.
 */
static u_int32_t get_arm_from_strip(struct mrsas_softc *sc,
    u_int32_t ld, u_int64_t strip, MR_FW_RAID_MAP_ALL *map)
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    LD_SPAN_SET *span_set;
    PLD_SPAN_INFO ldSpanInfo = sc->log_to_span;
    u_int32_t info, strip_offset, span, span_offset;

    /* Find the span set whose data-strip range contains 'strip'. */
    for (info=0; info<MAX_QUAD_DEPTH; info++) {
        span_set = &(ldSpanInfo[ld].span_set[info]);

        if (span_set->span_row_data_width == 0) break;
        if (strip > span_set->data_strip_end) continue;

        /* Position of the strip within its span row. */
        strip_offset = (u_int32_t)mega_mod64
            ((strip - span_set->data_strip_start),
            span_set->span_row_data_width);

        for (span=0,span_offset=0; span<raid->spanDepth; span++)
            if (map->raidMap.ldSpanMap[ld].spanBlock[span].
                block_span_info.noElements >=info+1) {
                span_set->strip_offset[span])
                span_set->strip_offset[span];
        mrsas_dprint(sc, MRSAS_PRL11, "LSI PRL11: get_arm_from_strip: "
            " for ld=0x%x strip=0x%lx arm is 0x%x\n", ld,
            (long unsigned int)strip, (strip_offset - span_offset));
        return (strip_offset - span_offset);

    mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: - get_arm_from_strip: returns invalid arm"
        " for ld=%x strip=%lx\n", ld, (long unsigned int)strip);
/*
 * This function will return the Phys arm for the given stripe on LD 'ld',
 * span 'span'.
 * NOTE(review): the switch's case labels, default branch, and return appear
 * lost in this copy; confirm against upstream.
 */
u_int8_t get_arm(struct mrsas_softc *sc, u_int32_t ld, u_int8_t span, u_int64_t stripe,
    MR_FW_RAID_MAP_ALL *map)
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    /* Need to check correct default value */

    switch (raid->level) {
            /* Arm is the stripe's position within the span row. */
            arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span));
            /* start with logical arm (uneven-span path) */
            arm = get_arm_from_strip(sc, ld, stripe, map);
/*
 * ******************************************************************************
 *
 * This routine calculates the arm, span and block for the specified stripe and
 * reference in stripe using spanset.
 *
 * Inputs:
 *	ld       - Logical drive number
 *	stripRow - Stripe number
 *	stripRef - Reference in stripe
 *
 * Outputs:
 *	block - Absolute Block number in the physical disk
 *
 * NOTE(review): this copy appears truncated (several declarations, braces,
 * and else-branches are missing); confirm against upstream.
 */
static u_int8_t mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripRow,
    u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
    MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
    u_int8_t physArm, span;
    u_int8_t retval = TRUE;
    u_int64_t *pdBlock = &io_info->pdBlock;
    u_int16_t *pDevHandle = &io_info->devHandle;
    u_int32_t logArm, rowMod, armQ, arm;
    u_int8_t do_invader = 0;

    /* do_invader selects the Invader/Fury-specific lock-flag handling below. */
    if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))

    /* Get row and span from io_info for Uneven Span IO. */
    row = io_info->start_row;
    span = io_info->start_span;

    /* RAID 6: rotate the logical arm past the parity arms for this row. */
    if (raid->level == 6) {
        logArm = get_arm_from_strip(sc, ld, stripRow, map);
        rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span));
        armQ = SPAN_ROW_SIZE(map,ld,span) - 1 - rowMod;
        arm = armQ + 1 + logArm;
        if (arm >= SPAN_ROW_SIZE(map, ld, span))
            arm -= SPAN_ROW_SIZE(map ,ld ,span);
        physArm = (u_int8_t)arm;
        physArm = get_arm(sc, ld, span, stripRow, map);

    /* Resolve array ref -> physical drive -> device handle. */
    arRef = MR_LdSpanArrayGet(ld, span, map);
    pd = MR_ArPdGet(arRef, physArm, map);

    if (pd != MR_PD_INVALID)
        *pDevHandle = MR_PdDevHandleGet(pd, map);
        *pDevHandle = MR_PD_INVALID;
    if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
        raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
        pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
    else if (raid->level == 1) {
        /* RAID 1: try the mirror arm when the first PD slot is invalid. */
        pd = MR_ArPdGet(arRef, physArm + 1, map);
        if (pd != MR_PD_INVALID)
            *pDevHandle = MR_PdDevHandleGet(pd, map);

    *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
    pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
/*
 * MR_BuildRaidContext: Set up Fast path RAID context
 *
 * This function will initiate command processing. The start/end row
 * and strip information is calculated then the lock is acquired.
 * This function will return 0 if region lock was acquired OR return
 * (remainder of sentence lost in this copy).
 *
 * NOTE(review): this copy appears truncated — the return-type line
 * (u_int8_t per the prototype above), several declarations, else-branches,
 * and closing braces are missing; confirm against upstream.
 */
MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
    RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
    u_int32_t ld, stripSize, stripe_mask;
    u_int64_t endLba, endStrip, endRow, start_row, start_strip;
    u_int8_t num_strips, numRows;
    u_int16_t ref_in_start_stripe, ref_in_end_stripe;
    u_int64_t ldStartBlock;
    u_int32_t numBlocks, ldTgtId;
    u_int8_t isRead, stripIdx;
    u_int8_t startlba_span = SPAN_INVALID;
    u_int64_t *pdBlock = &io_info->pdBlock;

    ldStartBlock = io_info->ldStartBlock;
    numBlocks = io_info->numBlocks;
    ldTgtId = io_info->ldTgtId;
    isRead = io_info->isRead;

    io_info->IoforUnevenSpan = 0;
    io_info->start_span = SPAN_INVALID;

    ld = MR_TargetIdToLdGet(ldTgtId, map);
    raid = MR_LdRaidGet(ld, map);

    /*
     * if rowDataSize @RAID map and spanRowDataSize @SPAN INFO are zero
     */
    if (raid->rowDataSize == 0) {
        if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0)
        else if (sc->UnevenSpanSupport) {
            io_info->IoforUnevenSpan = 1;
            mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: raid->rowDataSize is 0, but has SPAN[0] rowDataSize = 0x%0x,"
                " but there is _NO_ UnevenSpanSupport\n",
                MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize);

    stripSize = 1 << raid->stripeShift;
    stripe_mask = stripSize-1;
    /*
     * calculate starting row and stripe, and number of strips and rows
     */
    start_strip = ldStartBlock >> raid->stripeShift;
    ref_in_start_stripe = (u_int16_t)(ldStartBlock & stripe_mask);
    endLba = ldStartBlock + numBlocks - 1;
    ref_in_end_stripe = (u_int16_t)(endLba & stripe_mask);
    endStrip = endLba >> raid->stripeShift;
    num_strips = (u_int8_t)(endStrip - start_strip + 1); // End strip
    if (io_info->IoforUnevenSpan) {
        /* Uneven span: rows and spans come from the span-set tables. */
        start_row = get_row_from_strip(sc, ld, start_strip, map);
        endRow = get_row_from_strip(sc, ld, endStrip, map);
        if (raid->spanDepth == 1) {
            *pdBlock = start_row << raid->stripeShift;
            startlba_span = (u_int8_t)mr_spanset_get_span_block(sc, ld, start_row,
                pdBlock, map, &error_code);
            if (error_code == 1) {
                mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d. Send IO w/o region lock.\n",
        if (startlba_span == SPAN_INVALID) {
            mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: return from %s %d for row 0x%llx,"
                "start strip %llx endSrip %llx\n", __func__,
                __LINE__, (unsigned long long)start_row,
                (unsigned long long)start_strip,
                (unsigned long long)endStrip);
        io_info->start_span = startlba_span;
        io_info->start_row = start_row;
        mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug: Check Span number from %s %d for row 0x%llx, "
            " start strip 0x%llx endSrip 0x%llx span 0x%x\n",
            __func__, __LINE__, (unsigned long long)start_row,
            (unsigned long long)start_strip,
            (unsigned long long)endStrip, startlba_span);
        mrsas_dprint(sc, MRSAS_PRL11, "LSI Debug : 1. start_row 0x%llx endRow 0x%llx Start span 0x%x\n",
            (unsigned long long)start_row, (unsigned long long)endRow, startlba_span);
        /* Even span: rows derive directly from rowDataSize. */
        start_row = mega_div64_32(start_strip, raid->rowDataSize); // Start Row
        endRow = mega_div64_32(endStrip, raid->rowDataSize);

    numRows = (u_int8_t)(endRow - start_row + 1); // get the row count

    /*
     * Calculate region info. (Assume region at start of first row, and
     * assume this IO needs the full row - will adjust if not true.)
     */
    regStart = start_row << raid->stripeShift;

    /* Check if we can send this I/O via FastPath */
    if (raid->capability.fpCapable) {
        io_info->fpOkForIo = (raid->capability.fpReadCapable &&
            ((num_strips == 1) ||
            fpReadAcrossStripe));
        io_info->fpOkForIo = (raid->capability.fpWriteCapable &&
            ((num_strips == 1) ||
            fpWriteAcrossStripe));
        io_info->fpOkForIo = FALSE;

    if (num_strips == 1) {
        /* single-strip IOs can always lock only the data needed,
           multi-strip IOs always need to full stripe locked */
        regStart += ref_in_start_stripe;
    else if (io_info->IoforUnevenSpan == 0){
        // For Even span region lock optimization.
        // If the start strip is the last in the start row
        if (start_strip == (start_row + 1) * raid->rowDataSize - 1) {
            regStart += ref_in_start_stripe;
            // initialize count to sectors from startRef to end of strip
            regSize = stripSize - ref_in_start_stripe;
        // add complete rows in the middle of the transfer
            regSize += (numRows-2) << raid->stripeShift;
        // if IO ends within first strip of last row
        if (endStrip == endRow*raid->rowDataSize)
            regSize += ref_in_end_stripe+1;
            regSize += stripSize;
        //For Uneven span region lock optimization.
        // If the start strip is the last in the start row
        if (start_strip == (get_strip_from_row(sc, ld, start_row, map) +
            SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) {
            regStart += ref_in_start_stripe;
            // initialize count to sectors from startRef to end of strip
            regSize = stripSize - ref_in_start_stripe;
        // add complete rows in the middle of the transfer
            regSize += (numRows-2) << raid->stripeShift;
        // if IO ends within first strip of last row
        if (endStrip == get_strip_from_row(sc, ld, endRow, map))
            regSize += ref_in_end_stripe+1;
            regSize += stripSize;

    /* Program the region lock and LD identity into the RAID context. */
    pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
    if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
        pRAID_Context->regLockFlags = (isRead)? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
        pRAID_Context->regLockFlags = (isRead)? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
    pRAID_Context->VirtualDiskTgtId = raid->targetId;
    pRAID_Context->regLockRowLBA = regStart;
    pRAID_Context->regLockLength = regSize;
    pRAID_Context->configSeqNum = raid->seqNum;

    /*
     * Get Phy Params only if FP capable, or else leave it to MR firmware
     * to do the calculation.
     */
    if (io_info->fpOkForIo) {
        retval = io_info->IoforUnevenSpan ?
            mr_spanset_get_phy_params(sc, ld,
            start_strip, ref_in_start_stripe, io_info,
            pRAID_Context, map) :
            MR_GetPhyParams(sc, ld, start_strip,
            ref_in_start_stripe, io_info, pRAID_Context, map);
        /* If IO on an invalid Pd, then FP is not possible */
        if (io_info->devHandle == MR_PD_INVALID)
            io_info->fpOkForIo = FALSE;
        for (stripIdx=0; stripIdx<num_strips; stripIdx++) {
            retval = io_info->IoforUnevenSpan ?
                mr_spanset_get_phy_params(sc, ld,
                start_strip + stripIdx,
                ref_in_start_stripe, io_info,
                pRAID_Context, map) :
                MR_GetPhyParams(sc, ld,
                start_strip + stripIdx, ref_in_start_stripe,
                io_info, pRAID_Context, map);

    // Just for testing what arm we get for strip.
    get_arm_from_strip(sc, ld, start_strip, map);
/*
 * ******************************************************************************
 *
 * This routine prepares spanset info from the valid RAID map and stores it
 * into the local copy of the ldSpanInfo per-instance data structure.
 *
 * Inputs:
 *	map        - LD map
 *	ldSpanInfo - ldSpanInfo per HBA instance
 *
 * NOTE(review): this copy appears truncated (several declarations, the
 * first-vs-subsequent span-set conditional, and closing braces are missing);
 * confirm against upstream.
 */
void mr_update_span_set(MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
    u_int32_t element,span_row_width;
    LD_SPAN_SET *span_set, *span_set_prev;
    MR_QUAD_ELEMENT *quad;

    for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
        ld = MR_TargetIdToLdGet(ldCount, map);
        if (ld >= MAX_LOGICAL_DRIVES)
        raid = MR_LdRaidGet(ld, map);
        /* Build one span set per quad element present across the spans. */
        for (element=0; element < MAX_QUAD_DEPTH; element++) {
            for (span=0; span < raid->spanDepth; span++) {
                if (map->raidMap.ldSpanMap[ld].spanBlock[span].
                    block_span_info.noElements < element+1)
                span_set = &(ldSpanInfo[ld].span_set[element]);
                quad = &map->raidMap.ldSpanMap[ld].
                    spanBlock[span].block_span_info.
                span_set->diff = quad->diff;

                /* Sum the data width of all spans that hold this element. */
                for (count=0,span_row_width=0;
                    count<raid->spanDepth; count++) {
                    if (map->raidMap.ldSpanMap[ld].
                        noElements >=element+1) {
                        span_set->strip_offset[count] =
                            (ld, count, map)->spanRowDataSize;
                        printf("LSI Debug span %x rowDataSize %x\n",
                            count, MR_LdSpanPtrGet
                            (ld, count, map)->spanRowDataSize);

                span_set->span_row_data_width = span_row_width;
                /* Rows covered by this quad, rounded up by 'diff'. */
                span_row = mega_div64_32(((quad->logEnd -
                    quad->logStart) + quad->diff), quad->diff);

                span_set->log_start_lba = 0;
                span_set->log_end_lba =
                    ((span_row << raid->stripeShift) * span_row_width) - 1;

                span_set->span_row_start = 0;
                span_set->span_row_end = span_row - 1;

                span_set->data_strip_start = 0;
                span_set->data_strip_end =
                    (span_row * span_row_width) - 1;

                span_set->data_row_start = 0;
                span_set->data_row_end =
                    (span_row * quad->diff) - 1;
                /* Subsequent sets continue from where the previous ended. */
                span_set_prev = &(ldSpanInfo[ld].
                    span_set[element - 1]);
                span_set->log_start_lba =
                    span_set_prev->log_end_lba + 1;
                span_set->log_end_lba =
                    span_set->log_start_lba +
                    ((span_row << raid->stripeShift) * span_row_width) - 1;

                span_set->span_row_start =
                    span_set_prev->span_row_end + 1;
                span_set->span_row_end =
                    span_set->span_row_start + span_row - 1;

                span_set->data_strip_start =
                    span_set_prev->data_strip_end + 1;
                span_set->data_strip_end =
                    span_set->data_strip_start +
                    (span_row * span_row_width) - 1;

                span_set->data_row_start =
                    span_set_prev->data_row_end + 1;
                span_set->data_row_end =
                    span_set->data_row_start +
                    (span_row * quad->diff) - 1;

            if (span == raid->spanDepth) break; // no quads remain
    getSpanInfo(map, ldSpanInfo); //to get span set info
1026 * mrsas_update_load_balance_params: Update load balance parmas
1027 * Inputs: map pointer
1031 * This function updates the load balance parameters for the LD config
1032 * of a two drive optimal RAID-1.
1034 void mrsas_update_load_balance_params(MR_FW_RAID_MAP_ALL *map,
1035 PLD_LOAD_BALANCE_INFO lbInfo)
1039 u_int32_t pd, arRef;
1042 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++)
1044 ld = MR_TargetIdToLdGet(ldCount, map);
1045 if (ld >= MAX_LOGICAL_DRIVES) {
1046 lbInfo[ldCount].loadBalanceFlag = 0;
1050 raid = MR_LdRaidGet(ld, map);
1052 /* Two drive Optimal RAID 1 */
1053 if ((raid->level == 1) && (raid->rowSize == 2) &&
1054 (raid->spanDepth == 1)
1055 && raid->ldState == MR_LD_STATE_OPTIMAL) {
1056 lbInfo[ldCount].loadBalanceFlag = 1;
1058 /* Get the array on which this span is present */
1059 arRef = MR_LdSpanArrayGet(ld, 0, map);
1062 pd = MR_ArPdGet(arRef, 0, map);
1063 /* Get dev handle from PD */
1064 lbInfo[ldCount].raid1DevHandle[0] = MR_PdDevHandleGet(pd, map);
1065 pd = MR_ArPdGet(arRef, 1, map);
1066 lbInfo[ldCount].raid1DevHandle[1] = MR_PdDevHandleGet(pd, map);
1069 lbInfo[ldCount].loadBalanceFlag = 0;
/*
 * mrsas_set_pd_lba: Sets PD LBA
 * input: io_request pointer
 *        Local RAID map pointer
 *
 * Used to set the PD logical block address in CDB for FP IOs.
 */
1086 void mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST *io_request, u_int8_t cdb_len,
1087 struct IO_REQUEST_INFO *io_info, union ccb *ccb,
1088 MR_FW_RAID_MAP_ALL *local_map_ptr, u_int32_t ref_tag,
1089 u_int32_t ld_block_size)
1093 u_int64_t start_blk = io_info->pdBlock;
1094 u_int8_t *cdb = io_request->CDB.CDB32;
1095 u_int32_t num_blocks = io_info->numBlocks;
1096 u_int8_t opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1097 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
1099 /* Check if T10 PI (DIF) is enabled for this LD */
1100 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
1101 raid = MR_LdRaidGet(ld, local_map_ptr);
1102 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
1103 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1104 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
1105 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
1107 if (ccb_h->flags == CAM_DIR_OUT)
1108 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
1110 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
1111 cdb[10] = MRSAS_RD_WR_PROTECT_CHECK_ALL;
1114 cdb[12] = (u_int8_t)((start_blk >> 56) & 0xff);
1115 cdb[13] = (u_int8_t)((start_blk >> 48) & 0xff);
1116 cdb[14] = (u_int8_t)((start_blk >> 40) & 0xff);
1117 cdb[15] = (u_int8_t)((start_blk >> 32) & 0xff);
1118 cdb[16] = (u_int8_t)((start_blk >> 24) & 0xff);
1119 cdb[17] = (u_int8_t)((start_blk >> 16) & 0xff);
1120 cdb[18] = (u_int8_t)((start_blk >> 8) & 0xff);
1121 cdb[19] = (u_int8_t)(start_blk & 0xff);
1123 /* Logical block reference tag */
1124 io_request->CDB.EEDP32.PrimaryReferenceTag = swap32(ref_tag);
1125 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff;
1126 io_request->IoFlags = 32; /* Specify 32-byte cdb */
1128 /* Transfer length */
1129 cdb[28] = (u_int8_t)((num_blocks >> 24) & 0xff);
1130 cdb[29] = (u_int8_t)((num_blocks >> 16) & 0xff);
1131 cdb[30] = (u_int8_t)((num_blocks >> 8) & 0xff);
1132 cdb[31] = (u_int8_t)(num_blocks & 0xff);
1134 /* set SCSI IO EEDP Flags */
1135 if (ccb_h->flags == CAM_DIR_OUT) {
1136 io_request->EEDPFlags =
1137 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1138 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1139 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1140 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1141 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
1144 io_request->EEDPFlags =
1145 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1146 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
1148 io_request->Control |= (0x4 << 26);
1149 io_request->EEDPBlockSize = ld_block_size;
1152 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
1153 if (((cdb_len == 12) || (cdb_len == 16)) &&
1154 (start_blk <= 0xffffffff)) {
1155 if (cdb_len == 16) {
1156 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
1162 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
1168 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1175 /* Transfer length */
1176 cdb[8] = (u_int8_t)(num_blocks & 0xff);
1177 cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
1179 io_request->IoFlags = 10; /* Specify 10-byte cdb */
1181 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
1182 /* Convert to 16 byte CDB for large LBA's */
1185 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
1189 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
1195 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
1202 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1209 /* Transfer length */
1210 cdb[13] = (u_int8_t)(num_blocks & 0xff);
1211 cdb[12] = (u_int8_t)((num_blocks >> 8) & 0xff);
1212 cdb[11] = (u_int8_t)((num_blocks >> 16) & 0xff);
1213 cdb[10] = (u_int8_t)((num_blocks >> 24) & 0xff);
1215 io_request->IoFlags = 16; /* Specify 16-byte cdb */
1217 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
1218 /* convert to 10 byte CDB */
1219 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
1222 memset(cdb, 0, sizeof(cdb));
1226 /* Set transfer length */
1227 cdb[8] = (u_int8_t)(num_blocks & 0xff);
1228 cdb[7] = (u_int8_t)((num_blocks >> 8) & 0xff);
1230 /* Specify 10-byte cdb */
1234 /* Fall through normal case, just load LBA here */
1239 u_int8_t val = cdb[1] & 0xE0;
1240 cdb[3] = (u_int8_t)(start_blk & 0xff);
1241 cdb[2] = (u_int8_t)((start_blk >> 8) & 0xff);
1242 cdb[1] = val | ((u_int8_t)(start_blk >> 16) & 0x1f);
1246 cdb[5] = (u_int8_t)(start_blk & 0xff);
1247 cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
1248 cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
1249 cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
1252 cdb[5] = (u_int8_t)(start_blk & 0xff);
1253 cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
1254 cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
1255 cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
1258 cdb[9] = (u_int8_t)(start_blk & 0xff);
1259 cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
1260 cdb[7] = (u_int8_t)((start_blk >> 16) & 0xff);
1261 cdb[6] = (u_int8_t)((start_blk >> 24) & 0xff);
1262 cdb[5] = (u_int8_t)((start_blk >> 32) & 0xff);
1263 cdb[4] = (u_int8_t)((start_blk >> 40) & 0xff);
1264 cdb[3] = (u_int8_t)((start_blk >> 48) & 0xff);
1265 cdb[2] = (u_int8_t)((start_blk >> 56) & 0xff);
1272 * mrsas_get_best_arm Determine the best spindle arm
1273 * Inputs: Load balance info
1275 * This function determines and returns the best arm by looking at the
1276 * parameters of the last PD access.
/*
 * Picks arm 0 or 1 of a two-drive RAID-1 for the next I/O: prefer the arm
 * whose last-accessed block is closest to 'block' (shortest seek), but
 * steer away from an arm that is far ahead of its mirror in pending
 * commands.  NOTE(review): the body of the overload 'if' below (presumably
 * flipping bestArm) and the final 'return bestArm;' fall outside this
 * capture.
 */
1278 u_int8_t mrsas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
1279 u_int64_t block, u_int32_t count)
1281 u_int16_t pend0, pend1;
1282 u_int64_t diff0, diff1;
1285 /* get the pending cmds for the data and mirror arms */
1286 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
1287 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
1289 /* Determine the disk whose head is nearer to the req. block */
1290 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
1291 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
1292 bestArm = (diff0 <= diff1 ? 0 : 1);
/* Override the seek-distance choice when one arm has 16+ more pending
 * commands than the other.  NOTE(review): the test uses pend0/pend1
 * directly rather than indexing by bestArm -- verify the intended pairing
 * against the upstream driver. */
1294 if ((bestArm == arm && pend0 > pend1 + 16) || (bestArm != arm && pend1 > pend0 + 16))
1297 /* Update the last accessed block on the correct pd */
1298 lbInfo->last_accessed_block[bestArm] = block + count - 1;
1304 * mrsas_get_updated_dev_handle Get the update dev handle
1305 * Inputs: Load balance info
1308 * This function determines and returns the updated dev handle.
/*
 * RAID-1 load balancing: map the firmware-resolved device handle back to
 * an arm index, ask mrsas_get_best_arm() which arm to actually use, bump
 * that arm's pending-command counter, and return its device handle.
 * NOTE(review): the 'return devHandle;' and closing brace fall outside
 * this capture.
 */
1310 u_int16_t mrsas_get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
1311 struct IO_REQUEST_INFO *io_info)
1313 u_int8_t arm, old_arm;
1314 u_int16_t devHandle;
/* Arm whose handle the RAID map originally resolved to */
1316 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
1318 /* get best new arm */
1319 arm = mrsas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock, io_info->numBlocks);
1320 devHandle = lbInfo->raid1DevHandle[arm];
/* Charge the outstanding command to the chosen arm; presumably decremented
 * on completion elsewhere in the driver -- not visible in this fragment. */
1321 atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
1327 * MR_GetPhyParams Calculates arm, span, and block
1328 * Inputs: Adapter instance soft state
1329 * Logical drive number (LD)
1330 * Stripe number (stripRow)
1331 * Reference in stripe (stripRef)
1332 * Outputs: Span number
1333 * Absolute Block number in the physical disk
1335 * This routine calculates the arm, span and block for the specified stripe
1336 * and reference in stripe.
/*
 * NOTE(review): several physical lines of this function are missing from
 * this capture -- the error returns after the rowSize/modFactor
 * divide-by-zero guards, the 'else' keywords between branches, and the
 * final 'return retval;'.
 */
1338 u_int8_t MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
1340 u_int16_t stripRef, struct IO_REQUEST_INFO *io_info,
1341 RAID_CONTEXT *pRAID_Context, MR_FW_RAID_MAP_ALL *map)
1343 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
1344 u_int32_t pd, arRef;
1345 u_int8_t physArm, span;
1347 u_int8_t retval = TRUE;
1349 u_int64_t *pdBlock = &io_info->pdBlock;
1350 u_int16_t *pDevHandle = &io_info->devHandle;
1351 u_int32_t rowMod, armQ, arm, logArm;
1352 u_int8_t do_invader = 0;
/* Invader/Fury controllers get different region-lock handling below
 * (the 'do_invader = 1;' body is not in this capture) */
1354 if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
/* Row within the LD: stripe number divided by data arms per row */
1357 row = mega_div64_32(stripRow, raid->rowDataSize);
1359 if (raid->level == 6) {
1360 logArm = mega_mod64(stripRow, raid->rowDataSize); // logical arm within row
/* Divide-by-zero guard; its error return is not in this capture */
1361 if (raid->rowSize == 0)
1363 rowMod = mega_mod64(row, raid->rowSize); // get logical row mod
1364 armQ = raid->rowSize-1-rowMod; // index of Q drive
1365 arm = armQ+1+logArm; // data always logically follows Q
1366 if (arm >= raid->rowSize) // handle wrap condition
1367 arm -= raid->rowSize;
1368 physArm = (u_int8_t)arm;
/* (non-RAID6 path: physical arm comes from the map's arm rotation;
 * the modFactor==0 guard's error return is not in this capture) */
1371 if (raid->modFactor == 0)
1373 physArm = MR_LdDataArmGet(ld, mega_mod64(stripRow, raid->modFactor), map);
/* Single-span LD: block offset computed directly from the row; otherwise
 * search the span quads via MR_GetSpanBlock(). */
1376 if (raid->spanDepth == 1) {
1378 *pdBlock = row << raid->stripeShift;
1381 span = (u_int8_t)MR_GetSpanBlock(ld, row, pdBlock, map, &error_code);
/* Span lookup failed (div_error); the error return is not in this capture */
1382 if (error_code == 1)
1386 /* Get the array on which this span is present */
1387 arRef = MR_LdSpanArrayGet(ld, span, map);
1389 pd = MR_ArPdGet(arRef, physArm, map); // Get the Pd.
1391 if (pd != MR_PD_INVALID)
1392 *pDevHandle = MR_PdDevHandleGet(pd, map); // Get dev handle from Pd.
1394 *pDevHandle = MR_PD_INVALID; // set dev handle as invalid.
/* RAID 5/6 need an exclusive region lock, except on Invader/Fury when the
 * map reports no region lock required for reads. */
1395 if ((raid->level >= 5) && ((!do_invader) || (do_invader &&
1396 raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
1397 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
1398 else if (raid->level == 1) {
/* RAID 1: resolve the mirror arm's handle; the condition (if any)
 * guarding this fallback is not in this capture -- confirm upstream. */
1399 pd = MR_ArPdGet(arRef, physArm + 1, map); // Get Alternate Pd.
1400 if (pd != MR_PD_INVALID)
1401 *pDevHandle = MR_PdDevHandleGet(pd, map);//Get dev handle from Pd.
/* Final physical block = span start + row offset + offset within stripe */
1405 *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk;
/* Pack span (upper bits) and physical arm (lower bits) for the firmware
 * RAID context */
1406 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
1411 * MR_GetSpanBlock Calculates span block
1416 * Outputs: Span number
1419 * This routine calculates the span from the span block info.
1421 u_int32_t MR_GetSpanBlock(u_int32_t ld, u_int64_t row, u_int64_t *span_blk,
1422 MR_FW_RAID_MAP_ALL *map, int *div_error)
1424 MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
1425 MR_QUAD_ELEMENT *quad;
1426 MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
1428 u_int64_t blk, debugBlk;
1430 for (span=0; span < raid->spanDepth; span++, pSpanBlock++) {
1431 for (j=0; j < pSpanBlock->block_span_info.noElements; j++) {
1432 quad = &pSpanBlock->block_span_info.quad[j];
1433 if (quad->diff == 0) {
1437 if (quad->logStart <= row && row <= quad->logEnd &&
1438 (mega_mod64(row-quad->logStart, quad->diff)) == 0) {
1439 if (span_blk != NULL) {
1440 blk = mega_div64_32((row-quad->logStart), quad->diff);
1442 blk = (blk + quad->offsetInSpan) << raid->stripeShift;