2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include "dev/mrsas/mrsas.h"
39 #include <cam/cam_ccb.h>
40 #include <cam/cam_sim.h>
41 #include <cam/cam_xpt_sim.h>
42 #include <cam/cam_debug.h>
43 #include <cam/cam_periph.h>
44 #include <cam/cam_xpt_periph.h>
46 #include <cam/scsi/scsi_all.h>
47 #include <cam/scsi/scsi_message.h>
48 #include <sys/taskqueue.h>
49 #include <sys/kernel.h>
51 #include <sys/time.h> /* XXX for pcpu.h */
52 #include <sys/pcpu.h> /* XXX for PCPU_GET */
54 #define smp_processor_id() PCPU_GET(cpuid)
59 int mrsas_cam_attach(struct mrsas_softc *sc);
60 int mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb);
61 int mrsas_bus_scan(struct mrsas_softc *sc);
62 int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
64 mrsas_map_request(struct mrsas_softc *sc,
65 struct mrsas_mpt_cmd *cmd, union ccb *ccb);
67 mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
70 mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
73 mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
74 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
76 mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
77 union ccb *ccb, u_int32_t device_id,
78 MRSAS_RAID_SCSI_IO_REQUEST * io_request);
79 void mrsas_xpt_freeze(struct mrsas_softc *sc);
80 void mrsas_xpt_release(struct mrsas_softc *sc);
81 void mrsas_cam_detach(struct mrsas_softc *sc);
82 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
83 void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
84 void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
86 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
87 u_int32_t req_desc_hi);
89 mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
90 u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
91 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
92 u_int32_t ld_block_size);
93 static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
94 static void mrsas_cam_poll(struct cam_sim *sim);
95 static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
96 static void mrsas_scsiio_timeout(void *data);
97 static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id);
98 static void mrsas_tm_response_code(struct mrsas_softc *sc,
99 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply);
100 static int mrsas_issue_tm(struct mrsas_softc *sc,
101 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc);
103 mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
104 int nseg, int error);
106 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
109 static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
110 bus_dma_segment_t *segs, int nsegs);
111 static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd,
112 bus_dma_segment_t *segs, int nseg);
113 static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd,
114 bus_dma_segment_t *segs, int nseg);
116 struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
117 MRSAS_REQUEST_DESCRIPTOR_UNION *
118 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
120 extern int mrsas_reset_targets(struct mrsas_softc *sc);
121 extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
123 MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
124 extern void mrsas_isr(void *arg);
125 extern void mrsas_aen_handler(struct mrsas_softc *sc);
127 MR_BuildRaidContext(struct mrsas_softc *sc,
128 struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
129 MR_DRV_RAID_MAP_ALL * map);
131 MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
132 MR_DRV_RAID_MAP_ALL * map);
134 mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
135 PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
136 extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
137 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
138 extern void mrsas_disable_intr(struct mrsas_softc *sc);
139 extern void mrsas_enable_intr(struct mrsas_softc *sc);
140 void mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
141 struct mrsas_mpt_cmd *cmd);
144 * mrsas_cam_attach: Main entry to CAM subsystem
145 * input: Adapter instance soft state
147 * This function is called from mrsas_attach() during initialization to perform
148 * SIM allocations and XPT bus registration. On kernel versions 7.4 and
149 * earlier, it also initiates a bus scan.
152 mrsas_cam_attach(struct mrsas_softc *sc)
154 struct cam_devq *devq;
157 mrsas_cam_depth = sc->max_scsi_cmds;
159 if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
160 device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
164 * Create SIM for bus 0 and register, also create path
166 sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
167 device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
168 mrsas_cam_depth, devq);
169 if (sc->sim_0 == NULL) {
171 device_printf(sc->mrsas_dev, "Cannot register SIM\n");
174 /* Initialize taskqueue for Event Handling */
175 TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
176 sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
177 taskqueue_thread_enqueue, &sc->ev_tq);
179 /* Run the task queue with lowest priority */
180 taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
181 device_get_nameunit(sc->mrsas_dev));
182 mtx_lock(&sc->sim_lock);
183 if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
184 cam_sim_free(sc->sim_0, TRUE); /* passing true frees the devq */
185 mtx_unlock(&sc->sim_lock);
188 if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
189 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
190 xpt_bus_deregister(cam_sim_path(sc->sim_0));
191 cam_sim_free(sc->sim_0, TRUE); /* passing true will free the devq */
193 mtx_unlock(&sc->sim_lock);
196 mtx_unlock(&sc->sim_lock);
199 * Create SIM for bus 1 and register, also create path
201 sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
202 device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
203 mrsas_cam_depth, devq);
204 if (sc->sim_1 == NULL) {
206 device_printf(sc->mrsas_dev, "Cannot register SIM\n");
209 mtx_lock(&sc->sim_lock);
210 if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
211 cam_sim_free(sc->sim_1, TRUE); /* passing true frees the devq */
212 mtx_unlock(&sc->sim_lock);
215 if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
217 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
218 xpt_bus_deregister(cam_sim_path(sc->sim_1));
219 cam_sim_free(sc->sim_1, TRUE);
220 mtx_unlock(&sc->sim_lock);
223 mtx_unlock(&sc->sim_lock);
229 * mrsas_cam_detach: De-allocates and tears down CAM
230 * input: Adapter instance soft state
232 * De-registers and frees the paths and SIMs.
235 mrsas_cam_detach(struct mrsas_softc *sc)
237 if (sc->ev_tq != NULL)
238 taskqueue_free(sc->ev_tq);
239 mtx_lock(&sc->sim_lock);
241 xpt_free_path(sc->path_0);
243 xpt_bus_deregister(cam_sim_path(sc->sim_0));
244 cam_sim_free(sc->sim_0, FALSE);
247 xpt_free_path(sc->path_1);
249 xpt_bus_deregister(cam_sim_path(sc->sim_1));
250 cam_sim_free(sc->sim_1, TRUE);
252 mtx_unlock(&sc->sim_lock);
256 * mrsas_action: SIM callback entry point
257 * input: pointer to SIM, pointer to CAM Control Block
259 * This function processes CAM subsystem requests. The type of request is stored
260 * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because
261 * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier.
264 mrsas_action(struct cam_sim *sim, union ccb *ccb)
266 struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
267 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
271 * Check if the system is going down
272 * or the adapter is in unrecoverable critical error
274 if (sc->remove_in_progress ||
275 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
276 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
281 switch (ccb->ccb_h.func_code) {
284 device_id = ccb_h->target_id;
287 * bus 0 is LD, bus 1 is for system-PD
289 if (cam_sim_bus(sim) == 1 &&
290 sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
291 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
294 if (mrsas_startio(sc, sim, ccb)) {
295 ccb->ccb_h.status |= CAM_REQ_INVALID;
303 ccb->ccb_h.status = CAM_UA_ABORT;
312 case XPT_GET_TRAN_SETTINGS:
314 ccb->cts.protocol = PROTO_SCSI;
315 ccb->cts.protocol_version = SCSI_REV_2;
316 ccb->cts.transport = XPORT_SPI;
317 ccb->cts.transport_version = 2;
318 ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
319 ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
320 ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
321 ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
322 ccb->ccb_h.status = CAM_REQ_CMP;
326 case XPT_SET_TRAN_SETTINGS:
328 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
332 case XPT_CALC_GEOMETRY:
334 cam_calc_geometry(&ccb->ccg, 1);
340 ccb->cpi.version_num = 1;
341 ccb->cpi.hba_inquiry = 0;
342 ccb->cpi.target_sprt = 0;
343 ccb->cpi.hba_misc = PIM_UNMAPPED;
344 ccb->cpi.hba_eng_cnt = 0;
345 ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
346 ccb->cpi.unit_number = cam_sim_unit(sim);
347 ccb->cpi.bus_id = cam_sim_bus(sim);
348 ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
349 ccb->cpi.base_transfer_speed = 150000;
350 strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
351 strlcpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN);
352 strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
353 ccb->cpi.transport = XPORT_SPI;
354 ccb->cpi.transport_version = 2;
355 ccb->cpi.protocol = PROTO_SCSI;
356 ccb->cpi.protocol_version = SCSI_REV_2;
357 if (ccb->cpi.bus_id == 0)
358 ccb->cpi.max_target = MRSAS_MAX_PD - 1;
360 ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
361 ccb->cpi.maxio = sc->max_sectors_per_req * 512;
362 ccb->ccb_h.status = CAM_REQ_CMP;
368 ccb->ccb_h.status = CAM_REQ_INVALID;
376 * mrsas_scsiio_timeout: Callback function for IO timed out
377 * input: mpt command context
379 * This function executes when the timer armed from the CCB header's timeout
380 * value expires. The driver runs this timer for all DCDB and LDIO requests
381 * coming from the CAM layer. It is the callback function for IO timeouts
382 * and runs in a no-sleep context. It sets do_timedout_reset in the adapter
383 * context so that OCR/Kill adapter is executed from the ocr_thread context.
386 mrsas_scsiio_timeout(void *data)
388 struct mrsas_mpt_cmd *cmd;
389 struct mrsas_softc *sc;
395 cmd = (struct mrsas_mpt_cmd *)data;
398 if (cmd->ccb_ptr == NULL) {
399 printf("command timeout with NULL ccb\n");
404 * The callout below is a dummy entry so that it can be cancelled from
405 * mrsas_cmd_done(). The controller will then go to OCR/Kill Adapter, based
406 * on the controller's OCR enable/disable property, from the ocr_thread context.
409 callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
410 mrsas_scsiio_timeout, cmd, 0);
412 if (cmd->ccb_ptr->cpi.bus_id == 0)
413 target_id = cmd->ccb_ptr->ccb_h.target_id;
415 target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));
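/*
 * Illustrative slot computation (assuming MRSAS_MAX_PD is 256): a timed-out
 * IO on bus 0, target 2 is tracked in target_reset_pool slot 2, while one on
 * bus 1, target 2 lands in slot 2 + (MRSAS_MAX_PD - 1) = 257.
 */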
417 /* Save the cmd to be processed for TM, if it is not there in the array */
418 if (sc->target_reset_pool[target_id] == NULL) {
419 sc->target_reset_pool[target_id] = cmd;
420 mrsas_atomic_inc(&sc->target_reset_outstanding);
427 * mrsas_startio: SCSI IO entry point
428 * input: Adapter instance soft state
429 * pointer to CAM Control Block
431 * This function is the SCSI IO entry point and it initiates IO processing. It
432 * copies the IO and, depending on the IO type, calls mrsas_build_ldio_rw(),
433 * mrsas_build_ldio_nonrw() or mrsas_build_syspdio(). It returns 0
434 * if the command is sent to firmware successfully, otherwise it returns 1.
437 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
440 struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
441 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
442 struct ccb_scsiio *csio = &(ccb->csio);
443 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
446 if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
447 (!sc->fw_sync_cache_support)) {
448 ccb->ccb_h.status = CAM_REQ_CMP;
452 ccb_h->status |= CAM_SIM_QUEUED;
454 if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) {
455 ccb_h->status |= CAM_REQUEUE_REQ;
457 mrsas_atomic_dec(&sc->fw_outstanding);
461 cmd = mrsas_get_mpt_cmd(sc);
464 ccb_h->status |= CAM_REQUEUE_REQ;
466 mrsas_atomic_dec(&sc->fw_outstanding);
470 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
471 if (ccb_h->flags & CAM_DIR_IN)
472 cmd->flags |= MRSAS_DIR_IN;
473 if (ccb_h->flags & CAM_DIR_OUT)
474 cmd->flags |= MRSAS_DIR_OUT;
476 cmd->flags = MRSAS_DIR_NONE; /* no data */
479 * XXX We don't yet support physical addresses here.
481 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
483 case CAM_DATA_SG_PADDR:
484 device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
486 mrsas_release_mpt_cmd(cmd);
487 ccb_h->status = CAM_REQ_INVALID;
488 ccb_h->status &= ~CAM_SIM_QUEUED;
491 device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
493 mrsas_release_mpt_cmd(cmd);
494 ccb_h->status = CAM_REQ_INVALID;
497 cmd->length = csio->dxfer_len;
499 cmd->data = csio->data_ptr;
502 cmd->length = csio->dxfer_len;
504 cmd->data = csio->data_ptr;
507 ccb->ccb_h.status = CAM_REQ_INVALID;
514 req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
516 device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
519 memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
520 cmd->request_desc = req_desc;
522 if (ccb_h->flags & CAM_CDB_POINTER)
523 bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
525 bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
526 mtx_lock(&sc->raidmap_lock);
528 /* Check for IO type READ-WRITE targeted for Logical Volume */
529 cmd_type = mrsas_find_io_type(sim, ccb);
531 case READ_WRITE_LDIO:
532 /* Build READ-WRITE IO for Logical Volume */
533 if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
534 device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
535 mtx_unlock(&sc->raidmap_lock);
536 mrsas_release_mpt_cmd(cmd);
540 case NON_READ_WRITE_LDIO:
541 /* Build NON READ-WRITE IO for Logical Volume */
542 if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
543 device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
544 mtx_unlock(&sc->raidmap_lock);
545 mrsas_release_mpt_cmd(cmd);
549 case READ_WRITE_SYSPDIO:
550 case NON_READ_WRITE_SYSPDIO:
551 if (sc->secure_jbod_support &&
552 (cmd_type == NON_READ_WRITE_SYSPDIO)) {
553 /* Build NON-RW IO for JBOD */
554 if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
555 device_printf(sc->mrsas_dev,
556 "Build SYSPDIO failed.\n");
557 mtx_unlock(&sc->raidmap_lock);
558 mrsas_release_mpt_cmd(cmd);
562 /* Build RW IO for JBOD */
563 if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
564 device_printf(sc->mrsas_dev,
565 "Build SYSPDIO failed.\n");
566 mtx_unlock(&sc->raidmap_lock);
567 mrsas_release_mpt_cmd(cmd);
572 mtx_unlock(&sc->raidmap_lock);
574 if (cmd->flags == MRSAS_DIR_IN) /* from device */
575 cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_READ);
576 else if (cmd->flags == MRSAS_DIR_OUT) /* to device */
577 cmd->io_request->Control |= htole32(MPI2_SCSIIO_CONTROL_WRITE);
579 cmd->io_request->SGLFlags = htole16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
580 cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
581 cmd->io_request->SenseBufferLowAddress = htole32(cmd->sense_phys_addr & 0xFFFFFFFF);
582 cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;
584 req_desc = cmd->request_desc;
585 req_desc->SCSIIO.SMID = htole16(cmd->index);
588 * Start timer for IO timeout. The default timeout value is 180 seconds.
590 cmd->callout_owner = true;
591 callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
592 mrsas_scsiio_timeout, cmd, 0);
594 if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
595 sc->io_cmds_highwater++;
598 * If the IO is RAID 1/10 fast path write capable, try to get a second
599 * command from the pool and construct it. From the FW it has been
600 * confirmed that the LBA values of the two PDs corresponding to a
601 * single R1/10 LD are always the same.
605 * The driver-side count should always stay below max_fw_cmds so that a new command can be obtained.
608 if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
609 mrsas_prepare_secondRaid1_IO(sc, cmd);
610 mrsas_fire_cmd(sc, req_desc->addr.u.low,
611 req_desc->addr.u.high);
612 r1_cmd = cmd->peer_cmd;
613 mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
614 r1_cmd->request_desc->addr.u.high);
616 mrsas_fire_cmd(sc, req_desc->addr.u.low,
617 req_desc->addr.u.high);
624 mrsas_atomic_dec(&sc->fw_outstanding);
629 * mrsas_find_io_type: Determines whether the IO is read/write or not
630 * input: pointer to SIM, pointer to CAM Control Block
632 * This function classifies the IO from its CDB opcode and the SIM bus. It returns
633 * READ_WRITE_LDIO/READ_WRITE_SYSPDIO for read/write IOs and the NON_READ_WRITE variants otherwise.
636 mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb)
638 struct ccb_scsiio *csio = &(ccb->csio);
640 switch (csio->cdb_io.cdb_bytes[0]) {
649 return (cam_sim_bus(sim) ?
650 READ_WRITE_SYSPDIO : READ_WRITE_LDIO);
652 return (cam_sim_bus(sim) ?
653 NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO);
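/*
 * Illustrative classification (not exhaustive): a READ_10 (opcode 0x28)
 * arriving on bus 0 is returned as READ_WRITE_LDIO and on bus 1 as
 * READ_WRITE_SYSPDIO, while an INQUIRY (0x12) is returned as
 * NON_READ_WRITE_LDIO on bus 0 and NON_READ_WRITE_SYSPDIO on bus 1.
 */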
658 * mrsas_get_mpt_cmd: Get a cmd from free command pool
659 * input: Adapter instance soft state
661 * This function removes an MPT command from the command free list and initializes it.
664 struct mrsas_mpt_cmd *
665 mrsas_get_mpt_cmd(struct mrsas_softc *sc)
667 struct mrsas_mpt_cmd *cmd = NULL;
669 mtx_lock(&sc->mpt_cmd_pool_lock);
670 if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
671 cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
672 TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
677 memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
682 cmd->load_balance = 0;
685 mtx_unlock(&sc->mpt_cmd_pool_lock);
690 * mrsas_release_mpt_cmd: Return a cmd to free command pool
691 * input: Command packet for return to free command pool
693 * This function returns an MPT command to the free command list.
696 mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
698 struct mrsas_softc *sc = cmd->sc;
700 mtx_lock(&sc->mpt_cmd_pool_lock);
701 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
702 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
703 cmd->peer_cmd = NULL;
704 cmd->cmd_completed = 0;
705 memset((uint8_t *)cmd->io_request, 0,
706 sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
707 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
708 mtx_unlock(&sc->mpt_cmd_pool_lock);
714 * mrsas_get_request_desc: Get request descriptor from array
715 * input: Adapter instance soft state
718 * This function returns a pointer to the request descriptor.
720 MRSAS_REQUEST_DESCRIPTOR_UNION *
721 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
725 KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
726 p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
728 return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
731 /* mrsas_prepare_secondRaid1_IO
732 * It prepares the second IO of a RAID 1 fast-path write pair
735 mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
736 struct mrsas_mpt_cmd *cmd)
738 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
739 struct mrsas_mpt_cmd *r1_cmd;
741 r1_cmd = cmd->peer_cmd;
742 req_desc = cmd->request_desc;
745 * Copy the IO request frame as well as 8 SGEs worth of data for the r1 command
748 memcpy(r1_cmd->io_request, cmd->io_request,
749 (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
750 memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
751 (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));
753 /* sense buffer is different for r1 command */
754 r1_cmd->io_request->SenseBufferLowAddress = htole32(r1_cmd->sense_phys_addr & 0xFFFFFFFF);
755 r1_cmd->ccb_ptr = cmd->ccb_ptr;
757 req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
758 req_desc2->addr.Words = 0;
759 r1_cmd->request_desc = req_desc2;
760 req_desc2->SCSIIO.SMID = r1_cmd->index;
761 req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
762 r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
763 r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
764 r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
765 cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
767 r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
770 * The MSIxIndex of both commands' request descriptors should be the same
773 r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex;
774 /* span arm is different for r1 cmd */
775 r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
776 cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;
781 * mrsas_build_ldio_rw: Builds an LDIO command
782 * input: Adapter instance soft state
783 * Pointer to command packet
786 * This function builds the read/write LDIO command packet. It returns 0 if the
787 * command is built successfully, otherwise it returns 1.
790 mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
793 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
794 struct ccb_scsiio *csio = &(ccb->csio);
796 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
798 device_id = ccb_h->target_id;
800 io_request = cmd->io_request;
801 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
802 io_request->RaidContext.raid_context.status = 0;
803 io_request->RaidContext.raid_context.exStatus = 0;
805 /* just the cdb len, other flags zero, and ORed-in later for FP */
806 io_request->IoFlags = htole16(csio->cdb_len);
808 if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
809 device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
811 io_request->DataLength = htole32(cmd->length);
813 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
814 if (sc->is_ventura || sc->is_aero)
815 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
818 * numSGE stores the lower 8 bits of sge_count; numSGEExt stores the
819 * higher 8 bits of sge_count
821 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
822 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
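/*
 * Worked example (illustrative only): with cmd->sge_count = 0x1A3 (419
 * SGEs), numSGE is set to 0xA3 and numSGEExt to 0x01; firmware recombines
 * them as (numSGEExt << 8) | numSGE = 0x1A3.
 */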
826 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
832 /* stream detection on read and write IOs */
834 mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
835 struct IO_REQUEST_INFO *io_info)
837 u_int32_t device_id = io_info->ldTgtId;
838 LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id];
839 u_int32_t *track_stream = ¤t_ld_SD->mruBitMap;
840 u_int32_t streamNum, shiftedValues, unshiftedValues;
841 u_int32_t indexValueMask, shiftedValuesMask;
843 boolean_t isReadAhead = false;
844 STREAM_DETECT *current_SD;
846 /* find possible stream */
847 for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
848 streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
850 current_SD = ¤t_ld_SD->streamTrack[streamNum];
852 * if we found a stream, update the raid context and
853 * also update the mruBitMap
855 if (current_SD->nextSeqLBA &&
856 io_info->ldStartBlock >= current_SD->nextSeqLBA &&
857 (io_info->ldStartBlock <= (current_SD->nextSeqLBA+32)) &&
858 (current_SD->isRead == io_info->isRead)) {
859 if (io_info->ldStartBlock != current_SD->nextSeqLBA &&
860 (!io_info->isRead || !isReadAhead)) {
862 * Once the API is available we need to change this.
863 * At this point we are not allowing any gap
867 cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE;
868 current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
870 * update the mruBitMap LRU
872 shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1 ;
873 shiftedValues = ((*track_stream & shiftedValuesMask) <<
874 BITS_PER_INDEX_STREAM);
875 indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM;
876 unshiftedValues = (*track_stream) &
877 (~(shiftedValuesMask | indexValueMask));
879 (unshiftedValues | shiftedValues | streamNum);
884 * if we did not find any stream, create a new one from the least recently used
886 streamNum = (*track_stream >>
887 ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
888 current_SD = ¤t_ld_SD->streamTrack[streamNum];
889 current_SD->isRead = io_info->isRead;
890 current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
891 *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum);
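/*
 * Worked example of the mruBitMap bookkeeping above (illustrative, assuming
 * BITS_PER_INDEX_STREAM is 4, i.e. one nibble per tracked stream): with
 * *track_stream = 0x76543210 and a hit at position i = 2 (streamNum = 2),
 * shiftedValuesMask = 0xFF, shiftedValues = 0x100, indexValueMask = 0xF00
 * and unshiftedValues = 0x76543000, so the new value is 0x76543102 -- the
 * matched stream moves to the most-recently-used nibble and the entries
 * that were ahead of it shift back by one slot.
 */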
896 * mrsas_setup_io: Set up data including Fast Path I/O
897 * input: Adapter instance soft state
898 * Pointer to command packet
901 * This function sets up the IO for an LD, including the RAID context and Fast
902 * Path eligibility. It returns 0 if the IO is set up successfully, otherwise 1.
905 mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
906 union ccb *ccb, u_int32_t device_id,
907 MRSAS_RAID_SCSI_IO_REQUEST * io_request)
909 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
910 struct ccb_scsiio *csio = &(ccb->csio);
911 struct IO_REQUEST_INFO io_info;
912 MR_DRV_RAID_MAP_ALL *map_ptr;
913 struct mrsas_mpt_cmd *r1_cmd = NULL;
916 u_int8_t fp_possible;
917 u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
918 u_int32_t datalength = 0;
920 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
927 * READ_6 (0x08) or WRITE_6 (0x0A) cdb
929 if (csio->cdb_len == 6) {
930 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
931 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
932 ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
933 (u_int32_t)csio->cdb_io.cdb_bytes[3];
934 start_lba_lo &= 0x1FFFFF;
937 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
939 else if (csio->cdb_len == 10) {
940 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
941 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
942 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
943 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
944 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
945 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
948 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
950 else if (csio->cdb_len == 12) {
951 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
952 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
953 ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
954 ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
955 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
956 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
957 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
958 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
961 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
963 else if (csio->cdb_len == 16) {
964 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
965 ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
966 ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
967 ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
968 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
969 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
970 (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
971 ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
972 start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
973 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
974 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
975 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
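/*
 * Worked example (illustrative only): for a READ_10 CDB of
 * 28 00 00 12 34 56 00 00 80 00, the decode above yields
 * start_lba_lo = 0x00123456, start_lba_hi remains 0 and datalength = 0x80,
 * i.e. a 128-block read starting at LBA 0x123456.
 */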
977 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
978 io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
979 io_info.numBlocks = datalength;
980 io_info.ldTgtId = device_id;
981 io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
983 io_request->DataLength = htole32(cmd->length);
985 switch (ccb_h->flags & CAM_DIR_MASK) {
994 mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
998 map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
999 ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr);
1001 ld = MR_TargetIdToLdGet(device_id, map_ptr);
1002 if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) {
1003 io_request->RaidContext.raid_context.regLockFlags = 0;
1006 if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext.raid_context, map_ptr))
1007 fp_possible = io_info.fpOkForIo;
1010 raid = MR_LdRaidGet(ld, map_ptr);
1011 /* Store the TM capability value in cmd */
1012 cmd->tmCapable = raid->capability.tmCapable;
1014 cmd->request_desc->SCSIIO.MSIxIndex =
1015 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
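/*
 * For example (illustrative only): with 8 MSI-x vectors and the submitting
 * CPU's PCPU_GET(cpuid) returning 11, the completion for this IO is steered
 * to reply queue 11 % 8 = 3; with MSI-x disabled (msix_vectors == 0),
 * everything completes on queue 0.
 */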
1017 if (sc->is_ventura || sc->is_aero) {
1018 if (sc->streamDetectByLD) {
1019 mtx_lock(&sc->stream_lock);
1020 mrsas_stream_detect(sc, cmd, &io_info);
1021 mtx_unlock(&sc->stream_lock);
1022 /* In Ventura, if a stream is detected for a read and the LD
1023 * is read-ahead capable, issue this IO as an LDIO */
1024 if (io_request->RaidContext.raid_context_g35.streamDetected &&
1025 io_info.isRead && io_info.raCapable)
1026 fp_possible = FALSE;
1029 /* Set the raid 1/10 fast path write capable bit in io_info.
1030 * Note - reset peer_cmd and r1_alt_dev_handle if fp_possible is
1031 * disabled after this point. Try not to add more checks that
1032 * toggle fp_possible after this.
1035 (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) &&
1036 (raid->level == 1) && !io_info.isRead) {
1037 if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) {
1038 fp_possible = FALSE;
1039 mrsas_atomic_dec(&sc->fw_outstanding);
1041 r1_cmd = mrsas_get_mpt_cmd(sc);
1043 fp_possible = FALSE;
1044 mrsas_atomic_dec(&sc->fw_outstanding);
1047 cmd->peer_cmd = r1_cmd;
1048 r1_cmd->peer_cmd = cmd;
1055 mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
1056 start_lba_lo, ld_block_size);
1057 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1058 cmd->request_desc->SCSIIO.RequestFlags =
1059 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1060 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1061 if (sc->mrsas_gen3_ctrl) {
1062 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
1063 cmd->request_desc->SCSIIO.RequestFlags =
1064 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1065 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1066 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1067 io_request->RaidContext.raid_context.nseg = 0x1;
1068 io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1069 io_request->RaidContext.raid_context.regLockFlags |=
1070 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1071 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1072 } else if (sc->is_ventura || sc->is_aero) {
1073 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
1074 io_request->RaidContext.raid_context_g35.nseg = 0x1;
1075 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1076 io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1077 if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) {
1078 io_request->RaidContext.raid_context_g35.RAIDFlags =
1079 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
1080 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1083 if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
1086 mrsas_get_updated_dev_handle(sc,
1087 &sc->load_balance_info[device_id], &io_info);
1088 cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
1089 cmd->pd_r1_lb = io_info.pd_after_lb;
1090 if (sc->is_ventura || sc->is_aero)
1091 io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm;
1093 io_request->RaidContext.raid_context.spanArm = io_info.span_arm;
1095 cmd->load_balance = 0;
1097 if (sc->is_ventura || sc->is_aero)
1098 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
1100 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
1102 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1103 io_request->DevHandle = io_info.devHandle;
1104 cmd->pdInterface = io_info.pdInterface;
1107 io_request->RaidContext.raid_context.timeoutValue = htole16(map_ptr->raidMap.fpPdIoTimeoutSec);
1108 cmd->request_desc->SCSIIO.RequestFlags =
1109 (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
1110 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1111 if (sc->mrsas_gen3_ctrl) {
1112 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
1113 cmd->request_desc->SCSIIO.RequestFlags =
1114 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1115 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1116 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1117 io_request->RaidContext.raid_context.regLockFlags |=
1118 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1119 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1120 io_request->RaidContext.raid_context.nseg = 0x1;
1121 } else if (sc->is_ventura || sc->is_aero) {
1122 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
1123 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1124 io_request->RaidContext.raid_context_g35.nseg = 0x1;
1126 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1127 io_request->DevHandle = htole16(device_id);
1133 * mrsas_build_ldio_nonrw: Builds an LDIO command
1134 * input: Adapter instance soft state
1135 * Pointer to command packet
1138 * This function builds the non-read/write LDIO command packet. It returns 0 if
1139 * the command is built successfully, otherwise it returns 1.
1142 mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
1145 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
1146 u_int32_t device_id, ld;
1147 MR_DRV_RAID_MAP_ALL *map_ptr;
1149 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1151 io_request = cmd->io_request;
1152 device_id = ccb_h->target_id;
1154 map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1155 ld = MR_TargetIdToLdGet(device_id, map_ptr);
1156 raid = MR_LdRaidGet(ld, map_ptr);
1157 /* Store the TM capability value in cmd */
1158 cmd->tmCapable = raid->capability.tmCapable;
1160 /* FW path for LD Non-RW (SCSI management commands) */
1161 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1162 io_request->DevHandle = device_id;
1163 cmd->request_desc->SCSIIO.RequestFlags =
1164 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1165 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1167 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1168 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1169 io_request->DataLength = cmd->length;
1171 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1172 if (sc->is_ventura || sc->is_aero)
1173 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
1176 * numSGE store lower 8 bit of sge_count. numSGEExt store
1177 * higher 8 bit of sge_count
1179 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
1180 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1183 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
1190 * mrsas_build_syspdio: Builds a DCDB command
1191 * input: Adapter instance soft state
1192 * Pointer to command packet
1195 * This function builds the DCDB command for a system PD. It returns 0 if the
1196 * command is built successfully, otherwise it returns 1.
1199 mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
1200 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
1202 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
1203 u_int32_t device_id;
1204 MR_DRV_RAID_MAP_ALL *local_map_ptr;
1205 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1206 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1208 io_request = cmd->io_request;
1209 device_id = ccb_h->target_id;
1210 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1211 io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1212 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1213 io_request->RaidContext.raid_context.regLockFlags = 0;
1214 io_request->RaidContext.raid_context.regLockRowLBA = 0;
1215 io_request->RaidContext.raid_context.regLockLength = 0;
1217 cmd->pdInterface = sc->target_list[device_id].interface_type;
1219 /* If FW supports PD sequence number */
1220 if (sc->use_seqnum_jbod_fp &&
1221 sc->pd_list[device_id].driveType == 0x00) {
1222 //printf("Using Drv seq num\n");
1223 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
1224 cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
1225 /* More than 256 PD/JBOD support for Ventura */
1226 if (sc->support_morethan256jbod)
1227 io_request->RaidContext.raid_context.VirtualDiskTgtId =
1228 pd_sync->seq[device_id].pdTargetId;
1230 io_request->RaidContext.raid_context.VirtualDiskTgtId =
1231 htole16(device_id + 255);
1232 io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum;
1233 io_request->DevHandle = pd_sync->seq[device_id].devHandle;
1234 if (sc->is_ventura || sc->is_aero)
1235 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1237 io_request->RaidContext.raid_context.regLockFlags |=
1238 (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
1239 /* raid_context.Type = MPI2_TYPE_CUDA is valid only
1240 * if FW supports JBOD sequence numbers
1242 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1243 io_request->RaidContext.raid_context.nseg = 0x1;
1244 } else if (sc->fast_path_io) {
1245 //printf("Using LD RAID map\n");
1246 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
1247 io_request->RaidContext.raid_context.configSeqNum = 0;
1248 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1249 io_request->DevHandle =
1250 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1252 //printf("Using FW PATH\n");
1253 /* Want to send all IO via FW path */
1254 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
1255 io_request->RaidContext.raid_context.configSeqNum = 0;
1256 io_request->DevHandle = MR_DEVHANDLE_INVALID;
1259 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
1260 cmd->request_desc->SCSIIO.MSIxIndex =
1261 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
1264 /* system pd firmware path */
1265 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1266 cmd->request_desc->SCSIIO.RequestFlags =
1267 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1268 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1269 io_request->RaidContext.raid_context.timeoutValue =
1270 htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1271 io_request->RaidContext.raid_context.VirtualDiskTgtId = htole16(device_id);
1273 /* system pd fast path */
1274 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1275 io_request->RaidContext.raid_context.timeoutValue = htole16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
1278 * NOTE - For system pd RW cmds only, IoFlags will be FAST_PATH,
1279 * because the non-RW cmds will now go via the FW queue
1280 * and not the exception queue
1282 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
1283 io_request->IoFlags |= htole16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
1285 cmd->request_desc->SCSIIO.RequestFlags =
1286 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1287 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1290 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1291 io_request->DataLength = htole32(cmd->length);
1293 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1294 if (sc->is_ventura || sc->is_aero)
1295 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
1298 * numSGE store lower 8 bit of sge_count. numSGEExt store
1299 * higher 8 bit of sge_count
1301 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
1302 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1305 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
1312 * mrsas_is_prp_possible: This function will tell whether PRPs should be built or not
1313 * sc: Adapter instance soft state
1314 * cmd: MPT command frame pointer
1315 * nsegs: Number of OS SGEs
1317 * This function checks whether the IO qualifies for building PRPs
1318 * return: true: if PRP should be built
1319 * false: if IEEE SGLs should be built
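 *
 * Worked example (illustrative, assuming a 4 KiB NVMe page size): a 24 KiB
 * transfer (more than 5 pages) always qualifies for PRPs; an 18 KiB transfer
 * (between 4 and 5 pages) qualifies only if the first SGE is shorter than
 * the 2 KiB residual beyond the first 4 pages; and any middle SGE whose
 * address or length is not page-aligned counts as an SGE hole and forces
 * IEEE SGLs instead.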
1321 static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
1322 bus_dma_segment_t *segs, int nsegs)
1324 struct mrsas_softc *sc = cmd->sc;
1326 u_int32_t data_length = 0;
1327 bool build_prp = false;
1328 u_int32_t mr_nvme_pg_size;
1330 mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
1331 data_length = cmd->length;
1333 if (data_length > (mr_nvme_pg_size * 5))
1335 else if ((data_length > (mr_nvme_pg_size * 4)) &&
1336 (data_length <= (mr_nvme_pg_size * 5))) {
1337 /* check if 1st SG entry size is < residual beyond 4 pages */
1338 if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4)))
1342 /*check for SGE holes here*/
1343 for (i = 0; i < nsegs; i++) {
1344 /* check for mid SGEs */
1345 if ((i != 0) && (i != (nsegs - 1))) {
1346 if ((segs[i].ds_addr % mr_nvme_pg_size) ||
1347 (segs[i].ds_len % mr_nvme_pg_size)) {
1349 mrsas_atomic_inc(&sc->sge_holes);
1354 /* check for first SGE*/
1355 if ((nsegs > 1) && (i == 0)) {
1356 if ((segs[i].ds_addr + segs[i].ds_len) % mr_nvme_pg_size) {
1358 mrsas_atomic_inc(&sc->sge_holes);
1363 /* check for Last SGE*/
1364 if ((nsegs > 1) && (i == (nsegs - 1))) {
1365 if (segs[i].ds_addr % mr_nvme_pg_size) {
1367 mrsas_atomic_inc(&sc->sge_holes);
1377 * mrsas_map_request: Map and load data
1378 * input: Adapter instance soft state
1379 * Pointer to command packet
1381 * For data from OS, map and load the data buffer into bus space. The SG list
1382 * is built in the callback. If the bus dmamap load is not successful,
1383 * cmd->error_code will contain the error code and 1 is returned.
1386 mrsas_map_request(struct mrsas_softc *sc,
1387 struct mrsas_mpt_cmd *cmd, union ccb *ccb)
1389 u_int32_t retcode = 0;
1390 struct cam_sim *sim;
1392 sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);
1394 if (cmd->data != NULL) {
1395 /* Map data buffer into bus space */
1396 mtx_lock(&sc->io_lock);
1397 retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
1398 mrsas_data_load_cb, cmd, 0);
1399 mtx_unlock(&sc->io_lock);
1401 device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
1402 if (retcode == EINPROGRESS) {
1403 device_printf(sc->mrsas_dev, "request load in progress\n");
1404 mrsas_freeze_simq(cmd, sim);
1407 if (cmd->error_code)
1413 * mrsas_unmap_request: Unmap and unload data
1414 * input: Adapter instance soft state
1415 * Pointer to command packet
1417 * This function unmaps and unloads data from OS.
1420 mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1422 if (cmd->data != NULL) {
1423 if (cmd->flags & MRSAS_DIR_IN)
1424 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
1425 if (cmd->flags & MRSAS_DIR_OUT)
1426 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
1427 mtx_lock(&sc->io_lock);
1428 bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
1429 mtx_unlock(&sc->io_lock);
1434 * mrsas_build_ieee_sgl - Prepare IEEE SGLs
1435 * @sc: Adapter soft state
1436 * @segs: OS SGEs pointers
1437 * @nseg: Number of OS SGEs
1438 * @cmd: Fusion command frame
1441 static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
1443 struct mrsas_softc *sc = cmd->sc;
1444 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1445 pMpi25IeeeSgeChain64_t sgl_ptr;
1446 int i = 0, sg_processed = 0;
1448 io_request = cmd->io_request;
1449 sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
1451 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1452 pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
1454 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
1455 sgl_ptr_end->Flags = 0;
1458 for (i = 0; i < nseg; i++) {
1459 sgl_ptr->Address = htole64(segs[i].ds_addr);
1460 sgl_ptr->Length = htole32(segs[i].ds_len);
1462 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1464 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1467 sg_processed = i + 1;
1468 if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
1469 (nseg > sc->max_sge_in_main_msg)) {
1470 pMpi25IeeeSgeChain64_t sg_chain;
1472 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1473 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1474 != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1475 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1477 cmd->io_request->ChainOffset = 0;
1479 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1481 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
1482 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1484 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1485 sg_chain->Length = htole32((sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed)));
1486 sg_chain->Address = htole64(cmd->chain_frame_phys_addr);
1487 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
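/*
 * Chaining example (illustrative, assuming max_sge_in_main_msg is 8): for a
 * 20-segment transfer, SGEs 0-6 are placed in the main message, the 8th slot
 * becomes the chain element pointing at cmd->chain_frame, and the remaining
 * 13 SGEs are written into the chain frame; the chain element's Length is
 * 13 * sizeof(MPI2_SGE_IO_UNION).
 */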
1494 * mrsas_build_prp_nvme - Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
1495 * @sc: Adapter soft state
1496 * @segs: OS SGEs pointers
1497 * @nseg: Number of OS SGEs
1498 * @cmd: Fusion command frame
1501 static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
1503 struct mrsas_softc *sc = cmd->sc;
1504 int sge_len, offset, num_prp_in_chain = 0;
1505 pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr;
1507 bus_addr_t ptr_sgl_phys;
1509 u_int32_t page_mask, page_mask_result, i = 0;
1510 u_int32_t first_prp_len;
1511 int data_len = cmd->length;
1512 u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size,
1513 MR_DEFAULT_NVME_PAGE_SIZE);
1515 sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL;
1517 * NVMe has a very convoluted PRP format. One PRP is required
1518 * for each page or partial page. We need to split up OS SG
1519 * entries if they are longer than one page or cross a page
1520 * boundary. We also have to insert a PRP list pointer entry as
1521 * the last entry in each physical page of the PRP list.
1523 * NOTE: The first PRP "entry" is actually placed in the first
1524 * SGL entry in the main message in IEEE 64 format. The 2nd
1525 * entry in the main message is the chain element, and the rest
1526 * of the PRP entries are built in the contiguous PCIe buffer.
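 *
 * Worked example (illustrative, assuming a 4 KiB NVMe page size): for a
 * 12 KiB transfer whose first segment starts at physical address
 * 0x10000E00, the first PRP "entry" in the main SGL slot covers
 * 0x10000E00-0x10000FFF (first_prp_len = 0x200), and the chain frame then
 * holds the page-aligned PRPs 0x10001000, 0x10002000 and 0x10003000 that
 * cover the remaining 11.5 KiB.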
1528 page_mask = mr_nvme_pg_size - 1;
1529 ptr_sgl = (u_int64_t *) cmd->chain_frame;
1530 ptr_sgl_phys = cmd->chain_frame_phys_addr;
1531 memset(ptr_sgl, 0, sc->max_chain_frame_sz);
1533 /* Build chain frame element which holds all PRPs except first*/
1534 main_chain_element = (pMpi25IeeeSgeChain64_t)
1535 ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64));
1537 main_chain_element->Address = cmd->chain_frame_phys_addr;
1538 main_chain_element->NextChainOffset = 0;
1539 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1540 IEEE_SGE_FLAGS_SYSTEM_ADDR |
1541 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
1543 /* Build the first PRP; the SGE need not be PAGE aligned */
1544 ptr_first_sgl = sgl_ptr;
1545 sge_addr = segs[i].ds_addr;
1546 sge_len = segs[i].ds_len;
1549 offset = (u_int32_t) (sge_addr & page_mask);
1550 first_prp_len = mr_nvme_pg_size - offset;
1552 ptr_first_sgl->Address = sge_addr;
1553 ptr_first_sgl->Length = first_prp_len;
1555 data_len -= first_prp_len;
1557 if (sge_len > first_prp_len) {
1558 sge_addr += first_prp_len;
1559 sge_len -= first_prp_len;
1560 } else if (sge_len == first_prp_len) {
1561 sge_addr = segs[i].ds_addr;
1562 sge_len = segs[i].ds_len;
1567 offset = (u_int32_t) (sge_addr & page_mask);
1569 /* Put PRP pointer due to page boundary*/
1570 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
1571 if (!page_mask_result) {
1572 device_printf(sc->mrsas_dev, "BRCM: Put prp pointer as we are at page boundary"
1573 " ptr_sgl: 0x%p\n", ptr_sgl);
1575 *ptr_sgl = (uintptr_t)ptr_sgl_phys;
1580 *ptr_sgl = sge_addr;
1585 sge_addr += mr_nvme_pg_size;
1586 sge_len -= mr_nvme_pg_size;
1587 data_len -= mr_nvme_pg_size;
1595 sge_addr = segs[i].ds_addr;
1596 sge_len = segs[i].ds_len;
1600 main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t);
1601 mrsas_atomic_inc(&sc->prp_count);
1606 * mrsas_data_load_cb: Callback entry point to build SGLs
1607 * input: Pointer to command packet as argument
1608 * Pointer to segment
1609 * Number of segments, Error
1611 * This is the callback function of the bus dma map load. It builds the SG list.
1614 mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1616 struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
1617 struct mrsas_softc *sc = cmd->sc;
1618 boolean_t build_prp = false;
1621 cmd->error_code = error;
1622 device_printf(sc->mrsas_dev, "mrsas_data_load_cb_prp: error=%d\n", error);
1623 if (error == EFBIG) {
1624 cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
1628 if (cmd->flags & MRSAS_DIR_IN)
1629 bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
1630 BUS_DMASYNC_PREREAD);
1631 if (cmd->flags & MRSAS_DIR_OUT)
1632 bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
1633 BUS_DMASYNC_PREWRITE);
1635 /* Check whether PRPs or IEEE SGLs should be built */
1636 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
1637 (cmd->pdInterface == NVME_PD))
1638 build_prp = mrsas_is_prp_possible(cmd, segs, nseg);
1640 if (build_prp == true)
1641 mrsas_build_prp_nvme(cmd, segs, nseg);
1643 mrsas_build_ieee_sgl(cmd, segs, nseg);
1645 cmd->sge_count = nseg;
1649 * mrsas_freeze_simq: Freeze SIM queue
1650 * input: Pointer to command packet
1653 * This function freezes the sim queue.
1656 mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
1658 union ccb *ccb = (union ccb *)(cmd->ccb_ptr);
1660 xpt_freeze_simq(sim, 1);
1661 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1662 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1666 mrsas_xpt_freeze(struct mrsas_softc *sc)
1668 xpt_freeze_simq(sc->sim_0, 1);
1669 xpt_freeze_simq(sc->sim_1, 1);
1673 mrsas_xpt_release(struct mrsas_softc *sc)
1675 xpt_release_simq(sc->sim_0, 1);
1676 xpt_release_simq(sc->sim_1, 1);
1680 * mrsas_cmd_done: Perform remaining command completion
1681 * input: Adapter instance soft state, Pointer to command packet
1683 * This function unmaps the request and releases the MPT command.
1686 mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1688 mrsas_unmap_request(sc, cmd);
1690 mtx_lock(&sc->sim_lock);
1691 if (cmd->callout_owner) {
1692 callout_stop(&cmd->cm_callout);
1693 cmd->callout_owner = false;
1695 xpt_done(cmd->ccb_ptr);
1696 cmd->ccb_ptr = NULL;
1697 mtx_unlock(&sc->sim_lock);
1698 mrsas_release_mpt_cmd(cmd);
1702 * mrsas_cam_poll: Polling entry point
1703 * input: Pointer to SIM
1705 * This polls for command completions on all MSI-x reply queues.
1708 mrsas_cam_poll(struct cam_sim *sim)
1711 struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
1713 if (sc->msix_vectors != 0){
1714 for (i=0; i<sc->msix_vectors; i++){
1715 mrsas_complete_cmd(sc, i);
1718 mrsas_complete_cmd(sc, 0);
1723 * mrsas_bus_scan: Perform bus scan
1724 * input: Adapter instance soft state
1726 * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should not
1727 * be called in FreeBSD 8.x and later versions, where the bus scan is automatic.
1731 mrsas_bus_scan(struct mrsas_softc *sc)
1736 if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
1739 if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
1740 xpt_free_ccb(ccb_0);
1743 mtx_lock(&sc->sim_lock);
1744 if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
1745 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1746 xpt_free_ccb(ccb_0);
1747 xpt_free_ccb(ccb_1);
1748 mtx_unlock(&sc->sim_lock);
1751 if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
1752 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1753 xpt_free_ccb(ccb_0);
1754 xpt_free_ccb(ccb_1);
1755 mtx_unlock(&sc->sim_lock);
1758 mtx_unlock(&sc->sim_lock);
1766 * mrsas_bus_scan_sim: Perform bus scan per SIM
1767 * input: adapter instance soft state
1769 * This function is called from the event handler on LD creation/deletion.
1773 mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
1777 if ((ccb = xpt_alloc_ccb()) == NULL) {
1780 mtx_lock(&sc->sim_lock);
1781 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
1782 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1784 mtx_unlock(&sc->sim_lock);
1787 mtx_unlock(&sc->sim_lock);
1794 * mrsas_track_scsiio: Track IOs for a given target in the mpt_cmd_list
1795 * input: Adapter instance soft state
1796 * Target ID of target
1797 * Bus ID of the target
1799 * This function checks for any pending IO in the whole mpt_cmd_list pool
1800 * with the bus_id and target_id passed in arguments. If some IO is found
1801 * that means target reset is not successfully completed.
1803 * Returns FAIL if IOs are pending to the target device, else returns SUCCESS
1806 mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id)
1809 struct mrsas_mpt_cmd *mpt_cmd = NULL;
1811 for (i = 0 ; i < sc->max_fw_cmds; i++) {
1812 mpt_cmd = sc->mpt_cmd_list[i];
1815 * Check if the target_id and bus_id are the same as those of the timed-out IO
1817 if (mpt_cmd->ccb_ptr) {
1818 /* bus_id = 1 denotes a VD */
1821 (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1));
1823 if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id &&
1824 mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) {
1825 device_printf(sc->mrsas_dev,
1826 "IO commands pending to target id %d\n", tgt_id);
1837 * mrsas_tm_response_code: Prints TM response code received from FW
1838 * input: Adapter instance soft state
1839 * MPI reply returned from firmware
1844 mrsas_tm_response_code(struct mrsas_softc *sc,
1845 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
1849 switch (mpi_reply->ResponseCode) {
1850 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1851 desc = "task management request completed";
1853 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1854 desc = "invalid frame";
1856 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1857 desc = "task management request not supported";
1859 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1860 desc = "task management request failed";
1862 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1863 desc = "task management request succeeded";
1865 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1866 desc = "invalid lun";
1869 desc = "overlapped tag attempted";
1871 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1872 desc = "task queued, however not sent to target";
1878 device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
1879 mpi_reply->ResponseCode, desc);
1880 device_printf(sc->mrsas_dev,
1881 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
1882 "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
1883 mpi_reply->TerminationCount, mpi_reply->DevHandle,
1884 mpi_reply->Function, mpi_reply->TaskType,
1885 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
1890 * mrsas_issue_tm: Fires the TM command to FW and waits for completion
1891 * input: Adapter instance soft state
1892 * request descriptor compiled by mrsas_reset_targets
1894 * Returns FAIL if the TM command timed out in FW, else SUCCESS.
1897 mrsas_issue_tm(struct mrsas_softc *sc,
1898 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
1902 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
1903 sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "tm_sleep", 50*hz);
1905 if (sleep_stat == EWOULDBLOCK) {
1906 device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
1914 * mrsas_reset_targets : Gathers info to fire a target reset command
1915 * input: Adapter instance soft state
1917 * This function compiles data for a target reset command to be fired to the FW
1918 * and then traverses the target_reset_pool to find targets with timed-out IOs.
1920 * Returns SUCCESS or FAIL
1922 int mrsas_reset_targets(struct mrsas_softc *sc)
1924 struct mrsas_mpt_cmd *tm_mpt_cmd = NULL;
1925 struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL;
1926 MR_TASK_MANAGE_REQUEST *mr_request;
1927 MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request;
1928 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1929 int retCode = FAIL, count, i, outstanding;
1930 u_int32_t MSIxIndex, bus_id;
1933 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
1936 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
1939 device_printf(sc->mrsas_dev, "NO IOs pending...\n");
1940 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
1943 } else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) {
1944 device_printf(sc->mrsas_dev, "Controller is not operational\n");
1947 /* Some more error checks will be added in the future */
1950 /* Get an mpt frame and an index to fire the TM cmd */
1951 tm_mpt_cmd = mrsas_get_mpt_cmd(sc);
1957 req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1);
1959 device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n");
1963 memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
1965 req_desc->HighPriority.SMID = tm_mpt_cmd->index;
1966 req_desc->HighPriority.RequestFlags =
1967 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1968 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1969 req_desc->HighPriority.MSIxIndex = 0;
1970 req_desc->HighPriority.LMID = 0;
1971 req_desc->HighPriority.Reserved1 = 0;
1972 tm_mpt_cmd->request_desc = req_desc;
1974 mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request;
1975 memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST));
1977 tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
1978 tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1979 tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
1980 tm_mpi_request->TaskMID = 0; /* smid task */
1981 tm_mpi_request->LUN[1] = 0;
1983 /* Traverse the tm_mpt pool to get valid entries */
1984 for (i = 0 ; i < MRSAS_MAX_TM_TARGETS; i++) {
1985 if(!sc->target_reset_pool[i]) {
1988 tgt_mpt_cmd = sc->target_reset_pool[i];
1993 /* See if the target is tm capable or NOT */
1994 if (!tgt_mpt_cmd->tmCapable) {
1995 device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for "
1996 "CAM target:%d\n", tgt_id);
2002 tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle;
2004 if (i < (MRSAS_MAX_PD - 1)) {
2005 mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1;
2008 mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1;
2012 device_printf(sc->mrsas_dev, "TM will be fired for "
2013 "CAM target:%d and bus_id %d\n", tgt_id, bus_id);
2015 sc->ocr_chan = (void *)&tm_mpt_cmd;
2016 retCode = mrsas_issue_tm(sc, req_desc);
2017 if (retCode == FAIL)
2022 (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply;
2023 mrsas_tm_response_code(sc, mpi_reply);
2025 mrsas_atomic_dec(&sc->target_reset_outstanding);
2026 sc->target_reset_pool[i] = NULL;
2028 /* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
2029 mrsas_disable_intr(sc);
2030 /* Wait for 1 second for any parallel ISR invocation of the same
2031 * mrsas_complete_cmd() to finish
2033 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
2035 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2036 mtx_unlock(&sc->sim_lock);
2037 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2038 mrsas_complete_cmd(sc, MSIxIndex);
2039 mtx_lock(&sc->sim_lock);
2040 retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
2041 mrsas_enable_intr(sc);
2043 if (retCode == FAIL)
2047 device_printf(sc->mrsas_dev, "Number of targets outstanding "
2048 "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));
2051 mrsas_release_mpt_cmd(tm_mpt_cmd);
2053 device_printf(sc->mrsas_dev, "target reset %s!!\n",
2054 (retCode == SUCCESS) ? "SUCCESS" : "FAIL");