2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include "dev/mrsas/mrsas.h"
39 #include <cam/cam_ccb.h>
40 #include <cam/cam_sim.h>
41 #include <cam/cam_xpt_sim.h>
42 #include <cam/cam_debug.h>
43 #include <cam/cam_periph.h>
44 #include <cam/cam_xpt_periph.h>
46 #include <cam/scsi/scsi_all.h>
47 #include <cam/scsi/scsi_message.h>
48 #include <sys/taskqueue.h>
49 #include <sys/kernel.h>
51 #include <sys/time.h> /* XXX for pcpu.h */
52 #include <sys/pcpu.h> /* XXX for PCPU_GET */
54 #define smp_processor_id() PCPU_GET(cpuid)
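/*
 * Linux-style helper: evaluates to the ID of the CPU the caller is currently
 * running on. It is used below to spread command completions across the
 * available MSI-x vectors.
 */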
59 int mrsas_cam_attach(struct mrsas_softc *sc);
60 int mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb);
61 int mrsas_bus_scan(struct mrsas_softc *sc);
62 int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
64 mrsas_map_request(struct mrsas_softc *sc,
65 struct mrsas_mpt_cmd *cmd, union ccb *ccb);
67 mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
70 mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
73 mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
74 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
76 mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
77 union ccb *ccb, u_int32_t device_id,
78 MRSAS_RAID_SCSI_IO_REQUEST * io_request);
79 void mrsas_xpt_freeze(struct mrsas_softc *sc);
80 void mrsas_xpt_release(struct mrsas_softc *sc);
81 void mrsas_cam_detach(struct mrsas_softc *sc);
82 void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
83 void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
84 void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
86 mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
87 u_int32_t req_desc_hi);
89 mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request,
90 u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb,
91 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag,
92 u_int32_t ld_block_size);
93 static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim);
94 static void mrsas_cam_poll(struct cam_sim *sim);
95 static void mrsas_action(struct cam_sim *sim, union ccb *ccb);
96 static void mrsas_scsiio_timeout(void *data);
97 static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id);
98 static void mrsas_tm_response_code(struct mrsas_softc *sc,
99 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply);
100 static int mrsas_issue_tm(struct mrsas_softc *sc,
101 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc);
103 mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
104 int nseg, int error);
106 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
109 static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
110 bus_dma_segment_t *segs, int nsegs);
111 static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd,
112 bus_dma_segment_t *segs, int nseg);
113 static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd,
114 bus_dma_segment_t *segs, int nseg);
116 struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
117 MRSAS_REQUEST_DESCRIPTOR_UNION *
118 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
120 extern int mrsas_reset_targets(struct mrsas_softc *sc);
121 extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
123 MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map);
124 extern void mrsas_isr(void *arg);
125 extern void mrsas_aen_handler(struct mrsas_softc *sc);
127 MR_BuildRaidContext(struct mrsas_softc *sc,
128 struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context,
129 MR_DRV_RAID_MAP_ALL * map);
131 MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span,
132 MR_DRV_RAID_MAP_ALL * map);
134 mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
135 PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info);
136 extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
137 extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
138 extern void mrsas_disable_intr(struct mrsas_softc *sc);
139 extern void mrsas_enable_intr(struct mrsas_softc *sc);
140 void mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
141 struct mrsas_mpt_cmd *cmd);
144 * mrsas_cam_attach: Main entry to CAM subsystem
145 * input: Adapter instance soft state
147 * This function is called from mrsas_attach() during initialization to perform
 * SIM allocations and XPT bus registration. If the kernel version is 7.4 or
 * earlier, it also initiates a bus scan.
152 mrsas_cam_attach(struct mrsas_softc *sc)
154 struct cam_devq *devq;
157 mrsas_cam_depth = sc->max_scsi_cmds;
159 if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) {
160 device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n");
164 * Create SIM for bus 0 and register, also create path
166 sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
167 device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
168 mrsas_cam_depth, devq);
169 if (sc->sim_0 == NULL) {
171 device_printf(sc->mrsas_dev, "Cannot register SIM\n");
174 /* Initialize taskqueue for Event Handling */
175 TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc);
176 sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO,
177 taskqueue_thread_enqueue, &sc->ev_tq);
179 /* Run the task queue with lowest priority */
180 taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq",
181 device_get_nameunit(sc->mrsas_dev));
182 mtx_lock(&sc->sim_lock);
183 if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) {
184 cam_sim_free(sc->sim_0, TRUE); /* passing true frees the devq */
185 mtx_unlock(&sc->sim_lock);
188 if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0),
189 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
190 xpt_bus_deregister(cam_sim_path(sc->sim_0));
cam_sim_free(sc->sim_0, TRUE);	/* passing true will free the devq */
193 mtx_unlock(&sc->sim_lock);
196 mtx_unlock(&sc->sim_lock);
199 * Create SIM for bus 1 and register, also create path
201 sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
202 device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth,
203 mrsas_cam_depth, devq);
204 if (sc->sim_1 == NULL) {
206 device_printf(sc->mrsas_dev, "Cannot register SIM\n");
209 mtx_lock(&sc->sim_lock);
210 if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) {
211 cam_sim_free(sc->sim_1, TRUE); /* passing true frees the devq */
212 mtx_unlock(&sc->sim_lock);
215 if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1),
217 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
218 xpt_bus_deregister(cam_sim_path(sc->sim_1));
219 cam_sim_free(sc->sim_1, TRUE);
220 mtx_unlock(&sc->sim_lock);
223 mtx_unlock(&sc->sim_lock);
225 #if (__FreeBSD_version <= 704000)
226 if (mrsas_bus_scan(sc)) {
227 device_printf(sc->mrsas_dev, "Error in bus scan.\n");
235 * mrsas_cam_detach: De-allocates and teardown CAM
236 * input: Adapter instance soft state
238 * De-registers and frees the paths and SIMs.
241 mrsas_cam_detach(struct mrsas_softc *sc)
243 if (sc->ev_tq != NULL)
244 taskqueue_free(sc->ev_tq);
245 mtx_lock(&sc->sim_lock);
247 xpt_free_path(sc->path_0);
249 xpt_bus_deregister(cam_sim_path(sc->sim_0));
250 cam_sim_free(sc->sim_0, FALSE);
253 xpt_free_path(sc->path_1);
255 xpt_bus_deregister(cam_sim_path(sc->sim_1));
256 cam_sim_free(sc->sim_1, TRUE);
258 mtx_unlock(&sc->sim_lock);
262 * mrsas_action: SIM callback entry point
 * input: pointer to SIM, pointer to CAM Control Block
265 * This function processes CAM subsystem requests. The type of request is stored
266 * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because
267 * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier.
270 mrsas_action(struct cam_sim *sim, union ccb *ccb)
272 struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
273 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
 * Check if the system is going down
278 * or the adapter is in unrecoverable critical error
280 if (sc->remove_in_progress ||
281 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) {
282 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
287 switch (ccb->ccb_h.func_code) {
290 device_id = ccb_h->target_id;
293 * bus 0 is LD, bus 1 is for system-PD
295 if (cam_sim_bus(sim) == 1 &&
296 sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) {
297 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
300 if (mrsas_startio(sc, sim, ccb)) {
301 ccb->ccb_h.status |= CAM_REQ_INVALID;
309 ccb->ccb_h.status = CAM_UA_ABORT;
318 case XPT_GET_TRAN_SETTINGS:
320 ccb->cts.protocol = PROTO_SCSI;
321 ccb->cts.protocol_version = SCSI_REV_2;
322 ccb->cts.transport = XPORT_SPI;
323 ccb->cts.transport_version = 2;
324 ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
325 ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
326 ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
327 ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
328 ccb->ccb_h.status = CAM_REQ_CMP;
332 case XPT_SET_TRAN_SETTINGS:
334 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
338 case XPT_CALC_GEOMETRY:
340 cam_calc_geometry(&ccb->ccg, 1);
346 ccb->cpi.version_num = 1;
347 ccb->cpi.hba_inquiry = 0;
348 ccb->cpi.target_sprt = 0;
349 #if (__FreeBSD_version >= 902001)
350 ccb->cpi.hba_misc = PIM_UNMAPPED;
352 ccb->cpi.hba_misc = 0;
354 ccb->cpi.hba_eng_cnt = 0;
355 ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS;
356 ccb->cpi.unit_number = cam_sim_unit(sim);
357 ccb->cpi.bus_id = cam_sim_bus(sim);
358 ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID;
359 ccb->cpi.base_transfer_speed = 150000;
360 strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
361 strlcpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN);
362 strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
363 ccb->cpi.transport = XPORT_SPI;
364 ccb->cpi.transport_version = 2;
365 ccb->cpi.protocol = PROTO_SCSI;
366 ccb->cpi.protocol_version = SCSI_REV_2;
367 if (ccb->cpi.bus_id == 0)
368 ccb->cpi.max_target = MRSAS_MAX_PD - 1;
370 ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
371 #if (__FreeBSD_version > 704000)
372 ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE;
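/*
 * maxio caps a single transfer at max_num_sge pages. As an illustration
 * (values are assumptions, not taken from this file): with a 4 KiB
 * MRSAS_PAGE_SIZE and 256 SGEs this advertises a 1 MiB maximum IO size.
 */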
374 ccb->ccb_h.status = CAM_REQ_CMP;
380 ccb->ccb_h.status = CAM_REQ_INVALID;
388 * mrsas_scsiio_timeout: Callback function for IO timed out
389 * input: mpt command context
 * This function executes when the timer armed with the timeout value from the
 * ccb header expires. The driver runs this timer for all DCDB and LDIO
 * commands coming from the CAM layer. This is the callback function for IO
 * timeout and it runs in no-sleep context. It sets do_timedout_reset in the
 * adapter context so that OCR/kill-adapter is executed from the ocr_thread
 * context.
398 mrsas_scsiio_timeout(void *data)
400 struct mrsas_mpt_cmd *cmd;
401 struct mrsas_softc *sc;
407 cmd = (struct mrsas_mpt_cmd *)data;
410 if (cmd->ccb_ptr == NULL) {
411 printf("command timeout with NULL ccb\n");
 * The callout below is a dummy entry so that it can be cancelled from
 * mrsas_cmd_done(). The controller will now go through OCR/kill-adapter,
 * based on its OCR enable/disable property, from the ocr_thread
421 #if (__FreeBSD_version >= 1000510)
422 callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
423 mrsas_scsiio_timeout, cmd, 0);
425 callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
426 mrsas_scsiio_timeout, cmd);
429 if (cmd->ccb_ptr->cpi.bus_id == 0)
430 target_id = cmd->ccb_ptr->ccb_h.target_id;
432 target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));
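/*
 * The slot in target_reset_pool[] is derived from the CAM target id: bus-0
 * IOs use the id directly, while IOs from the other bus are offset by
 * (MRSAS_MAX_PD - 1), so entries from the two buses never collide.
 */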
434 /* Save the cmd to be processed for TM, if it is not there in the array */
435 if (sc->target_reset_pool[target_id] == NULL) {
436 sc->target_reset_pool[target_id] = cmd;
437 mrsas_atomic_inc(&sc->target_reset_outstanding);
444 * mrsas_startio: SCSI IO entry point
445 * input: Adapter instance soft state
446 * pointer to CAM Control Block
 * This function is the SCSI IO entry point and it initiates IO processing. It
 * copies the IO and, depending on whether the IO is read/write or inquiry,
 * calls mrsas_build_ldio() or mrsas_build_dcdb(), respectively. It returns 0
 * if the command is sent to firmware successfully, otherwise it returns 1.
454 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
457 struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL;
458 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
459 struct ccb_scsiio *csio = &(ccb->csio);
460 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
463 if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE &&
464 (!sc->fw_sync_cache_support)) {
465 ccb->ccb_h.status = CAM_REQ_CMP;
469 ccb_h->status |= CAM_SIM_QUEUED;
470 cmd = mrsas_get_mpt_cmd(sc);
473 ccb_h->status |= CAM_REQUEUE_REQ;
478 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
479 if (ccb_h->flags & CAM_DIR_IN)
480 cmd->flags |= MRSAS_DIR_IN;
481 if (ccb_h->flags & CAM_DIR_OUT)
482 cmd->flags |= MRSAS_DIR_OUT;
484 cmd->flags = MRSAS_DIR_NONE; /* no data */
486 /* For FreeBSD 9.2 and higher */
487 #if (__FreeBSD_version >= 902001)
489 * XXX We don't yet support physical addresses here.
491 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
493 case CAM_DATA_SG_PADDR:
494 device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n",
496 mrsas_release_mpt_cmd(cmd);
497 ccb_h->status = CAM_REQ_INVALID;
498 ccb_h->status &= ~CAM_SIM_QUEUED;
501 device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n",
503 mrsas_release_mpt_cmd(cmd);
504 ccb_h->status = CAM_REQ_INVALID;
507 if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
508 mrsas_release_mpt_cmd(cmd);
509 ccb_h->status = CAM_REQ_TOO_BIG;
512 cmd->length = csio->dxfer_len;
514 cmd->data = csio->data_ptr;
517 if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
518 mrsas_release_mpt_cmd(cmd);
519 ccb_h->status = CAM_REQ_TOO_BIG;
522 cmd->length = csio->dxfer_len;
524 cmd->data = csio->data_ptr;
527 ccb->ccb_h.status = CAM_REQ_INVALID;
531 if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data address */
532 if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
533 if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
534 mrsas_release_mpt_cmd(cmd);
535 ccb_h->status = CAM_REQ_TOO_BIG;
538 cmd->length = csio->dxfer_len;
540 cmd->data = csio->data_ptr;
542 mrsas_release_mpt_cmd(cmd);
543 ccb_h->status = CAM_REQ_INVALID;
546 } else { /* Data addresses are physical. */
547 mrsas_release_mpt_cmd(cmd);
548 ccb_h->status = CAM_REQ_INVALID;
549 ccb_h->status &= ~CAM_SIM_QUEUED;
556 req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
558 device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
561 memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
562 cmd->request_desc = req_desc;
564 if (ccb_h->flags & CAM_CDB_POINTER)
565 bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
567 bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
568 mtx_lock(&sc->raidmap_lock);
570 /* Check for IO type READ-WRITE targeted for Logical Volume */
571 cmd_type = mrsas_find_io_type(sim, ccb);
573 case READ_WRITE_LDIO:
574 /* Build READ-WRITE IO for Logical Volume */
575 if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
576 device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
577 mtx_unlock(&sc->raidmap_lock);
578 mrsas_release_mpt_cmd(cmd);
582 case NON_READ_WRITE_LDIO:
583 /* Build NON READ-WRITE IO for Logical Volume */
584 if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
585 device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
586 mtx_unlock(&sc->raidmap_lock);
587 mrsas_release_mpt_cmd(cmd);
591 case READ_WRITE_SYSPDIO:
592 case NON_READ_WRITE_SYSPDIO:
593 if (sc->secure_jbod_support &&
594 (cmd_type == NON_READ_WRITE_SYSPDIO)) {
595 /* Build NON-RW IO for JBOD */
596 if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
597 device_printf(sc->mrsas_dev,
598 "Build SYSPDIO failed.\n");
599 mtx_unlock(&sc->raidmap_lock);
600 mrsas_release_mpt_cmd(cmd);
604 /* Build RW IO for JBOD */
605 if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
606 device_printf(sc->mrsas_dev,
607 "Build SYSPDIO failed.\n");
608 mtx_unlock(&sc->raidmap_lock);
609 mrsas_release_mpt_cmd(cmd);
614 mtx_unlock(&sc->raidmap_lock);
616 if (cmd->flags == MRSAS_DIR_IN) /* from device */
617 cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
618 else if (cmd->flags == MRSAS_DIR_OUT) /* to device */
619 cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;
621 cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
622 cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
623 cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
624 cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;
626 req_desc = cmd->request_desc;
627 req_desc->SCSIIO.SMID = cmd->index;
 * Start timer for IO timeout. Default timeout value is 180 seconds.
632 cmd->callout_owner = true;
633 #if (__FreeBSD_version >= 1000510)
634 callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
635 mrsas_scsiio_timeout, cmd, 0);
637 callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
638 mrsas_scsiio_timeout, cmd);
641 if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->io_cmds_highwater)
642 sc->io_cmds_highwater++;
 * If it is RAID 1/10 FP-write capable, try to get a second command from the
 * pool and construct it. FW has confirmed that the LBA values of the two PDs
 * corresponding to a single R1/10 LD are always the same.
 *
 * The driver-side count should always be less than max_fw_cmds so that a new
 * command can be obtained.
655 if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
656 mrsas_atomic_inc(&sc->fw_outstanding);
657 mrsas_prepare_secondRaid1_IO(sc, cmd);
658 mrsas_fire_cmd(sc, req_desc->addr.u.low,
659 req_desc->addr.u.high);
660 r1_cmd = cmd->peer_cmd;
661 mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low,
662 r1_cmd->request_desc->addr.u.high);
664 mrsas_fire_cmd(sc, req_desc->addr.u.low,
665 req_desc->addr.u.high);
676 * mrsas_find_io_type: Determines if IO is read/write or inquiry
677 * input: pointer to CAM Control Block
 * This function determines whether the IO is a read/write command. It returns
 * a READ_WRITE_* command type for read/write opcodes and a NON_READ_WRITE_*
 * type for everything else.
683 mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb)
685 struct ccb_scsiio *csio = &(ccb->csio);
687 switch (csio->cdb_io.cdb_bytes[0]) {
696 return (cam_sim_bus(sim) ?
697 READ_WRITE_SYSPDIO : READ_WRITE_LDIO);
699 return (cam_sim_bus(sim) ?
700 NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO);
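/*
 * Note: the case labels elided above are presumably the READ_6/10/12/16 and
 * WRITE_6/10/12/16 opcodes; any other opcode falls into the non-read/write
 * branch, and cam_sim_bus() selects the LD vs. system-PD flavour.
 */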
705 * mrsas_get_mpt_cmd: Get a cmd from free command pool
706 * input: Adapter instance soft state
 * This function removes an MPT command from the command free list and
 * initializes it.
711 struct mrsas_mpt_cmd *
712 mrsas_get_mpt_cmd(struct mrsas_softc *sc)
714 struct mrsas_mpt_cmd *cmd = NULL;
716 mtx_lock(&sc->mpt_cmd_pool_lock);
717 if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
718 cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
719 TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
724 memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
729 cmd->load_balance = 0;
732 mtx_unlock(&sc->mpt_cmd_pool_lock);
737 * mrsas_release_mpt_cmd: Return a cmd to free command pool
738 * input: Command packet for return to free command pool
740 * This function returns an MPT command to the free command list.
743 mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
745 struct mrsas_softc *sc = cmd->sc;
747 mtx_lock(&sc->mpt_cmd_pool_lock);
748 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
749 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
750 cmd->peer_cmd = NULL;
751 cmd->cmd_completed = 0;
752 memset((uint8_t *)cmd->io_request, 0,
753 sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
754 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
755 mtx_unlock(&sc->mpt_cmd_pool_lock);
761 * mrsas_get_request_desc: Get request descriptor from array
762 * input: Adapter instance soft state
765 * This function returns a pointer to the request descriptor.
767 MRSAS_REQUEST_DESCRIPTOR_UNION *
768 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
772 KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range"));
773 p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index;
775 return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p;
/* mrsas_prepare_secondRaid1_IO
 * It prepares the second RAID 1 IO
785 mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc,
786 struct mrsas_mpt_cmd *cmd)
788 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
789 struct mrsas_mpt_cmd *r1_cmd;
791 r1_cmd = cmd->peer_cmd;
792 req_desc = cmd->request_desc;
795 * copy the io request frame as well as 8 SGEs data for r1
798 memcpy(r1_cmd->io_request, cmd->io_request,
799 (sizeof(MRSAS_RAID_SCSI_IO_REQUEST)));
800 memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
801 (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION)));
803 /* sense buffer is different for r1 command */
804 r1_cmd->io_request->SenseBufferLowAddress = r1_cmd->sense_phys_addr;
805 r1_cmd->ccb_ptr = cmd->ccb_ptr;
807 req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1);
808 req_desc2->addr.Words = 0;
809 r1_cmd->request_desc = req_desc2;
810 req_desc2->SCSIIO.SMID = r1_cmd->index;
811 req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
812 r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
813 r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
814 r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
815 cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
817 r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID =
 * MSIxIndex of both commands' request descriptors should be the same
823 r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex;
824 /* span arm is different for r1 cmd */
825 r1_cmd->io_request->RaidContext.raid_context_g35.spanArm =
826 cmd->io_request->RaidContext.raid_context_g35.spanArm + 1;
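/*
 * The peer command is a clone of the primary write aimed at the other mirror
 * arm: its DevHandle and r1_alt_dev_handle are swapped relative to the
 * primary, each command's peerSMID (set just above) references the other, and
 * spanArm is incremented by one to address the second arm.
 */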
832 * mrsas_build_ldio_rw: Builds an LDIO command
833 * input: Adapter instance soft state
834 * Pointer to command packet
837 * This function builds the LDIO command packet. It returns 0 if the command is
838 * built successfully, otherwise it returns a 1.
841 mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
844 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
845 struct ccb_scsiio *csio = &(ccb->csio);
847 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
849 device_id = ccb_h->target_id;
851 io_request = cmd->io_request;
852 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
853 io_request->RaidContext.raid_context.status = 0;
854 io_request->RaidContext.raid_context.exStatus = 0;
856 /* just the cdb len, other flags zero, and ORed-in later for FP */
857 io_request->IoFlags = csio->cdb_len;
859 if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
860 device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");
862 io_request->DataLength = cmd->length;
864 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
865 if (cmd->sge_count > sc->max_num_sge) {
device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
870 if (sc->is_ventura || sc->is_aero)
871 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
 * numSGE stores the lower 8 bits of sge_count; numSGEExt stores the
 * upper 8 bits of sge_count
877 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
878 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
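/*
 * Example: with sge_count == 300 (0x12C), numSGE holds the low byte 0x2C (44)
 * and numSGEExt holds 0x1, so the firmware still sees all 300 SGEs.
 */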
882 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
/* stream detection on read and write IOs */
890 mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
891 struct IO_REQUEST_INFO *io_info)
893 u_int32_t device_id = io_info->ldTgtId;
894 LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id];
u_int32_t *track_stream = &current_ld_SD->mruBitMap;
896 u_int32_t streamNum, shiftedValues, unshiftedValues;
897 u_int32_t indexValueMask, shiftedValuesMask;
899 boolean_t isReadAhead = false;
900 STREAM_DETECT *current_SD;
902 /* find possible stream */
903 for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
904 streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
current_SD = &current_ld_SD->streamTrack[streamNum];
908 * if we found a stream, update the raid context and
909 * also update the mruBitMap
911 if (current_SD->nextSeqLBA &&
912 io_info->ldStartBlock >= current_SD->nextSeqLBA &&
913 (io_info->ldStartBlock <= (current_SD->nextSeqLBA+32)) &&
914 (current_SD->isRead == io_info->isRead)) {
915 if (io_info->ldStartBlock != current_SD->nextSeqLBA &&
916 (!io_info->isRead || !isReadAhead)) {
 * Once the API is available we need to change this.
 * At this point we are not allowing any gap
923 cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE;
924 current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
926 * update the mruBitMap LRU
928 shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1 ;
929 shiftedValues = ((*track_stream & shiftedValuesMask) <<
930 BITS_PER_INDEX_STREAM);
931 indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM;
932 unshiftedValues = (*track_stream) &
933 (~(shiftedValuesMask | indexValueMask));
935 (unshiftedValues | shiftedValues | streamNum);
940 * if we did not find any stream, create a new one from the least recently used
942 streamNum = (*track_stream >>
943 ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
current_SD = &current_ld_SD->streamTrack[streamNum];
945 current_SD->isRead = io_info->isRead;
946 current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
947 *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum);
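/*
 * Worked example, assuming the usual constants (8 tracked streams, 4-bit
 * indices, STREAM_MASK 0xF, ZERO_LAST_STREAM 0x0FFFFFFF): if *track_stream is
 * 0x76543210, the LRU slot holds stream index 7, which is reused; the update
 * above yields ((0x76543210 & 0x0FFFFFFF) << 4) | 7 = 0x65432107, i.e. index
 * 7 becomes the most recently used entry.
 */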
953 * mrsas_setup_io: Set up data including Fast Path I/O
954 * input: Adapter instance soft state
955 * Pointer to command packet
 * This function sets up the IO data, including the Fast Path decision, for a
 * read/write command. It returns 0 if the command is set up successfully,
 * otherwise it returns a 1.
962 mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
963 union ccb *ccb, u_int32_t device_id,
964 MRSAS_RAID_SCSI_IO_REQUEST * io_request)
966 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
967 struct ccb_scsiio *csio = &(ccb->csio);
968 struct IO_REQUEST_INFO io_info;
969 MR_DRV_RAID_MAP_ALL *map_ptr;
970 struct mrsas_mpt_cmd *r1_cmd = NULL;
973 u_int8_t fp_possible;
974 u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
975 u_int32_t datalength = 0;
977 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
984 * READ_6 (0x08) or WRITE_6 (0x0A) cdb
986 if (csio->cdb_len == 6) {
987 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
988 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
989 ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
990 (u_int32_t)csio->cdb_io.cdb_bytes[3];
991 start_lba_lo &= 0x1FFFFF;
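/*
 * Example: a READ_6 CDB of 08 00 12 34 08 00 yields start_lba_lo = 0x001234
 * and datalength = 8 blocks; the 0x1FFFFF mask keeps only the 21-bit LBA
 * field that READ_6/WRITE_6 provide.
 */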
 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
996 else if (csio->cdb_len == 10) {
997 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
998 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
999 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
1000 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
1001 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
1002 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
1005 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
1007 else if (csio->cdb_len == 12) {
1008 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
1009 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
1010 ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
1011 ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
1012 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
1013 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
1014 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
1015 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
1020 else if (csio->cdb_len == 16) {
1021 datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
1022 ((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) |
1023 ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) |
1024 ((u_int32_t)csio->cdb_io.cdb_bytes[13]);
1025 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) |
1026 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
1027 (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 |
1028 ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
1029 start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
1030 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
1031 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
1032 ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
1034 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
1035 io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo;
1036 io_info.numBlocks = datalength;
1037 io_info.ldTgtId = device_id;
1038 io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
1040 io_request->DataLength = cmd->length;
1042 switch (ccb_h->flags & CAM_DIR_MASK) {
1051 mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK);
1055 map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1056 ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr);
1058 ld = MR_TargetIdToLdGet(device_id, map_ptr);
1059 if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) {
1060 io_request->RaidContext.raid_context.regLockFlags = 0;
1063 if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext.raid_context, map_ptr))
1064 fp_possible = io_info.fpOkForIo;
1067 raid = MR_LdRaidGet(ld, map_ptr);
1068 /* Store the TM capability value in cmd */
1069 cmd->tmCapable = raid->capability.tmCapable;
1071 cmd->request_desc->SCSIIO.MSIxIndex =
1072 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
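/*
 * Completions are spread across reply queues by submitting CPU: for example,
 * with 8 MSI-x vectors an IO issued from CPU 11 is completed on vector
 * 11 % 8 = 3. With no MSI-x, everything uses index 0.
 */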
1074 if (sc->is_ventura || sc->is_aero) {
1075 if (sc->streamDetectByLD) {
1076 mtx_lock(&sc->stream_lock);
1077 mrsas_stream_detect(sc, cmd, &io_info);
1078 mtx_unlock(&sc->stream_lock);
/* In Ventura, if a stream is detected for a read and the LD is
 * read-ahead capable, send this IO as an LDIO */
1081 if (io_request->RaidContext.raid_context_g35.streamDetected &&
1082 io_info.isRead && io_info.raCapable)
1083 fp_possible = FALSE;
/* Set the RAID 1/10 fast path write capable bit in io_info.
 * Note: reset peer_cmd and r1_alt_dev_handle if fp_possible is
 * disabled after this point. Try not to add more checks that
 * toggle fp_possible after this.
1092 (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) &&
1093 (raid->level == 1) && !io_info.isRead) {
1094 r1_cmd = mrsas_get_mpt_cmd(sc);
1096 fp_possible = FALSE;
1097 printf("Avago debug fp disable from %s %d \n",
1098 __func__, __LINE__);
1100 cmd->peer_cmd = r1_cmd;
1101 r1_cmd->peer_cmd = cmd;
1107 mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr,
1108 start_lba_lo, ld_block_size);
1109 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1110 cmd->request_desc->SCSIIO.RequestFlags =
1111 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1112 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1113 if (sc->mrsas_gen3_ctrl) {
1114 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
1115 cmd->request_desc->SCSIIO.RequestFlags =
1116 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1117 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1118 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1119 io_request->RaidContext.raid_context.nseg = 0x1;
1120 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1121 io_request->RaidContext.raid_context.regLockFlags |=
1122 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1123 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1124 } else if (sc->is_ventura || sc->is_aero) {
1125 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
1126 io_request->RaidContext.raid_context_g35.nseg = 0x1;
1127 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1128 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1129 if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) {
1130 io_request->RaidContext.raid_context_g35.RAIDFlags =
1131 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
1132 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1135 if ((sc->load_balance_info[device_id].loadBalanceFlag) &&
1138 mrsas_get_updated_dev_handle(sc,
1139 &sc->load_balance_info[device_id], &io_info);
1140 cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG;
1141 cmd->pd_r1_lb = io_info.pd_after_lb;
1142 if (sc->is_ventura || sc->is_aero)
1143 io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm;
1145 io_request->RaidContext.raid_context.spanArm = io_info.span_arm;
1147 cmd->load_balance = 0;
1149 if (sc->is_ventura || sc->is_aero)
1150 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
1152 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
1154 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
1155 io_request->DevHandle = io_info.devHandle;
1156 cmd->pdInterface = io_info.pdInterface;
1159 io_request->RaidContext.raid_context.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
1160 cmd->request_desc->SCSIIO.RequestFlags =
1161 (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
1162 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1163 if (sc->mrsas_gen3_ctrl) {
1164 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED)
1165 cmd->request_desc->SCSIIO.RequestFlags =
1166 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1167 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1168 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1169 io_request->RaidContext.raid_context.regLockFlags |=
1170 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1171 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1172 io_request->RaidContext.raid_context.nseg = 0x1;
1173 } else if (sc->is_ventura || sc->is_aero) {
1174 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA;
1175 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1176 io_request->RaidContext.raid_context_g35.nseg = 0x1;
1178 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1179 io_request->DevHandle = device_id;
1185 * mrsas_build_ldio_nonrw: Builds an LDIO command
1186 * input: Adapter instance soft state
1187 * Pointer to command packet
1190 * This function builds the LDIO command packet. It returns 0 if the command is
1191 * built successfully, otherwise it returns a 1.
1194 mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
1197 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
1198 u_int32_t device_id, ld;
1199 MR_DRV_RAID_MAP_ALL *map_ptr;
1201 RAID_CONTEXT *pRAID_Context;
1202 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1204 io_request = cmd->io_request;
1205 device_id = ccb_h->target_id;
1207 map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1208 ld = MR_TargetIdToLdGet(device_id, map_ptr);
1209 raid = MR_LdRaidGet(ld, map_ptr);
1210 /* get RAID_Context pointer */
1211 pRAID_Context = &io_request->RaidContext.raid_context;
1212 /* Store the TM capability value in cmd */
1213 cmd->tmCapable = raid->capability.tmCapable;
1215 /* FW path for LD Non-RW (SCSI management commands) */
1216 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1217 io_request->DevHandle = device_id;
1218 cmd->request_desc->SCSIIO.RequestFlags =
1219 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1220 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1222 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1223 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1224 io_request->DataLength = cmd->length;
1226 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1227 if (cmd->sge_count > sc->max_num_sge) {
device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
1232 if (sc->is_ventura || sc->is_aero)
1233 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
 * numSGE stores the lower 8 bits of sge_count; numSGEExt stores the
 * upper 8 bits of sge_count
1239 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
1240 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1243 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
 * mrsas_build_syspdio: Builds a DCDB command
1251 * input: Adapter instance soft state
1252 * Pointer to command packet
1255 * This function builds the DCDB inquiry command. It returns 0 if the command
1256 * is built successfully, otherwise it returns a 1.
1259 mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
1260 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
1262 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
1263 u_int32_t device_id;
1264 MR_DRV_RAID_MAP_ALL *local_map_ptr;
1265 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1266 RAID_CONTEXT *pRAID_Context;
1267 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1269 io_request = cmd->io_request;
1270 /* get RAID_Context pointer */
1271 pRAID_Context = &io_request->RaidContext.raid_context;
1272 device_id = ccb_h->target_id;
1273 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1274 io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
1275 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
1276 io_request->RaidContext.raid_context.regLockFlags = 0;
1277 io_request->RaidContext.raid_context.regLockRowLBA = 0;
1278 io_request->RaidContext.raid_context.regLockLength = 0;
1280 cmd->pdInterface = sc->target_list[device_id].interface_type;
1282 /* If FW supports PD sequence number */
1283 if (sc->use_seqnum_jbod_fp &&
1284 sc->pd_list[device_id].driveType == 0x00) {
1285 //printf("Using Drv seq num\n");
1286 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
1287 cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable;
1288 /* More than 256 PD/JBOD support for Ventura */
1289 if (sc->support_morethan256jbod)
1290 io_request->RaidContext.raid_context.VirtualDiskTgtId =
1291 pd_sync->seq[device_id].pdTargetId;
1293 io_request->RaidContext.raid_context.VirtualDiskTgtId =
1295 io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum;
1296 io_request->DevHandle = pd_sync->seq[device_id].devHandle;
1297 if (sc->is_ventura || sc->is_aero)
1298 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1;
1300 io_request->RaidContext.raid_context.regLockFlags |=
1301 (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
/* raid_context.Type = MPI2_TYPE_CUDA is valid only
 * if FW supports the JBOD sequence number
1305 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA;
1306 io_request->RaidContext.raid_context.nseg = 0x1;
1307 } else if (sc->fast_path_io) {
1308 //printf("Using LD RAID map\n");
1309 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1310 io_request->RaidContext.raid_context.configSeqNum = 0;
1311 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
1312 io_request->DevHandle =
1313 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
1315 //printf("Using FW PATH\n");
1316 /* Want to send all IO via FW path */
1317 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1318 io_request->RaidContext.raid_context.configSeqNum = 0;
1319 io_request->DevHandle = MR_DEVHANDLE_INVALID;
1322 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
1323 cmd->request_desc->SCSIIO.MSIxIndex =
1324 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
1327 /* system pd firmware path */
1328 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
1329 cmd->request_desc->SCSIIO.RequestFlags =
1330 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1331 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1332 io_request->RaidContext.raid_context.timeoutValue =
1333 local_map_ptr->raidMap.fpPdIoTimeoutSec;
1334 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
1336 /* system pd fast path */
1337 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1338 io_request->RaidContext.raid_context.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec;
 * NOTE: For system PD commands, only RW cmds get FAST_PATH in IoFlags,
 * because the non-RW cmds now go via the FW queue
 * and not the exception queue
1345 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
1346 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1348 cmd->request_desc->SCSIIO.RequestFlags =
1349 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
1350 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1353 io_request->LUN[1] = ccb_h->target_lun & 0xF;
1354 io_request->DataLength = cmd->length;
1356 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
1357 if (cmd->sge_count > sc->max_num_sge) {
device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
1362 if (sc->is_ventura || sc->is_aero)
1363 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
 * numSGE stores the lower 8 bits of sge_count; numSGEExt stores the
 * upper 8 bits of sge_count
1369 io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
1370 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
1373 device_printf(sc->mrsas_dev, "Data map/load failed.\n");
1380 * mrsas_is_prp_possible: This function will tell whether PRPs should be built or not
1381 * sc: Adapter instance soft state
1382 * cmd: MPT command frame pointer
 * nsegs: Number of OS SGEs
 *
 * This function checks whether the IO qualifies for building PRPs
1386 * return: true: if PRP should be built
1387 * false: if IEEE SGLs should be built
1389 static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
1390 bus_dma_segment_t *segs, int nsegs)
1392 struct mrsas_softc *sc = cmd->sc;
1394 u_int32_t data_length = 0;
1395 bool build_prp = false;
1396 u_int32_t mr_nvme_pg_size;
1398 mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
1399 data_length = cmd->length;
1401 if (data_length > (mr_nvme_pg_size * 5))
1403 else if ((data_length > (mr_nvme_pg_size * 4)) &&
1404 (data_length <= (mr_nvme_pg_size * 5))) {
1405 /* check if 1st SG entry size is < residual beyond 4 pages */
1406 if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4)))
1410 /*check for SGE holes here*/
1411 for (i = 0; i < nsegs; i++) {
1412 /* check for mid SGEs */
1413 if ((i != 0) && (i != (nsegs - 1))) {
1414 if ((segs[i].ds_addr % mr_nvme_pg_size) ||
1415 (segs[i].ds_len % mr_nvme_pg_size)) {
1417 mrsas_atomic_inc(&sc->sge_holes);
1422 /* check for first SGE*/
1423 if ((nsegs > 1) && (i == 0)) {
1424 if ((segs[i].ds_addr + segs[i].ds_len) % mr_nvme_pg_size) {
1426 mrsas_atomic_inc(&sc->sge_holes);
1431 /* check for Last SGE*/
1432 if ((nsegs > 1) && (i == (nsegs - 1))) {
1433 if (segs[i].ds_addr % mr_nvme_pg_size) {
1435 mrsas_atomic_inc(&sc->sge_holes);
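/*
 * Summary of the qualification rules above: PRPs are built only when every
 * middle segment starts and ends on an NVMe page boundary (assumed to be
 * 4 KiB by default via MR_DEFAULT_NVME_PAGE_SIZE), the first segment ends on
 * a page boundary, and the last segment starts on one; any violation bumps
 * the sge_holes counter and the IO falls back to IEEE SGLs.
 */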
1446 * mrsas_map_request: Map and load data
1447 * input: Adapter instance soft state
1448 * Pointer to command packet
1450 * For data from OS, map and load the data buffer into bus space. The SG list
1451 * is built in the callback. If the bus dmamap load is not successful,
1452 * cmd->error_code will contain the error code and a 1 is returned.
1455 mrsas_map_request(struct mrsas_softc *sc,
1456 struct mrsas_mpt_cmd *cmd, union ccb *ccb)
1458 u_int32_t retcode = 0;
1459 struct cam_sim *sim;
1461 sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path);
1463 if (cmd->data != NULL) {
1464 /* Map data buffer into bus space */
1465 mtx_lock(&sc->io_lock);
1466 #if (__FreeBSD_version >= 902001)
1467 retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb,
1468 mrsas_data_load_cb, cmd, 0);
1470 retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data,
1471 cmd->length, mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT);
1473 mtx_unlock(&sc->io_lock);
1475 device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode);
1476 if (retcode == EINPROGRESS) {
1477 device_printf(sc->mrsas_dev, "request load in progress\n");
1478 mrsas_freeze_simq(cmd, sim);
1481 if (cmd->error_code)
1487 * mrsas_unmap_request: Unmap and unload data
1488 * input: Adapter instance soft state
1489 * Pointer to command packet
1491 * This function unmaps and unloads data from OS.
1494 mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1496 if (cmd->data != NULL) {
1497 if (cmd->flags & MRSAS_DIR_IN)
1498 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD);
1499 if (cmd->flags & MRSAS_DIR_OUT)
1500 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE);
1501 mtx_lock(&sc->io_lock);
1502 bus_dmamap_unload(sc->data_tag, cmd->data_dmamap);
1503 mtx_unlock(&sc->io_lock);
1508 * mrsas_build_ieee_sgl - Prepare IEEE SGLs
1509 * @sc: Adapter soft state
1510 * @segs: OS SGEs pointers
1511 * @nseg: Number of OS SGEs
1512 * @cmd: Fusion command frame
1515 static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
1517 struct mrsas_softc *sc = cmd->sc;
1518 MRSAS_RAID_SCSI_IO_REQUEST *io_request;
1519 pMpi25IeeeSgeChain64_t sgl_ptr;
1520 int i = 0, sg_processed = 0;
1522 io_request = cmd->io_request;
1523 sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
1525 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1526 pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
1528 sgl_ptr_end += sc->max_sge_in_main_msg - 1;
1529 sgl_ptr_end->Flags = 0;
1532 for (i = 0; i < nseg; i++) {
1533 sgl_ptr->Address = segs[i].ds_addr;
1534 sgl_ptr->Length = segs[i].ds_len;
1536 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1538 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1541 sg_processed = i + 1;
1542 if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
1543 (nseg > sc->max_sge_in_main_msg)) {
1544 pMpi25IeeeSgeChain64_t sg_chain;
1546 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) {
1547 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1548 != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1549 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1551 cmd->io_request->ChainOffset = 0;
1553 cmd->io_request->ChainOffset = sc->chain_offset_io_request;
1555 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero)
1556 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1558 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1559 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed));
1560 sg_chain->Address = cmd->chain_frame_phys_addr;
1561 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame;
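/*
 * When more SGEs remain than fit in the main frame, the last main-frame slot
 * is converted into a chain element: its Address points at the pre-allocated
 * chain frame, its Length covers the remaining SGEs, and ChainOffset tells
 * the firmware where that element sits. SGE construction then continues in
 * the chain frame.
 */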
1568 * mrsas_build_prp_nvme - Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
1569 * @sc: Adapter soft state
1570 * @segs: OS SGEs pointers
1571 * @nseg: Number of OS SGEs
1572 * @cmd: Fusion command frame
1575 static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
1577 struct mrsas_softc *sc = cmd->sc;
1578 int sge_len, offset, num_prp_in_chain = 0;
1579 pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr;
1581 bus_addr_t ptr_sgl_phys;
1583 u_int32_t page_mask, page_mask_result, i = 0;
1584 u_int32_t first_prp_len;
1585 int data_len = cmd->length;
1586 u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size,
1587 MR_DEFAULT_NVME_PAGE_SIZE);
1589 sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL;
1591 * NVMe has a very convoluted PRP format. One PRP is required
1592 * for each page or partial page. We need to split up OS SG
1593 * entries if they are longer than one page or cross a page
1594 * boundary. We also have to insert a PRP list pointer entry as
1595 * the last entry in each physical page of the PRP list.
1597 * NOTE: The first PRP "entry" is actually placed in the first
1598 * SGL entry in the main message in IEEE 64 format. The 2nd
1599 * entry in the main message is the chain element, and the rest
1600 * of the PRP entries are built in the contiguous PCIe buffer.
1602 page_mask = mr_nvme_pg_size - 1;
1603 ptr_sgl = (u_int64_t *) cmd->chain_frame;
1604 ptr_sgl_phys = cmd->chain_frame_phys_addr;
1605 memset(ptr_sgl, 0, sc->max_chain_frame_sz);
/* Build chain frame element which holds all PRPs except the first */
1608 main_chain_element = (pMpi25IeeeSgeChain64_t)
1609 ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64));
1612 main_chain_element->Address = cmd->chain_frame_phys_addr;
1613 main_chain_element->NextChainOffset = 0;
1614 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1615 IEEE_SGE_FLAGS_SYSTEM_ADDR |
1616 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
/* Build the first PRP; the SGE need not be PAGE aligned */
1620 ptr_first_sgl = sgl_ptr;
1621 sge_addr = segs[i].ds_addr;
1622 sge_len = segs[i].ds_len;
1625 offset = (u_int32_t) (sge_addr & page_mask);
1626 first_prp_len = mr_nvme_pg_size - offset;
1628 ptr_first_sgl->Address = sge_addr;
1629 ptr_first_sgl->Length = first_prp_len;
1631 data_len -= first_prp_len;
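/*
 * Example with an assumed 4 KiB NVMe page: if the first segment starts at a
 * physical address ending in 0x200, offset = 0x200 and first_prp_len =
 * 4096 - 512 = 3584 bytes; the rest of the buffer is then described by
 * page-aligned PRP entries in the chain frame.
 */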
1633 if (sge_len > first_prp_len) {
1634 sge_addr += first_prp_len;
1635 sge_len -= first_prp_len;
1636 } else if (sge_len == first_prp_len) {
1637 sge_addr = segs[i].ds_addr;
1638 sge_len = segs[i].ds_len;
1644 offset = (u_int32_t) (sge_addr & page_mask);
1646 /* Put PRP pointer due to page boundary*/
1647 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
1648 if (!page_mask_result) {
1649 device_printf(sc->mrsas_dev, "BRCM: Put prp pointer as we are at page boundary"
1650 " ptr_sgl: 0x%p\n", ptr_sgl);
1652 *ptr_sgl = (uintptr_t)ptr_sgl_phys;
1657 *ptr_sgl = sge_addr;
1663 sge_addr += mr_nvme_pg_size;
1664 sge_len -= mr_nvme_pg_size;
1665 data_len -= mr_nvme_pg_size;
1673 sge_addr = segs[i].ds_addr;
1674 sge_len = segs[i].ds_len;
1678 main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t);
1679 mrsas_atomic_inc(&sc->prp_count);
1684 * mrsas_data_load_cb: Callback entry point to build SGLs
1685 * input: Pointer to command packet as argument
1686 * Pointer to segment
 * Number of segments
 * Error
 *
 * This is the callback function of the bus dmamap load. It builds the SG list
1692 mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1694 struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
1695 struct mrsas_softc *sc = cmd->sc;
1696 boolean_t build_prp = false;
1699 cmd->error_code = error;
1700 device_printf(sc->mrsas_dev, "mrsas_data_load_cb_prp: error=%d\n", error);
1701 if (error == EFBIG) {
1702 cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
1706 if (cmd->flags & MRSAS_DIR_IN)
1707 bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
1708 BUS_DMASYNC_PREREAD);
1709 if (cmd->flags & MRSAS_DIR_OUT)
1710 bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
1711 BUS_DMASYNC_PREWRITE);
1712 if (nseg > sc->max_num_sge) {
1713 device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
/* Check whether PRPs should be built or IEEE SGLs */
1718 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
1719 (cmd->pdInterface == NVME_PD))
1720 build_prp = mrsas_is_prp_possible(cmd, segs, nseg);
1722 if (build_prp == true)
1723 mrsas_build_prp_nvme(cmd, segs, nseg);
1725 mrsas_build_ieee_sgl(cmd, segs, nseg);
1727 cmd->sge_count = nseg;
1731 * mrsas_freeze_simq: Freeze SIM queue
1732 * input: Pointer to command packet
1735 * This function freezes the sim queue.
1738 mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
1740 union ccb *ccb = (union ccb *)(cmd->ccb_ptr);
1742 xpt_freeze_simq(sim, 1);
1743 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1744 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1748 mrsas_xpt_freeze(struct mrsas_softc *sc)
1750 xpt_freeze_simq(sc->sim_0, 1);
1751 xpt_freeze_simq(sc->sim_1, 1);
1755 mrsas_xpt_release(struct mrsas_softc *sc)
1757 xpt_release_simq(sc->sim_0, 1);
1758 xpt_release_simq(sc->sim_1, 1);
1762 * mrsas_cmd_done: Perform remaining command completion
 * input: Adapter instance soft state, Pointer to command packet
 * This function calls mrsas_unmap_request() and releases the MPT command.
1768 mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
1770 mrsas_unmap_request(sc, cmd);
1772 mtx_lock(&sc->sim_lock);
1773 if (cmd->callout_owner) {
1774 callout_stop(&cmd->cm_callout);
1775 cmd->callout_owner = false;
1777 xpt_done(cmd->ccb_ptr);
1778 cmd->ccb_ptr = NULL;
1779 mtx_unlock(&sc->sim_lock);
1780 mrsas_release_mpt_cmd(cmd);
1784 * mrsas_cam_poll: Polling entry point
1785 * input: Pointer to SIM
 * This polls every MSI-x completion queue (or queue 0 when MSI-x is not in
 * use) for completed commands.
1790 mrsas_cam_poll(struct cam_sim *sim)
1793 struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
1795 if (sc->msix_vectors != 0){
1796 for (i=0; i<sc->msix_vectors; i++){
1797 mrsas_complete_cmd(sc, i);
1800 mrsas_complete_cmd(sc, 0);
1805 * mrsas_bus_scan: Perform bus scan
1806 * input: Adapter instance soft state
1808 * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should not
 * be called in FreeBSD 8.x and later versions, where the bus scan is automatic.
1813 mrsas_bus_scan(struct mrsas_softc *sc)
1818 if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
1821 if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
1822 xpt_free_ccb(ccb_0);
1825 mtx_lock(&sc->sim_lock);
1826 if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
1827 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1828 xpt_free_ccb(ccb_0);
1829 xpt_free_ccb(ccb_1);
1830 mtx_unlock(&sc->sim_lock);
1833 if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
1834 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1835 xpt_free_ccb(ccb_0);
1836 xpt_free_ccb(ccb_1);
1837 mtx_unlock(&sc->sim_lock);
1840 mtx_unlock(&sc->sim_lock);
1848 * mrsas_bus_scan_sim: Perform bus scan per SIM
1849 * input: adapter instance soft state
 * This function will be called from the event handler on LD creation/deletion,
 * JBOD on/off.
1855 mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
1859 if ((ccb = xpt_alloc_ccb()) == NULL) {
1862 mtx_lock(&sc->sim_lock);
1863 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
1864 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1866 mtx_unlock(&sc->sim_lock);
1869 mtx_unlock(&sc->sim_lock);
1876 * mrsas_track_scsiio: Track IOs for a given target in the mpt_cmd_list
1877 * input: Adapter instance soft state
1878 * Target ID of target
1879 * Bus ID of the target
1881 * This function checks for any pending IO in the whole mpt_cmd_list pool
 * with the bus_id and target_id passed as arguments. If any IO is found,
 * that means the target reset has not completed successfully.
 *
 * Returns FAIL if IOs are pending to the target device, else returns SUCCESS
1888 mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id)
1891 struct mrsas_mpt_cmd *mpt_cmd = NULL;
1893 for (i = 0 ; i < sc->max_fw_cmds; i++) {
1894 mpt_cmd = sc->mpt_cmd_list[i];
 * Check if the target_id and bus_id are the same as those of the timed-out IO
1899 if (mpt_cmd->ccb_ptr) {
1900 /* bus_id = 1 denotes a VD */
1902 tgt_id = (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1));
1904 if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id &&
1905 mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) {
1906 device_printf(sc->mrsas_dev,
1907 "IO commands pending to target id %d\n", tgt_id);
1918 * mrsas_tm_response_code: Prints TM response code received from FW
1919 * input: Adapter instance soft state
1920 * MPI reply returned from firmware
1925 mrsas_tm_response_code(struct mrsas_softc *sc,
1926 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
1930 switch (mpi_reply->ResponseCode) {
1931 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1932 desc = "task management request completed";
1934 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1935 desc = "invalid frame";
1937 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1938 desc = "task management request not supported";
1940 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1941 desc = "task management request failed";
1943 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1944 desc = "task management request succeeded";
1946 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1947 desc = "invalid lun";
1950 desc = "overlapped tag attempted";
1952 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1953 desc = "task queued, however not sent to target";
1959 device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
1960 mpi_reply->ResponseCode, desc);
1961 device_printf(sc->mrsas_dev,
1962 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
1963 "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
1964 mpi_reply->TerminationCount, mpi_reply->DevHandle,
1965 mpi_reply->Function, mpi_reply->TaskType,
1966 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
1971 * mrsas_issue_tm: Fires the TM command to FW and waits for completion
1972 * input: Adapter instance soft state
 * request descriptor compiled by mrsas_reset_targets
 *
 * Returns FAIL if the TM command TIMEDOUT from FW, else SUCCESS.
1978 mrsas_issue_tm(struct mrsas_softc *sc,
1979 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
1983 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
1984 sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "tm_sleep", 50*hz);
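/*
 * msleep() drops sim_lock while waiting on ocr_chan; the 50 * hz tick
 * argument bounds the wait to roughly 50 seconds, after which msleep()
 * returns EWOULDBLOCK and the TM is treated as timed out below.
 */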
1986 if (sleep_stat == EWOULDBLOCK) {
1987 device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
1995 * mrsas_reset_targets : Gathers info to fire a target reset command
1996 * input: Adapter instance soft state
 * This function compiles data for a target reset command to be fired to the FW
 * and then traverses the target_reset_pool to find targets with TIMEDOUT IOs.
2001 * Returns SUCCESS or FAIL
2003 int mrsas_reset_targets(struct mrsas_softc *sc)
2005 struct mrsas_mpt_cmd *tm_mpt_cmd = NULL;
2006 struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL;
2007 MR_TASK_MANAGE_REQUEST *mr_request;
2008 MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request;
2009 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2010 int retCode = FAIL, count, i, outstanding;
2011 u_int32_t MSIxIndex, bus_id;
2014 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
2017 outstanding = mrsas_atomic_read(&sc->fw_outstanding);
2020 device_printf(sc->mrsas_dev, "NO IOs pending...\n");
2021 mrsas_atomic_set(&sc->target_reset_outstanding, 0);
2024 } else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) {
2025 device_printf(sc->mrsas_dev, "Controller is not operational\n");
2028 /* Some more error checks will be added in future */
2031 /* Get an mpt frame and an index to fire the TM cmd */
2032 tm_mpt_cmd = mrsas_get_mpt_cmd(sc);
2038 req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1);
2040 device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n");
2044 memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
2046 req_desc->HighPriority.SMID = tm_mpt_cmd->index;
2047 req_desc->HighPriority.RequestFlags =
2048 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
2049 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2050 req_desc->HighPriority.MSIxIndex = 0;
2051 req_desc->HighPriority.LMID = 0;
2052 req_desc->HighPriority.Reserved1 = 0;
2053 tm_mpt_cmd->request_desc = req_desc;
2055 mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request;
2056 memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST));
2058 tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
2059 tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2060 tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2061 tm_mpi_request->TaskMID = 0; /* smid task */
2062 tm_mpi_request->LUN[1] = 0;
2064 /* Traverse the tm_mpt pool to get valid entries */
2065 for (i = 0 ; i < MRSAS_MAX_TM_TARGETS; i++) {
2066 if(!sc->target_reset_pool[i]) {
2069 tgt_mpt_cmd = sc->target_reset_pool[i];
2074 /* See if the target is tm capable or NOT */
2075 if (!tgt_mpt_cmd->tmCapable) {
2076 device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for "
2077 "CAM target:%d\n", tgt_id);
2083 tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle;
2085 if (i < (MRSAS_MAX_PD - 1)) {
2086 mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1;
2089 mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1;
2093 device_printf(sc->mrsas_dev, "TM will be fired for "
2094 "CAM target:%d and bus_id %d\n", tgt_id, bus_id);
2096 sc->ocr_chan = (void *)&tm_mpt_cmd;
2097 retCode = mrsas_issue_tm(sc, req_desc);
2098 if (retCode == FAIL)
2103 (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply;
2104 mrsas_tm_response_code(sc, mpi_reply);
2106 mrsas_atomic_dec(&sc->target_reset_outstanding);
2107 sc->target_reset_pool[i] = NULL;
2109 /* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
2110 mrsas_disable_intr(sc);
/* Wait for 1 second to let a parallel ISR calling the same
 * mrsas_complete_cmd() complete
2114 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
2116 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2117 mtx_unlock(&sc->sim_lock);
2118 for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2119 mrsas_complete_cmd(sc, MSIxIndex);
2120 mtx_lock(&sc->sim_lock);
2121 retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
2122 mrsas_enable_intr(sc);
2124 if (retCode == FAIL)
2128 device_printf(sc->mrsas_dev, "Number of targets outstanding "
2129 "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));
2132 mrsas_release_mpt_cmd(tm_mpt_cmd);
2134 device_printf(sc->mrsas_dev, "target reset %s!!\n",
2135 (retCode == SUCCESS) ? "SUCCESS" : "FAIL");