2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2015 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 /* Communications core for Avago Technologies (LSI) MPT2 */
38 /* TODO Move headers to mpsvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
48 #include <sys/malloc.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
57 #include <machine/bus.h>
58 #include <machine/resource.h>
61 #include <machine/stdarg.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_xpt.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #if __FreeBSD_version >= 900026
74 #include <cam/scsi/smp_all.h>
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
/*
 * Discovery watchdog tuning: one discovery timeout period is 20 seconds and
 * up to 10 consecutive timeouts are tolerated (200 seconds total).
 */
89 #define MPSSAS_DISCOVERY_TIMEOUT 20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
/*
 * EEDP (End-to-End Data Protection) flag selection, indexed by SCSI opcode.
 * PRO_R marks protected reads (check/remove protection info), PRO_W marks
 * protected writes (insert protection info).  NOTE(review): PRO_V expands to
 * the same insert-op flag as PRO_W in this listing — presumably intentional
 * for VERIFY-class opcodes, but confirm against the MPI2 headers.
 */
93 * static array to check SCSI OpCode for EEDP protection bits
95 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * 256-entry table, one byte of EEDP flags per SCSI opcode.  Non-zero rows
 * cover the READ(6/10/12/16), WRITE(6/10/12/16), WRITE AND VERIFY and
 * VERIFY opcode ranges; all other opcodes get no EEDP treatment.
 */
98 static uint8_t op_code_prot[256] = {
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Malloc type used for all mps SAS-layer allocations in this file. */
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
/*
 * Forward declarations for the file-local CAM SIM entry points
 * (action/poll), task-management send/complete handlers, SMP passthrough
 * support (FreeBSD >= 9.0), EEDP capability probing on older CAM versions,
 * and port-enable handling implemented below.
 */
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124 struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128 struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 #if __FreeBSD_version >= 900026
133 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
136 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137 #endif //FreeBSD_version >= 900026
138 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149 struct mps_command *cm);
/*
 * Linearly scan sassc->targets[], starting at index 'start', for the target
 * whose firmware device handle equals 'handle'.  Returns the matching
 * target (the return path is outside this listing — the numbering gap after
 * the 'if' suggests the match/NULL returns were dropped from this view).
 */
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
154 struct mpssas_target *target;
157 for (i = start; i < sassc->maxtargets; i++) {
158 target = &sassc->targets[i];
159 if (target->handle == handle)
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery. Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
/*
 * Take a discovery reference.  Only meaningful while MPSSAS_IN_STARTUP is
 * set; the first reference freezes the SIM queue so CAM holds I/O until
 * discovery completes.  The #if arm visible here (>= 1000039) has its
 * alternate branch outside this listing.
 */
174 mpssas_startup_increment(struct mpssas_softc *sassc)
176 MPS_FUNCTRACE(sassc->sc);
178 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 if (sassc->startup_refcount++ == 0) {
180 /* just starting, freeze the simq */
181 mps_dprint(sassc->sc, MPS_INIT,
182 "%s freezing simq\n", __func__);
183 #if __FreeBSD_version >= 1000039
186 xpt_freeze_simq(sassc->sim, 1);
188 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189 sassc->startup_refcount);
/*
 * If the SIM queue was frozen by this driver (MPSSAS_QUEUE_FROZEN), clear
 * the flag and release the queue.  Used on the reinit path so a pending
 * freeze does not survive a controller reset.
 */
194 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
196 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198 xpt_release_simq(sassc->sim, 1);
199 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
/*
 * Drop a discovery reference (counterpart of mpssas_startup_increment).
 * When the last reference is released during startup, clear
 * MPSSAS_IN_STARTUP, release the SIM queue, and trigger a full-bus rescan
 * so CAM picks up the topology discovered since the freeze.
 */
204 mpssas_startup_decrement(struct mpssas_softc *sassc)
206 MPS_FUNCTRACE(sassc->sc);
208 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
209 if (--sassc->startup_refcount == 0) {
210 /* finished all discovery-related actions, release
211 * the simq and rescan for the latest topology.
213 mps_dprint(sassc->sc, MPS_INIT,
214 "%s releasing simq\n", __func__);
215 sassc->flags &= ~MPSSAS_IN_STARTUP;
216 xpt_release_simq(sassc->sim, 1);
217 #if __FreeBSD_version >= 1000039
/* NULL target => wildcard rescan of the whole bus. */
220 mpssas_rescan_target(sassc->sc, NULL);
223 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
224 sassc->startup_refcount);
228 /* The firmware requires us to stop sending commands when we're doing task
229 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a task-management command from the high-priority command pool.
 * The return of 'tm' to the caller falls outside this listing.
 */
233 mpssas_alloc_tm(struct mps_softc *sc)
235 struct mps_command *tm;
237 tm = mps_alloc_high_priority_command(sc);
/*
 * Release a task-management command: clear the target's INRESET flag,
 * release the per-device queue that was frozen for the TM, free the CAM
 * path/CCB used for the freeze, and return the command to the
 * high-priority pool.
 */
242 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
244 int target_id = 0xFFFFFFFF;
250 * For TM's the devq is frozen for the device. Unfreeze it here and
251 * free the resources used for freezing the devq. Must clear the
252 * INRESET flag as well or scsi I/O will not work.
254 if (tm->cm_targ != NULL) {
255 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
256 target_id = tm->cm_targ->tid;
/* NOTE(review): tm->cm_ccb is dereferenced here; a NULL check likely
 * exists in the lines dropped from this listing — confirm upstream. */
259 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
261 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
262 xpt_free_path(tm->cm_ccb->ccb_h.path);
263 xpt_free_ccb(tm->cm_ccb);
266 mps_free_high_priority_command(sc, tm);
/*
 * Schedule an asynchronous CAM rescan.  A NULL 'targ' requests a wildcard
 * scan of the whole bus (XPT_SCAN_BUS); otherwise the target id is derived
 * from the target's index in sassc->targets[] and a single-target scan
 * (XPT_SCAN_TGT) is issued.  Allocation/path failures are logged and the
 * rescan is abandoned (error return paths are outside this listing).
 */
270 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
272 struct mpssas_softc *sassc = sc->sassc;
274 target_id_t targetid;
278 pathid = cam_sim_path(sassc->sim);
280 targetid = CAM_TARGET_WILDCARD;
282 targetid = targ - sassc->targets;
285 * Allocate a CCB and schedule a rescan.
287 ccb = xpt_alloc_ccb_nowait();
289 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
293 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
294 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
295 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
300 if (targetid == CAM_TARGET_WILDCARD)
301 ccb->ccb_h.func_code = XPT_SCAN_BUS;
303 ccb->ccb_h.func_code = XPT_SCAN_TGT;
305 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * Debug logging helper for a command.  Builds one line in an sbuf: the CAM
 * path (or a "noperiph" sim/bus/target/lun tuple when no CCB is attached),
 * the SCSI CDB and transfer length for XPT_SCSI_IO requests, the SMID, and
 * the caller's printf-style message.  Emits nothing unless 'level' is
 * enabled in sc->mps_debug.
 */
310 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
320 /* No need to be in here if debugging isn't enabled */
321 if ((cm->cm_sc->mps_debug & level) == 0)
324 sbuf_new(&sb, str, sizeof(str), 0)
328 if (cm->cm_ccb != NULL) {
329 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
331 sbuf_cat(&sb, path_str);
332 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
333 scsi_command_string(&cm->cm_ccb->csio, &sb);
334 sbuf_printf(&sb, "length %d ",
335 cm->cm_ccb->csio.dxfer_len);
/* No CCB attached: fall back to identifying the command by sim unit,
 * bus, target id (0xFFFFFFFF if no target) and lun. */
339 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
340 cam_sim_name(cm->cm_sc->sassc->sim),
341 cam_sim_unit(cm->cm_sc->sassc->sim),
342 cam_sim_bus(cm->cm_sc->sassc->sim),
343 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
347 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
348 sbuf_vprintf(&sb, fmt, ap);
350 mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  For volumes no SAS_OP_REMOVE_DEVICE
 * follows (see comment at mpssas_prepare_volume_remove): on success the
 * target's handle-related fields are cleared here and the TM is freed.
 * A NULL reply means a diag reset intervened; just free the TM.
 */
357 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
359 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
360 struct mpssas_target *targ;
365 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
366 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
370 /* XXX retry the remove after the diag reset completes? */
371 mps_dprint(sc, MPS_FAULT,
372 "%s NULL reply resetting device 0x%04x\n", __func__,
374 mpssas_free_tm(sc, tm);
378 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
379 MPI2_IOCSTATUS_SUCCESS) {
380 mps_dprint(sc, MPS_ERROR,
381 "IOCStatus = 0x%x while resetting device 0x%x\n",
382 le16toh(reply->IOCStatus), handle);
385 mps_dprint(sc, MPS_XINFO,
386 "Reset aborted %u commands\n", reply->TerminationCount);
387 mps_free_reply(sc, tm->cm_reply_data);
388 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
390 mps_dprint(sc, MPS_XINFO,
391 "clearing target %u handle 0x%04x\n", targ->tid, handle);
394 * Don't clear target if remove fails because things will get confusing.
395 * Leave the devname and sasaddr intact so that we know to avoid reusing
396 * this target id if possible, and so we can assign the same target id
397 * to this device if it comes back in the future.
399 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
400 MPI2_IOCSTATUS_SUCCESS) {
403 targ->encl_handle = 0x0;
404 targ->encl_slot = 0x0;
405 targ->exp_dev_handle = 0x0;
407 targ->linkrate = 0x0;
412 mpssas_free_tm(sc, tm);
417 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
418 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by firmware 'handle'.  On WD
 * controllers configured to always expose disks, removal is skipped.
 * Otherwise: mark the target MPSSAS_TARGET_INREMOVAL, allocate a
 * high-priority TM, and send a target reset whose completion
 * (mpssas_remove_volume) finishes the teardown.  On TM allocation failure
 * the target is rescanned instead so CAM state stays consistent.
 */
421 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
423 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424 struct mps_softc *sc;
425 struct mps_command *cm;
426 struct mpssas_target *targ = NULL;
428 MPS_FUNCTRACE(sassc->sc);
433 * If this is a WD controller, determine if the disk should be exposed
434 * to the OS or not. If disk should be exposed, return from this
435 * function without doing anything.
437 if (sc->WD_available && (sc->WD_hide_expose ==
438 MPS_WD_EXPOSE_ALWAYS)) {
443 targ = mpssas_find_target_by_handle(sassc, 0, handle);
445 /* FIXME: what is the action? */
446 /* We don't know about this device? */
447 mps_dprint(sc, MPS_ERROR,
448 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
452 targ->flags |= MPSSAS_TARGET_INREMOVAL;
454 cm = mpssas_alloc_tm(sc);
456 mps_dprint(sc, MPS_ERROR,
457 "%s: command alloc failure\n", __func__);
461 mpssas_rescan_target(sc, targ);
463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
/* NOTE(review): unlike mpssas_prepare_remove() below, this listing shows
 * no memset(req) and no htole16() on DevHandle — confirm upstream. */
464 req->DevHandle = targ->handle;
465 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
466 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
468 /* SAS Hard Link Reset / SATA Link Reset */
469 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
473 cm->cm_desc.HighPriority.RequestFlags =
474 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
475 cm->cm_complete = mpssas_remove_volume;
476 cm->cm_complete_data = (void *)(uintptr_t)handle;
478 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
479 __func__, targ->tid);
480 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
482 mps_map_command(sc, cm);
486 * The MPT2 firmware performs debounce on the link to avoid transient link
487 * errors and false removals. When it does decide that link has been lost
488 * and a device need to go away, it expects that the host will perform a
489 * target reset and then an op remove. The reset has the side-effect of
490 * aborting any outstanding requests for the device, which is required for
491 * the op-remove to succeed. It's not clear if the host should check for
492 * the device coming back alive after the reset.
/*
 * Begin removal of a bare (non-volume) device: mark the target
 * MPSSAS_TARGET_INREMOVAL, allocate a TM and send a target reset; the
 * completion handler (mpssas_remove_device) then issues the
 * SAS_OP_REMOVE_DEVICE step.  On TM allocation failure the target is
 * rescanned instead.
 */
495 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
497 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
498 struct mps_softc *sc;
499 struct mps_command *cm;
500 struct mpssas_target *targ = NULL;
502 MPS_FUNCTRACE(sassc->sc);
506 targ = mpssas_find_target_by_handle(sassc, 0, handle);
508 /* FIXME: what is the action? */
509 /* We don't know about this device? */
510 mps_dprint(sc, MPS_ERROR,
511 "%s : invalid handle 0x%x \n", __func__, handle);
515 targ->flags |= MPSSAS_TARGET_INREMOVAL;
517 cm = mpssas_alloc_tm(sc);
519 mps_dprint(sc, MPS_ERROR,
520 "%s: command alloc failure\n", __func__);
524 mpssas_rescan_target(sc, targ);
526 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
527 memset(req, 0, sizeof(*req));
528 req->DevHandle = htole16(targ->handle);
529 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
530 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
532 /* SAS Hard Link Reset / SATA Link Reset */
533 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
537 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
538 cm->cm_complete = mpssas_remove_device;
539 cm->cm_complete_data = (void *)(uintptr_t)handle;
541 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
542 __func__, targ->tid);
543 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
545 mps_map_command(sc, cm);
/*
 * Completion of the target-reset TM sent by mpssas_prepare_remove().
 * After logging the reset result, the same command is reused to issue the
 * second removal step, a SAS IO Unit Control REMOVE_DEVICE request
 * (completing in mpssas_remove_complete).  Any commands still queued on
 * the target are flushed back to CAM with CAM_DEV_NOT_THERE.
 */
549 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
551 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
552 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
553 struct mpssas_target *targ;
554 struct mps_command *next_cm;
559 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
560 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
564 * Currently there should be no way we can hit this case. It only
565 * happens when we have a failure to allocate chain frames, and
566 * task management commands don't have S/G lists.
568 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
569 mps_dprint(sc, MPS_ERROR,
570 "%s: cm_flags = %#x for remove of handle %#04x! "
571 "This should not happen!\n", __func__, tm->cm_flags,
/* NULL reply: most likely a diag reset raced us; abandon the remove. */
576 /* XXX retry the remove after the diag reset completes? */
577 mps_dprint(sc, MPS_FAULT,
578 "%s NULL reply resetting device 0x%04x\n", __func__,
580 mpssas_free_tm(sc, tm);
584 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
585 MPI2_IOCSTATUS_SUCCESS) {
586 mps_dprint(sc, MPS_ERROR,
587 "IOCStatus = 0x%x while resetting device 0x%x\n",
588 le16toh(reply->IOCStatus), handle);
591 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
592 le32toh(reply->TerminationCount));
593 mps_free_reply(sc, tm->cm_reply_data);
594 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
596 /* Reuse the existing command */
597 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
598 memset(req, 0, sizeof(*req));
599 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
600 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
601 req->DevHandle = htole16(handle);
603 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
604 tm->cm_complete = mpssas_remove_complete;
605 tm->cm_complete_data = (void *)(uintptr_t)handle;
607 mps_map_command(sc, tm);
/* Flush everything still queued on this target back to CAM; the device
 * is going away, so fail them with CAM_DEV_NOT_THERE. */
609 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
611 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
614 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
615 ccb = tm->cm_complete_data;
616 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
617 mpssas_scsiio_complete(sc, tm);
/*
 * Completion of the SAS_OP_REMOVE_DEVICE request (second removal step).
 * On IOC success, clear the target's handle-related state and free its
 * LUN list; on failure the target is intentionally left intact (see the
 * in-line comment) so the same target id can be re-used if the device
 * returns.  Always frees the TM.
 */
622 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
624 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
626 struct mpssas_target *targ;
627 struct mpssas_lun *lun;
631 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
632 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
635 * Currently there should be no way we can hit this case. It only
636 * happens when we have a failure to allocate chain frames, and
637 * task management commands don't have S/G lists.
639 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
640 mps_dprint(sc, MPS_XINFO,
641 "%s: cm_flags = %#x for remove of handle %#04x! "
642 "This should not happen!\n", __func__, tm->cm_flags,
644 mpssas_free_tm(sc, tm);
649 /* most likely a chip reset */
650 mps_dprint(sc, MPS_FAULT,
651 "%s NULL reply removing device 0x%04x\n", __func__, handle);
652 mpssas_free_tm(sc, tm);
656 mps_dprint(sc, MPS_XINFO,
657 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
658 handle, le16toh(reply->IOCStatus));
661 * Don't clear target if remove fails because things will get confusing.
662 * Leave the devname and sasaddr intact so that we know to avoid reusing
663 * this target id if possible, and so we can assign the same target id
664 * to this device if it comes back in the future.
666 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
667 MPI2_IOCSTATUS_SUCCESS) {
670 targ->encl_handle = 0x0;
671 targ->encl_slot = 0x0;
672 targ->exp_dev_handle = 0x0;
674 targ->linkrate = 0x0;
/* Tear down the per-target LUN list (freeing of each lun element is
 * in lines outside this listing). */
678 while(!SLIST_EMPTY(&targ->luns)) {
679 lun = SLIST_FIRST(&targ->luns);
680 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
686 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (device status,
 * discovery, broadcast primitives, topology changes, enclosure status,
 * IR/RAID changes, log entries) and register mpssas_evt_handler for them.
 */
690 mpssas_register_events(struct mps_softc *sc)
692 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
695 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 setbit(events, MPI2_EVENT_IR_VOLUME);
704 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
708 mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the softc and targets[] array, create
 * the CAM devq and SIM, start the firmware-event taskqueue, register the
 * SCSI bus, enter startup mode (simq frozen until discovery completes),
 * create a wildcard path and register an async handler (AC_ADVINFO_CHANGED
 * on newer CAM, AC_FOUND_DEVICE otherwise) used for EEDP detection, and
 * finally register for firmware events.  Error unwinding paths fall
 * outside this listing.
 */
715 mps_attach_sas(struct mps_softc *sc)
717 struct mpssas_softc *sassc;
/* NOTE(review): malloc with M_WAITOK cannot return NULL, so the failure
 * prints below look vestigial — confirm against upstream history. */
723 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
725 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
731 * XXX MaxTargets could change during a reinit. Since we don't
732 * resize the targets[] array during such an event, cache the value
733 * of MaxTargets here so that we don't get into trouble later. This
734 * should move into the reinit logic.
736 sassc->maxtargets = sc->facts->MaxTargets;
737 sassc->targets = malloc(sizeof(struct mpssas_target) *
738 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
739 if(!sassc->targets) {
740 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
748 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
749 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
754 unit = device_get_unit(sc->mps_dev);
755 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
756 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
757 if (sassc->sim == NULL) {
758 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
763 TAILQ_INIT(&sassc->ev_queue);
765 /* Initialize taskqueue for Event Handling */
766 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
767 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
768 taskqueue_thread_enqueue, &sassc->ev_tq);
769 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
770 device_get_nameunit(sc->mps_dev));
775 * XXX There should be a bus for every port on the adapter, but since
776 * we're just going to fake the topology for now, we'll pretend that
777 * everything is just a target on a single bus.
779 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
780 mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
787 * Assume that discovery events will start right away.
789 * Hold off boot until discovery is complete.
791 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
792 sc->sassc->startup_refcount = 0;
793 mpssas_startup_increment(sassc);
795 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
798 * Register for async events so we can determine the EEDP
799 * capabilities of devices.
801 status = xpt_create_path(&sassc->path, /*periph*/NULL,
802 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
804 if (status != CAM_REQ_CMP) {
805 mps_printf(sc, "Error %#x creating sim path\n", status);
810 #if (__FreeBSD_version >= 1000006) || \
811 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
812 event = AC_ADVINFO_CHANGED;
814 event = AC_FOUND_DEVICE;
816 status = xpt_register_async(event, mpssas_async, sc,
818 if (status != CAM_REQ_CMP) {
819 mps_dprint(sc, MPS_ERROR,
820 "Error %#x registering async handler for "
821 "AC_ADVINFO_CHANGED events\n", status);
822 xpt_free_path(sassc->path);
/* EEDP probing is best-effort: warn but continue the attach. */
826 if (status != CAM_REQ_CMP) {
828 * EEDP use is the exception, not the rule.
829 * Warn the user, but do not fail to attach.
831 mps_printf(sc, "EEDP capabilities disabled.\n");
836 mpssas_register_events(sc);
/*
 * Detach the SAS/CAM layer, tearing down in roughly reverse attach order:
 * deregister firmware events, drain/free the event taskqueue (with the
 * driver lock dropped, per the in-line comment), deregister the async
 * handler and free the wildcard path, release the startup simq freeze if
 * still held, deregister and free the SIM and devq, free each target's
 * LUN list, and finally free the targets[] array.  No-op if the SAS layer
 * never attached (sc->sassc == NULL).
 */
844 mps_detach_sas(struct mps_softc *sc)
846 struct mpssas_softc *sassc;
847 struct mpssas_lun *lun, *lun_tmp;
848 struct mpssas_target *targ;
853 if (sc->sassc == NULL)
857 mps_deregister_events(sc, sassc->mpssas_eh);
860 * Drain and free the event handling taskqueue with the lock
861 * unheld so that any parallel processing tasks drain properly
862 * without deadlocking.
864 if (sassc->ev_tq != NULL)
865 taskqueue_free(sassc->ev_tq);
867 /* Make sure CAM doesn't wedge if we had to bail out early. */
870 /* Deregister our async handler */
871 if (sassc->path != NULL) {
872 xpt_register_async(0, mpssas_async, sc, sassc->path);
873 xpt_free_path(sassc->path);
877 if (sassc->flags & MPSSAS_IN_STARTUP)
878 xpt_release_simq(sassc->sim, 1);
880 if (sassc->sim != NULL) {
881 xpt_bus_deregister(cam_sim_path(sassc->sim));
882 cam_sim_free(sassc->sim, FALSE);
887 if (sassc->devq != NULL)
888 cam_simq_free(sassc->devq);
890 for(i=0; i< sassc->maxtargets ;i++) {
891 targ = &sassc->targets[i];
892 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
896 free(sassc->targets, M_MPT2);
/*
 * Called when firmware discovery finishes: cancel the pending discovery
 * watchdog callout, if one was armed.
 */
904 mpssas_discovery_end(struct mpssas_softc *sassc)
906 struct mps_softc *sc = sassc->sc;
910 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
911 callout_stop(&sassc->discovery_callout);
/*
 * Main CAM SIM action entry point; dispatches on ccb_h.func_code with the
 * driver mutex held.  Handles XPT_PATH_INQ (advertise HBA capabilities and
 * maxio), XPT_GET_TRAN_SETTINGS (report per-target SAS link rate),
 * XPT_CALC_GEOMETRY, XPT_RESET_DEV, SCSI I/O, SMP passthrough on
 * FreeBSD >= 9.0, and returns CAM_FUNC_NOTAVAIL for everything else.
 */
916 mpssas_action(struct cam_sim *sim, union ccb *ccb)
918 struct mpssas_softc *sassc;
920 sassc = cam_sim_softc(sim);
922 MPS_FUNCTRACE(sassc->sc);
923 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
924 ccb->ccb_h.func_code);
925 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
927 switch (ccb->ccb_h.func_code) {
930 struct ccb_pathinq *cpi = &ccb->cpi;
931 struct mps_softc *sc = sassc->sc;
932 uint8_t sges_per_frame;
934 cpi->version_num = 1;
935 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
936 cpi->target_sprt = 0;
937 #if __FreeBSD_version >= 1000039
938 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
940 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
942 cpi->hba_eng_cnt = 0;
943 cpi->max_target = sassc->maxtargets - 1;
/* The initiator claims the highest target id on the faked bus. */
945 cpi->initiator_id = sassc->maxtargets - 1;
946 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
947 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
948 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
949 cpi->unit_number = cam_sim_unit(sim);
950 cpi->bus_id = cam_sim_bus(sim);
951 cpi->base_transfer_speed = 150000;
952 cpi->transport = XPORT_SAS;
953 cpi->transport_version = 0;
954 cpi->protocol = PROTO_SCSI;
955 cpi->protocol_version = SCSI_REV_SPC;
958 * Max IO Size is Page Size * the following:
959 * ((SGEs per frame - 1 for chain element) *
960 * Max Chain Depth) + 1 for no chain needed in last frame
962 * If user suggests a Max IO size to use, use the smaller of the
963 * user's value and the calculated value as long as the user's
964 * value is larger than 0. The user's value is in pages.
966 sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
967 sizeof(MPI2_SGE_SIMPLE64)) - 1;
968 cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
969 cpi->maxio *= PAGE_SIZE;
970 if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
972 cpi->maxio = sc->max_io_pages * PAGE_SIZE;
973 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
976 case XPT_GET_TRAN_SETTINGS:
978 struct ccb_trans_settings *cts;
979 struct ccb_trans_settings_sas *sas;
980 struct ccb_trans_settings_scsi *scsi;
981 struct mpssas_target *targ;
984 sas = &cts->xport_specific.sas;
985 scsi = &cts->proto_specific.scsi;
987 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
988 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
989 cts->ccb_h.target_id));
990 targ = &sassc->targets[cts->ccb_h.target_id];
991 if (targ->handle == 0x0) {
992 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
996 cts->protocol_version = SCSI_REV_SPC2;
997 cts->transport = XPORT_SAS;
998 cts->transport_version = 0;
1000 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the firmware-reported negotiated link rate to kb/s. */
1001 switch (targ->linkrate) {
1003 sas->bitrate = 150000;
1006 sas->bitrate = 300000;
1009 sas->bitrate = 600000;
1015 cts->protocol = PROTO_SCSI;
1016 scsi->valid = CTS_SCSI_VALID_TQ;
1017 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1019 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1022 case XPT_CALC_GEOMETRY:
1023 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1024 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1027 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1028 mpssas_action_resetdev(sassc, ccb);
/* Abort/bus-reset requests are acknowledged without real work. */
1033 mps_dprint(sassc->sc, MPS_XINFO,
1034 "mpssas_action faking success for abort or reset\n");
1035 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1038 mpssas_action_scsiio(sassc, ccb);
1040 #if __FreeBSD_version >= 900026
1042 mpssas_action_smpio(sassc, ccb);
1046 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Notify CAM of a reset event: build a path for (target, lun) on this SIM
 * and post 'ac_code' (e.g. AC_BUS_RESET, AC_SENT_BDR) via xpt_async.
 * Wildcards are accepted for both target and lun.
 */
1054 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1055 target_id_t target_id, lun_id_t lun_id)
1057 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1058 struct cam_path *path;
1060 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1061 ac_code, target_id, (uintmax_t)lun_id);
1063 if (xpt_create_path(&path, NULL,
1064 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1065 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1070 xpt_async(ac_code, path, NULL);
1071 xpt_free_path(path);
/*
 * Diag-reset helper: walk every command slot (index 0 is reserved — the
 * loop starts at 1) and force-complete each outstanding command with a
 * NULL reply, either by invoking its completion callback, waking a polled/
 * sleeping waiter, and fixing up the io_cmds_active accounting.  Commands
 * that are neither free nor completable are logged as anomalies.
 */
1075 mpssas_complete_all_commands(struct mps_softc *sc)
1077 struct mps_command *cm;
1082 mtx_assert(&sc->mps_mtx, MA_OWNED);
1084 /* complete all commands with a NULL reply */
1085 for (i = 1; i < sc->num_reqs; i++) {
1086 cm = &sc->commands[i];
1087 cm->cm_reply = NULL;
1090 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1091 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1093 if (cm->cm_complete != NULL) {
1094 mpssas_log_command(cm, MPS_RECOVERY,
1095 "completing cm %p state %x ccb %p for diag reset\n",
1096 cm, cm->cm_state, cm->cm_ccb);
1098 cm->cm_complete(sc, cm);
1102 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1103 mpssas_log_command(cm, MPS_RECOVERY,
1104 "waking up cm %p state %x ccb %p for diag reset\n",
1105 cm, cm->cm_state, cm->cm_ccb);
1110 if (cm->cm_sc->io_cmds_active != 0) {
1111 cm->cm_sc->io_cmds_active--;
1113 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
1114 "io_cmds_active is out of sync - resynching to "
1118 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1119 /* this should never happen, but if it does, log */
1120 mpssas_log_command(cm, MPS_RECOVERY,
1121 "cm %p state %x flags 0x%x ccb %p during diag "
1122 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: re-enter startup mode with the simq
 * frozen, announce an AC_BUS_RESET to CAM, force-complete all outstanding
 * commands, then invalidate every cached target handle — handles are
 * firmware-assigned and change across a reset, so all targets must be
 * rediscovered (INDIAGRESET flag marks them for that).
 */
1129 mpssas_handle_reinit(struct mps_softc *sc)
1133 /* Go back into startup mode and freeze the simq, so that CAM
1134 * doesn't send any commands until after we've rediscovered all
1135 * targets and found the proper device handles for them.
1137 * After the reset, portenable will trigger discovery, and after all
1138 * discovery-related activities have finished, the simq will be
1141 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1142 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1143 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1144 mpssas_startup_increment(sc->sassc);
1146 /* notify CAM of a bus reset */
1147 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1150 /* complete and cleanup after all outstanding commands */
1151 mpssas_complete_all_commands(sc);
1153 mps_dprint(sc, MPS_INIT,
1154 "%s startup %u after command completion\n", __func__,
1155 sc->sassc->startup_refcount);
1157 /* zero all the target handles, since they may change after the
1158 * reset, and we have to rediscover all the targets and use the new
1161 for (i = 0; i < sc->sassc->maxtargets; i++) {
1162 if (sc->sassc->targets[i].outstanding != 0)
1163 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1164 i, sc->sassc->targets[i].outstanding);
1165 sc->sassc->targets[i].handle = 0x0;
1166 sc->sassc->targets[i].exp_dev_handle = 0x0;
1167 sc->sassc->targets[i].outstanding = 0;
1168 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command itself times out.  Logs the
 * stuck TM; the recovery action (a diag reset, per driver convention) is
 * in lines outside this listing.
 */
1173 mpssas_tm_timeout(void *data)
1175 struct mps_command *tm = data;
1176 struct mps_softc *sc = tm->cm_sc;
1178 mtx_assert(&sc->mps_mtx, MA_OWNED);
1180 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1181 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LUN-reset TM issued during error recovery.
 * Stops the TM timeout callout, validates the reply (NULL reply from a
 * diag reset just cleans up), then counts commands still outstanding on
 * the same LUN: zero means recovery succeeded — announce AC_SENT_BDR and
 * move on to the next timed-out command (if any); non-zero means the LUN
 * reset effectively failed, so recovery escalates to a full target reset
 * via mpssas_send_reset().
 */
1186 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1188 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1189 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1190 unsigned int cm_count = 0;
1191 struct mps_command *cm;
1192 struct mpssas_target *targ;
1194 callout_stop(&tm->cm_callout);
1196 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1197 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1201 * Currently there should be no way we can hit this case. It only
1202 * happens when we have a failure to allocate chain frames, and
1203 * task management commands don't have S/G lists.
1204 * XXXSL So should it be an assertion?
1206 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1207 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1208 "This should not happen!\n", __func__, tm->cm_flags);
1209 mpssas_free_tm(sc, tm);
1213 if (reply == NULL) {
1214 mpssas_log_command(tm, MPS_RECOVERY,
1215 "NULL reset reply for tm %p\n", tm);
1216 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1217 /* this completion was due to a reset, just cleanup */
1219 mpssas_free_tm(sc, tm);
1222 /* we should have gotten a reply. */
1228 mpssas_log_command(tm, MPS_RECOVERY,
1229 "logical unit reset status 0x%x code 0x%x count %u\n",
1230 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1231 le32toh(reply->TerminationCount));
1233 /* See if there are any outstanding commands for this LUN.
1234 * This could be made more efficient by using a per-LU data
1235 * structure of some sort.
1237 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1238 if (cm->cm_lun == tm->cm_lun)
1242 if (cm_count == 0) {
1243 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1244 "logical unit %u finished recovery after reset\n",
1247 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1250 /* we've finished recovery for this logical unit. check and
1251 * see if some other logical unit has a timedout command
1252 * that needs to be processed.
1254 cm = TAILQ_FIRST(&targ->timedout_commands);
1256 mpssas_send_abort(sc, tm, cm);
1260 mpssas_free_tm(sc, tm);
1264 /* if we still have commands for this LUN, the reset
1265 * effectively failed, regardless of the status reported.
1266 * Escalate to a target reset.
1268 mpssas_log_command(tm, MPS_RECOVERY,
1269 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1271 mpssas_send_reset(sc, tm,
1272 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management request.
 * If the target has no outstanding commands, recovery for the target
 * (and all of its LUNs) is finished: announce a BDR to CAM and free the
 * TM.  Otherwise the reset effectively failed and recovery escalates.
 * NOTE(review): the assignment of 'targ' (presumably tm->cm_targ), the
 * returns/braces, and the final escalation action after the last log
 * are elided from this excerpt -- confirm against the full source.
 */
1277 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1279 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1280 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1281 struct mpssas_target *targ;
/* Cancel the mpssas_tm_timeout callout now that the TM completed. */
1283 callout_stop(&tm->cm_callout);
1285 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1286 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1290 * Currently there should be no way we can hit this case. It only
1291 * happens when we have a failure to allocate chain frames, and
1292 * task management commands don't have S/G lists.
1294 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1295 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1296 "This should not happen!\n", __func__, tm->cm_flags);
1297 mpssas_free_tm(sc, tm);
/* A NULL reply usually means the TM was flushed by a diag reset. */
1301 if (reply == NULL) {
1302 mpssas_log_command(tm, MPS_RECOVERY,
1303 "NULL reset reply for tm %p\n", tm);
1304 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1305 /* this completion was due to a reset, just cleanup */
1307 mpssas_free_tm(sc, tm);
1310 /* we should have gotten a reply. */
1316 mpssas_log_command(tm, MPS_RECOVERY,
1317 "target reset status 0x%x code 0x%x count %u\n",
1318 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1319 le32toh(reply->TerminationCount));
/* Success criterion: nothing outstanding on the whole target. */
1321 if (targ->outstanding == 0) {
1322 /* we've finished recovery for this target and all
1323 * of its logical units.
1325 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1326 "recovery finished after target reset\n");
1328 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1332 mpssas_free_tm(sc, tm);
1335 /* after a target reset, if this target still has
1336 * outstanding commands, the reset effectively failed,
1337 * regardless of the status reported. escalate.
1339 mpssas_log_command(tm, MPS_RECOVERY,
1340 "target reset complete for tm %p, but still have %u command(s)\n",
1341 tm, targ->outstanding);
1346 #define MPS_RESET_TIMEOUT 30
/*
 * Build and submit an MPI2 SCSI TASK MGMT request of the given 'type'
 * (logical-unit reset or target reset) using the pre-allocated TM
 * command 'tm'.  Sets the matching completion handler, arms a
 * MPS_RESET_TIMEOUT-second watchdog (mpssas_tm_timeout), and maps the
 * command to the hardware on the high-priority queue.
 * NOTE(review): the return-type line, the 'err' declaration, and the
 * early returns are elided from this excerpt.
 */
1349 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1351 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1352 struct mpssas_target *target;
1355 target = tm->cm_targ;
/* A zero devhandle means the device is gone; nothing to reset. */
1356 if (target->handle == 0) {
1357 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1358 __func__, target->tid);
1362 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1363 req->DevHandle = htole16(target->handle);
1364 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1365 req->TaskType = type;
1367 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1368 /* XXX Need to handle invalid LUNs */
1369 MPS_SET_LUN(req->LUN, tm->cm_lun);
1370 tm->cm_targ->logical_unit_resets++;
1371 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1372 "sending logical unit reset\n");
1373 tm->cm_complete = mpssas_logical_unit_reset_complete;
/* Quiesce only the one LUN while the LU reset is in flight. */
1374 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1376 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1378 * Target reset method =
1379 * SAS Hard Link Reset / SATA Link Reset
1381 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1382 tm->cm_targ->target_resets++;
1383 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1384 "sending target reset\n");
1385 tm->cm_complete = mpssas_target_reset_complete;
/* Target reset affects every LUN, hence the wildcard. */
1386 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1389 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TM requests ride the high-priority descriptor queue. */
1394 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1395 tm->cm_complete_data = (void *)tm;
1397 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1398 mpssas_tm_timeout, tm);
1400 err = mps_map_command(sc, tm);
1402 mpssas_log_command(tm, MPS_RECOVERY,
1403 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task-management request.
 * Drives the per-target recovery state machine: if the timed-out list
 * is now empty, recovery is done; if the head of the list is a
 * different command than the one we just aborted, continue by aborting
 * it; if the aborted command is still at the head, the abort failed and
 * we escalate to a logical-unit reset.
 * NOTE(review): returns, braces, and some lines (e.g. any use of the
 * declared 'targ') are elided from this excerpt.
 */
1411 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1413 struct mps_command *cm;
1414 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1415 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1416 struct mpssas_target *targ;
/* Cancel the mpssas_tm_timeout watchdog armed by mpssas_send_abort. */
1418 callout_stop(&tm->cm_callout);
1420 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1421 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1425 * Currently there should be no way we can hit this case. It only
1426 * happens when we have a failure to allocate chain frames, and
1427 * task management commands don't have S/G lists.
1429 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1430 mpssas_log_command(tm, MPS_RECOVERY,
1431 "cm_flags = %#x for abort %p TaskMID %u!\n",
1432 tm->cm_flags, tm, le16toh(req->TaskMID));
1433 mpssas_free_tm(sc, tm);
/* A NULL reply usually means the TM was flushed by a diag reset. */
1437 if (reply == NULL) {
1438 mpssas_log_command(tm, MPS_RECOVERY,
1439 "NULL abort reply for tm %p TaskMID %u\n",
1440 tm, le16toh(req->TaskMID));
1441 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1442 /* this completion was due to a reset, just cleanup */
1444 mpssas_free_tm(sc, tm);
1447 /* we should have gotten a reply. */
1453 mpssas_log_command(tm, MPS_RECOVERY,
1454 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1455 le16toh(req->TaskMID),
1456 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1457 le32toh(reply->TerminationCount));
/* Inspect the head of the target's timed-out queue to decide next step. */
1459 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1461 /* if there are no more timedout commands, we're done with
1462 * error recovery for this target.
1464 mpssas_log_command(tm, MPS_RECOVERY,
1465 "finished recovery after aborting TaskMID %u\n",
1466 le16toh(req->TaskMID));
1469 mpssas_free_tm(sc, tm);
1471 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1472 /* abort success, but we have more timedout commands to abort */
1473 mpssas_log_command(tm, MPS_RECOVERY,
1474 "continuing recovery after aborting TaskMID %u\n",
1475 le16toh(req->TaskMID));
/* Reuse the same TM command for the next abort. */
1477 mpssas_send_abort(sc, tm, cm);
1480 /* we didn't get a command completion, so the abort
1481 * failed as far as we're concerned. escalate.
1483 mpssas_log_command(tm, MPS_RECOVERY,
1484 "abort failed for TaskMID %u tm %p\n",
1485 le16toh(req->TaskMID), tm);
1487 mpssas_send_reset(sc, tm,
1488 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1492 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and submit an MPI2 ABORT TASK task-management request for the
 * timed-out command 'cm', using the pre-allocated TM command 'tm'.
 * Arms an MPS_ABORT_TIMEOUT-second watchdog and submits on the
 * high-priority queue; completion lands in mpssas_abort_complete.
 * NOTE(review): the assignment of 'targ' (presumably cm->cm_targ), the
 * 'err' declaration, and returns/braces are elided from this excerpt.
 */
1495 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1497 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1498 struct mpssas_target *targ;
/* A zero devhandle means the device is gone; nothing to abort against. */
1502 if (targ->handle == 0) {
1503 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1504 __func__, cm->cm_ccb->ccb_h.target_id);
1508 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1509 "Aborting command %p\n", cm);
1511 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1512 req->DevHandle = htole16(targ->handle);
1513 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1514 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1516 /* XXX Need to handle invalid LUNs */
1517 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the victim command by its SMID. */
1519 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1522 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1523 tm->cm_complete = mpssas_abort_complete;
1524 tm->cm_complete_data = (void *)tm;
1525 tm->cm_targ = cm->cm_targ;
1526 tm->cm_lun = cm->cm_lun;
1528 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1529 mpssas_tm_timeout, tm);
/* NOTE(review): message says "Sending reset" but this path sends an
 * ABORT TASK -- wording looks misleading; confirm intent. */
1533 mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1534 __func__, targ->tid);
1535 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1537 err = mps_map_command(sc, tm);
/* NOTE(review): req->TaskMID is stored little-endian above but logged
 * here without le16toh() -- only correct on LE hosts; verify. */
1539 mps_dprint(sc, MPS_RECOVERY,
1540 "error %d sending abort for cm %p SMID %u\n",
1541 err, cm, req->TaskMID);
/*
 * Per-command timeout handler for SCSI I/O (armed in
 * mpssas_action_scsiio via callout_reset_sbt).  Re-runs the interrupt
 * handler to catch a completion that raced the timeout, then marks the
 * command timed out, queues it on the target's timed-out list, and
 * kicks off (or joins) TM-based recovery for the target.
 * NOTE(review): the return-type line and the assignments of 'sc' and
 * 'targ' (presumably cm->cm_sc / cm->cm_targ) are elided here.
 */
1546 mpssas_scsiio_timeout(void *data)
1548 struct mps_softc *sc;
1549 struct mps_command *cm;
1550 struct mpssas_target *targ;
1552 cm = (struct mps_command *)data;
1556 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* NOTE(review): format says "cm %p" but the argument passed is 'sc' --
 * looks like the wrong pointer is logged; TODO confirm and fix. */
1558 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1561 * Run the interrupt handler to make sure it's not pending. This
1562 * isn't perfect because the command could have already completed
1563 * and been re-used, though this is unlikely.
1565 mps_intr_locked(sc);
/* If the command was freed by the interrupt pass above, it completed
 * just in time -- no recovery needed. */
1566 if (cm->cm_state == MPS_CM_STATE_FREE) {
1567 mpssas_log_command(cm, MPS_XINFO,
1568 "SCSI command %p almost timed out\n", cm);
1572 if (cm->cm_ccb == NULL) {
1573 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1580 mpssas_log_command(cm, MPS_ERROR, "command timeout %d cm %p target "
1581 "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
1584 /* XXX first, check the firmware state, to see if it's still
1585 * operational. if not, do a diag reset.
1587 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1588 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1589 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1591 if (targ->tm != NULL) {
1592 /* target already in recovery, just queue up another
1593 * timedout command to be processed later.
1595 mps_dprint(sc, MPS_RECOVERY,
1596 "queued timedout cm %p for processing by tm %p\n",
1599 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1600 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1603 /* start recovery by aborting the first timedout command */
1604 mpssas_send_abort(sc, targ->tm, cm);
1607 /* XXX queue this target up for recovery once a TM becomes
1608 * available. The firmware only has a limited number of
1609 * HighPriority credits for the high priority requests used
1610 * for task management, and we ran out.
1612 * Isilon: don't worry about this for now, since we have
1613 * more credits than disks in an enclosure, and limit
1614 * ourselves to one TM per target for recovery.
1616 mps_dprint(sc, MPS_RECOVERY,
1617 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, build an
 * MPI2 SCSI_IO request (direction, tag queuing, TLR, LUN, CDB, optional
 * EEDP/DIF protection fields), arm the per-command timeout, and submit
 * via mps_map_command().
 * NOTE(review): this excerpt elides many original lines (returns,
 * braces, case labels, and assignments such as sc/csio), so the control
 * flow shown is incomplete.
 */
1623 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1625 MPI2_SCSI_IO_REQUEST *req;
1626 struct ccb_scsiio *csio;
1627 struct mps_softc *sc;
1628 struct mpssas_target *targ;
1629 struct mpssas_lun *lun;
1630 struct mps_command *cm;
1631 uint8_t i, lba_byte, *ref_tag_addr;
1632 uint16_t eedp_flags;
1633 uint32_t mpi_control;
1637 mtx_assert(&sc->mps_mtx, MA_OWNED);
1640 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1641 ("Target %d out of bounds in XPT_SCSI_IO\n",
1642 csio->ccb_h.target_id));
1643 targ = &sassc->targets[csio->ccb_h.target_id];
1644 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* A zero handle means the device is not mapped to hardware. */
1645 if (targ->handle == 0x0) {
1646 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1647 __func__, csio->ccb_h.target_id);
1648 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID member disks are addressed via the volume, not directly. */
1652 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1653 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1654 "supported %u\n", __func__, csio->ccb_h.target_id);
1655 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1660 * Sometimes, it is possible to get a command that is not "In
1661 * Progress" and was actually aborted by the upper layer. Check for
1662 * this here and complete the command without error.
1664 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1665 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1666 "target %u\n", __func__, csio->ccb_h.target_id);
1671 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1672 * that the volume has timed out. We want volumes to be enumerated
1673 * until they are deleted/removed, not just failed.
1675 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1676 if (targ->devinfo == 0)
1677 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
1679 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
/* Refuse new I/O while the controller is shutting down. */
1684 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1685 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1686 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1692 * If target has a reset in progress, freeze the devq and return. The
1693 * devq will be released when the TM reset is finished.
1695 if (targ->flags & MPSSAS_TARGET_INRESET) {
1696 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1697 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1698 __func__, targ->tid);
1699 xpt_freeze_devq(ccb->ccb_h.path, 1);
1704 cm = mps_alloc_command(sc);
/* Out of commands or mid-diag-reset: requeue the CCB and freeze the
 * SIM queue until resources come back. */
1705 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
/* NOTE(review): cm may be NULL here; the original presumably guards
 * this call with "if (cm != NULL)" -- line not visible in excerpt. */
1707 mps_free_command(sc, cm);
1709 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1710 xpt_freeze_simq(sassc->sim, 1);
1711 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1713 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1714 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI_IO request frame. */
1719 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1720 bzero(req, sizeof(*req));
1721 req->DevHandle = htole16(targ->handle);
1722 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1724 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1725 req->SenseBufferLength = MPS_SENSE_LEN;
1727 req->ChainOffset = 0;
1728 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1733 req->DataLength = htole32(csio->dxfer_len);
1734 req->BidirectionalDataLength = 0;
1735 req->IoFlags = htole16(csio->cdb_len);
1738 /* Note: BiDirectional transfers are not supported */
1739 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1741 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1742 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1745 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1746 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1750 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field (units of 4). */
1754 if (csio->cdb_len == 32)
1755 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1757 * It looks like the hardware doesn't require an explicit tag
1758 * number for each transaction. SAM Task Management not supported
1761 switch (csio->tag_action) {
1762 case MSG_HEAD_OF_Q_TAG:
1763 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1765 case MSG_ORDERED_Q_TAG:
1766 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1769 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1771 case CAM_TAG_ACTION_NONE:
1772 case MSG_SIMPLE_Q_TAG:
1774 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Fold in the per-target Transport Layer Retries setting. */
1777 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1778 req->Control = htole32(mpi_control);
1779 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1780 mps_free_command(sc, cm);
1781 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from whichever form CAM delivered it in. */
1786 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1787 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1789 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
/* NOTE(review): IoFlags was already set identically above (orig line
 * 1735); this second assignment looks redundant -- confirm. */
1790 req->IoFlags = htole16(csio->cdb_len);
1793 * Check if EEDP is supported and enabled. If it is then check if the
1794 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1795 * is formatted for EEDP support. If all of this is true, set CDB up
1796 * for EEDP transfer.
1798 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1799 if (sc->eedp_enabled && eedp_flags) {
/* Find the LUN object for this I/O to check its EEDP format state. */
1800 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1801 if (lun->lun_id == csio->ccb_h.target_lun) {
1806 if ((lun != NULL) && (lun->eedp_formatted)) {
1807 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1808 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1809 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1810 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1811 req->EEDPFlags = htole16(eedp_flags);
1814 * If CDB less than 32, fill in Primary Ref Tag with
1815 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1816 * already there. Also, set protection bit. FreeBSD
1817 * currently does not support CDBs bigger than 16, but
1818 * the code doesn't hurt, and will be here for the
1821 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 for 16-byte CDBs, byte 2 otherwise. */
1822 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1823 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1824 PrimaryReferenceTag;
1825 for (i = 0; i < 4; i++) {
1827 req->CDB.CDB32[lba_byte + i];
1830 req->CDB.EEDP32.PrimaryReferenceTag =
1831 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1832 req->CDB.EEDP32.PrimaryApplicationTagMask =
1834 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1838 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1839 req->EEDPFlags = htole16(eedp_flags);
1840 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* S/G setup: commands with data use the CCB's data mapping. */
1846 cm->cm_length = csio->dxfer_len;
1847 if (cm->cm_length != 0) {
1849 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1853 cm->cm_sge = &req->SGL;
1854 cm->cm_sglsize = (32 - 24) * 4;
1855 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1856 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1857 cm->cm_complete = mpssas_scsiio_complete;
1858 cm->cm_complete_data = ccb;
1860 cm->cm_lun = csio->ccb_h.target_lun;
1864 * If HBA is a WD and the command is not for a retry, try to build a
1865 * direct I/O message. If failed, or the command is for a retry, send
1866 * the I/O to the IR volume itself.
1868 if (sc->WD_valid_config) {
1869 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1870 mpssas_direct_drive_io(sassc, cm, ccb);
1872 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
/* Per-command timeout: CCB timeout is in milliseconds. */
1876 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1877 mpssas_scsiio_timeout, cm, 0);
1880 targ->outstanding++;
1881 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1882 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1884 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1885 __func__, cm, ccb, targ->outstanding);
1887 mps_map_command(sc, cm);
/*
 * Translate an MPI2 task-management response code into human-readable
 * text and emit it via the MPS_XINFO debug log.
 * NOTE(review): the 'desc' declaration, 'break' statements, and the
 * default case are elided from this excerpt.
 */
1892 mps_response_code(struct mps_softc *sc, u8 response_code)
1896 switch (response_code) {
1897 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1898 desc = "task management request completed";
1900 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1901 desc = "invalid frame";
1903 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1904 desc = "task management request not supported";
1906 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1907 desc = "task management request failed";
1909 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1910 desc = "task management request succeeded";
1912 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1913 desc = "invalid lun";
1916 desc = "overlapped tag attempted";
1918 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1919 desc = "task queued, however not sent to target";
1925 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1926 response_code, desc);
1929 * mps_sc_failed_io_info - translate a non-successful SCSI_IO request
/*
 * Decode and debug-log the IOCStatus, SCSIStatus, and SCSIState of a
 * failed SCSI_IO reply, plus autosense data and SAS response info when
 * present.  Output goes to the MPS_XINFO debug channel only.
 * NOTE(review): several lines are elided from this excerpt (e.g. the
 * action after the 0x31170000 loginfo check, 'break's, and a guard
 * around the desc_scsi_state literal reassignment).
 */
1932 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1933 Mpi2SCSIIOReply_t *mpi_reply)
1937 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1938 MPI2_IOCSTATUS_MASK;
1939 u8 scsi_state = mpi_reply->SCSIState;
1940 u8 scsi_status = mpi_reply->SCSIStatus;
1941 char *desc_ioc_state = NULL;
1942 char *desc_scsi_status = NULL;
/* scsi_state text is concatenated into the softc's scratch buffer. */
1943 char *desc_scsi_state = sc->tmp_string;
1944 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is special-cased; the action taken (presumably an early
 * return to suppress logging) is not visible in this excerpt. */
1946 if (log_info == 0x31170000)
1949 switch (ioc_status) {
1950 case MPI2_IOCSTATUS_SUCCESS:
1951 desc_ioc_state = "success";
1953 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1954 desc_ioc_state = "invalid function";
1956 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1957 desc_ioc_state = "scsi recovered error";
1959 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1960 desc_ioc_state = "scsi invalid dev handle";
1962 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1963 desc_ioc_state = "scsi device not there";
1965 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1966 desc_ioc_state = "scsi data overrun";
1968 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1969 desc_ioc_state = "scsi data underrun";
1971 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1972 desc_ioc_state = "scsi io data error";
1974 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1975 desc_ioc_state = "scsi protocol error";
1977 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1978 desc_ioc_state = "scsi task terminated";
1980 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1981 desc_ioc_state = "scsi residual mismatch";
1983 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1984 desc_ioc_state = "scsi task mgmt failed";
1986 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1987 desc_ioc_state = "scsi ioc terminated";
1989 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1990 desc_ioc_state = "scsi ext terminated";
1992 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1993 desc_ioc_state = "eedp guard error";
1995 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1996 desc_ioc_state = "eedp ref tag error";
1998 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1999 desc_ioc_state = "eedp app tag error";
2002 desc_ioc_state = "unknown";
2006 switch (scsi_status) {
2007 case MPI2_SCSI_STATUS_GOOD:
2008 desc_scsi_status = "good";
2010 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2011 desc_scsi_status = "check condition";
2013 case MPI2_SCSI_STATUS_CONDITION_MET:
2014 desc_scsi_status = "condition met";
2016 case MPI2_SCSI_STATUS_BUSY:
2017 desc_scsi_status = "busy";
2019 case MPI2_SCSI_STATUS_INTERMEDIATE:
2020 desc_scsi_status = "intermediate";
2022 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2023 desc_scsi_status = "intermediate condmet";
2025 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2026 desc_scsi_status = "reservation conflict";
2028 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2029 desc_scsi_status = "command terminated";
2031 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2032 desc_scsi_status = "task set full";
2034 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2035 desc_scsi_status = "aca active";
2037 case MPI2_SCSI_STATUS_TASK_ABORTED:
2038 desc_scsi_status = "task aborted";
2041 desc_scsi_status = "unknown";
2045 desc_scsi_state[0] = '\0';
/* NOTE(review): reassigning the pointer to a string literal and then
 * strcat()ing into it would be UB; the original presumably guards this
 * with "if (!scsi_state)" on an elided line -- confirm. */
2047 desc_scsi_state = " ";
2048 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2049 strcat(desc_scsi_state, "response info ");
2050 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2051 strcat(desc_scsi_state, "state terminated ");
2052 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2053 strcat(desc_scsi_state, "no status ");
2054 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2055 strcat(desc_scsi_state, "autosense failed ");
2056 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2057 strcat(desc_scsi_state, "autosense valid ");
2059 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2060 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2061 /* We can add more detail about underflow data here
2064 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2065 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2066 desc_scsi_state, scsi_state);
/* Dump decoded sense data only when XINFO debugging is enabled. */
2068 if (sc->mps_debug & MPS_XINFO &&
2069 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2070 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2071 scsi_sense_print(csio);
2072 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
/* First byte of the SAS response info carries the TM response code. */
2075 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2076 response_info = le32toh(mpi_reply->ResponseInfo);
2077 response_bytes = (u8 *)&response_info;
2078 mps_response_code(sc,response_bytes[0]);
2083 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2085 MPI2_SCSI_IO_REPLY *rep;
2087 struct ccb_scsiio *csio;
2088 struct mpssas_softc *sassc;
2089 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2090 u8 *TLR_bits, TLR_on;
2093 struct mpssas_target *target;
2094 target_id_t target_id;
2097 mps_dprint(sc, MPS_TRACE,
2098 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2099 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2100 cm->cm_targ->outstanding);
2102 callout_stop(&cm->cm_callout);
2103 mtx_assert(&sc->mps_mtx, MA_OWNED);
2106 ccb = cm->cm_complete_data;
2108 target_id = csio->ccb_h.target_id;
2109 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2111 * XXX KDM if the chain allocation fails, does it matter if we do
2112 * the sync and unload here? It is simpler to do it in every case,
2113 * assuming it doesn't cause problems.
2115 if (cm->cm_data != NULL) {
2116 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2117 dir = BUS_DMASYNC_POSTREAD;
2118 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2119 dir = BUS_DMASYNC_POSTWRITE;
2120 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2121 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2124 cm->cm_targ->completed++;
2125 cm->cm_targ->outstanding--;
2126 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2127 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2129 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2130 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2131 if (cm->cm_reply != NULL)
2132 mpssas_log_command(cm, MPS_RECOVERY,
2133 "completed timedout cm %p ccb %p during recovery "
2134 "ioc %x scsi %x state %x xfer %u\n",
2136 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2137 le32toh(rep->TransferCount));
2139 mpssas_log_command(cm, MPS_RECOVERY,
2140 "completed timedout cm %p ccb %p during recovery\n",
2142 } else if (cm->cm_targ->tm != NULL) {
2143 if (cm->cm_reply != NULL)
2144 mpssas_log_command(cm, MPS_RECOVERY,
2145 "completed cm %p ccb %p during recovery "
2146 "ioc %x scsi %x state %x xfer %u\n",
2148 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2149 le32toh(rep->TransferCount));
2151 mpssas_log_command(cm, MPS_RECOVERY,
2152 "completed cm %p ccb %p during recovery\n",
2154 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2155 mpssas_log_command(cm, MPS_RECOVERY,
2156 "reset completed cm %p ccb %p\n",
2160 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2162 * We ran into an error after we tried to map the command,
2163 * so we're getting a callback without queueing the command
2164 * to the hardware. So we set the status here, and it will
2165 * be retained below. We'll go through the "fast path",
2166 * because there can be no reply when we haven't actually
2167 * gone out to the hardware.
2169 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2172 * Currently the only error included in the mask is
2173 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2174 * chain frames. We need to freeze the queue until we get
2175 * a command that completed without this error, which will
2176 * hopefully have some chain frames attached that we can
2177 * use. If we wanted to get smarter about it, we would
2178 * only unfreeze the queue in this condition when we're
2179 * sure that we're getting some chain frames back. That's
2180 * probably unnecessary.
2182 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2183 xpt_freeze_simq(sassc->sim, 1);
2184 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2185 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2186 "freezing SIM queue\n");
2191 * If this is a Start Stop Unit command and it was issued by the driver
2192 * during shutdown, decrement the refcount to account for all of the
2193 * commands that were sent. All SSU commands should be completed before
2194 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2197 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2198 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2202 /* Take the fast path to completion */
2203 if (cm->cm_reply == NULL) {
2204 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2205 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2206 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2208 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2209 ccb->csio.scsi_status = SCSI_STATUS_OK;
2211 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2212 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2213 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2214 mps_dprint(sc, MPS_XINFO,
2215 "Unfreezing SIM queue\n");
2220 * There are two scenarios where the status won't be
2221 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2222 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2224 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2226 * Freeze the dev queue so that commands are
2227 * executed in the correct order after error
2230 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2231 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2233 mps_free_command(sc, cm);
2238 mpssas_log_command(cm, MPS_XINFO,
2239 "ioc %x scsi %x state %x xfer %u\n",
2240 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2241 le32toh(rep->TransferCount));
2244 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2245 * Volume if an error occurred (normal I/O retry). Use the original
2246 * CCB, but set a flag that this will be a retry so that it's sent to
2247 * the original volume. Free the command but reuse the CCB.
2249 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2250 mps_free_command(sc, cm);
2251 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2252 mpssas_action_scsiio(sassc, ccb);
2255 ccb->ccb_h.sim_priv.entries[0].field = 0;
2257 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2258 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2259 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2261 case MPI2_IOCSTATUS_SUCCESS:
2262 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2264 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2265 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2266 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2268 /* Completion failed at the transport level. */
2269 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2270 MPI2_SCSI_STATE_TERMINATED)) {
2271 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2275 /* In a modern packetized environment, an autosense failure
2276 * implies that there's not much else that can be done to
2277 * recover the command.
2279 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2280 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2285 * CAM doesn't care about SAS Response Info data, but if this is
2286 * the state check if TLR should be done. If not, clear the
2287 * TLR_bits for the target.
2289 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2290 ((le32toh(rep->ResponseInfo) &
2291 MPI2_SCSI_RI_MASK_REASONCODE) ==
2292 MPS_SCSI_RI_INVALID_FRAME)) {
2293 sc->mapping_table[target_id].TLR_bits =
2294 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2298 * Intentionally override the normal SCSI status reporting
2299 * for these two cases. These are likely to happen in a
2300 * multi-initiator environment, and we want to make sure that
2301 * CAM retries these commands rather than fail them.
2303 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2304 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2305 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2309 /* Handle normal status and sense */
2310 csio->scsi_status = rep->SCSIStatus;
2311 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2312 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2314 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2316 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2317 int sense_len, returned_sense_len;
2319 returned_sense_len = min(le32toh(rep->SenseCount),
2320 sizeof(struct scsi_sense_data));
2321 if (returned_sense_len < ccb->csio.sense_len)
2322 ccb->csio.sense_resid = ccb->csio.sense_len -
2325 ccb->csio.sense_resid = 0;
2327 sense_len = min(returned_sense_len,
2328 ccb->csio.sense_len - ccb->csio.sense_resid);
2329 bzero(&ccb->csio.sense_data,
2330 sizeof(ccb->csio.sense_data));
2331 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2332 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2336 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2337 * and it's page code 0 (Supported Page List), and there is
2338 * inquiry data, and this is for a sequential access device, and
2339 * the device is an SSP target, and TLR is supported by the
2340 * controller, turn the TLR_bits value ON if page 0x90 is
2343 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2344 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2345 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2346 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2347 (csio->data_ptr != NULL) &&
2348 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2349 (sc->control_TLR) &&
2350 (sc->mapping_table[target_id].device_info &
2351 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2352 vpd_list = (struct scsi_vpd_supported_page_list *)
2354 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2355 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2356 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2357 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2358 csio->cdb_io.cdb_bytes[4];
2359 alloc_len -= csio->resid;
2360 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2361 if (vpd_list->list[i] == 0x90) {
2369 * If this is a SATA direct-access end device, mark it so that
2370 * a SCSI StartStopUnit command will be sent to it when the
2371 * driver is being shutdown.
2373 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2374 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2375 (sc->mapping_table[target_id].device_info &
2376 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2377 ((sc->mapping_table[target_id].device_info &
2378 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2379 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2380 target = &sassc->targets[target_id];
2381 target->supports_SSU = TRUE;
2382 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2386 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2387 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2389 * If devinfo is 0 this will be a volume. In that case don't
2390 * tell CAM that the volume is not there. We want volumes to
2391 * be enumerated until they are deleted/removed, not just
2394 if (cm->cm_targ->devinfo == 0)
2395 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2397 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2399 case MPI2_IOCSTATUS_INVALID_SGL:
2400 mps_print_scsiio_cmd(sc, cm);
2401 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2403 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2405 * This is one of the responses that comes back when an I/O
2406 * has been aborted. If it is because of a timeout that we
2407 * initiated, just set the status to CAM_CMD_TIMEOUT.
2408 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2409 * command is the same (it gets retried, subject to the
2410 * retry counter), the only difference is what gets printed
2413 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2414 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2416 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2418 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2419 /* resid is ignored for this condition */
2421 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2423 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2424 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2426 * These can sometimes be transient transport-related
2427 * errors, and sometimes persistent drive-related errors.
2428 * We used to retry these without decrementing the retry
2429 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2430 * we hit a persistent drive problem that returns one of
2431 * these error codes, we would retry indefinitely. So,
2432 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2433 * count and avoid infinite retries. We're taking the
2434 * potential risk of flagging false failures in the event
2435 * of a topology-related error (e.g. a SAS expander problem
2436 * causes a command addressed to a drive to fail), but
2437 * avoiding getting into an infinite retry loop.
2439 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2440 mpssas_log_command(cm, MPS_INFO,
2441 "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2442 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2443 rep->SCSIStatus, rep->SCSIState,
2444 le32toh(rep->TransferCount));
2446 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2447 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2448 case MPI2_IOCSTATUS_INVALID_VPID:
2449 case MPI2_IOCSTATUS_INVALID_FIELD:
2450 case MPI2_IOCSTATUS_INVALID_STATE:
2451 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2452 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2453 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2454 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2455 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2457 mpssas_log_command(cm, MPS_XINFO,
2458 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2459 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2460 rep->SCSIStatus, rep->SCSIState,
2461 le32toh(rep->TransferCount));
2462 csio->resid = cm->cm_length;
2463 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2467 mps_sc_failed_io_info(sc,csio,rep);
2469 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2470 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2471 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2472 mps_dprint(sc, MPS_XINFO, "Command completed, "
2473 "unfreezing SIM queue\n");
2476 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2477 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2478 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2481 mps_free_command(sc, cm);
2485 /* All Request reached here are Endian safe */
/*
 * mpssas_direct_drive_io: try to convert a READ/WRITE I/O aimed at an IR
 * (RAID) volume into a "direct drive" I/O addressed at the underlying
 * physical disk.  Only 6/10/12/16-byte READ/WRITE CDBs whose LBA range
 * fits inside the volume and does not cross a stripe boundary are
 * converted; everything else is left untouched and goes to the volume.
 * On conversion the request's DevHandle is rewritten to the PhysDisk's
 * handle, the LBA bytes in the CDB are rewritten to the physical LBA,
 * and MPS_CM_FLAGS_DD_IO is set on the command.
 *
 * NOTE(review): the embedded original line numbers in this chunk are
 * discontinuous — some statements/braces are elided from this view.
 */
2487 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2489 pMpi2SCSIIORequest_t pIO_req;
2490 struct mps_softc *sc = sassc->sc;
2492 uint32_t physLBA, stripe_offset, stripe_unit;
2493 uint32_t io_size, column;
2494 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2497 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2498 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2499 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2500 * bit different than the 10/16 CDBs, handle them separately.
2502 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2503 CDB = pIO_req->CDB.CDB32;
2506 * Handle 6 byte CDBs.
2508 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2509 (CDB[0] == WRITE_6))) {
2511 * Get the transfer size in blocks.
2513 io_size = (cm->cm_length >> sc->DD_block_exponent);
2516 * Get virtual LBA given in the CDB.
2518 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2519 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2522 * Check that LBA range for I/O does not exceed volume's
2525 if ((virtLBA + (uint64_t)io_size - 1) <=
2528 * Check if the I/O crosses a stripe boundary. If not,
2529 * translate the virtual LBA to a physical LBA and set
2530 * the DevHandle for the PhysDisk to be used. If it
2531 * does cross a boundary, do normal I/O. To get the
2532 * right DevHandle to use, get the map number for the
2533 * column, then use that map number to look up the
2534 * DevHandle of the PhysDisk.
2536 stripe_offset = (uint32_t)virtLBA &
2537 (sc->DD_stripe_size - 1);
2538 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2539 physLBA = (uint32_t)virtLBA >>
2540 sc->DD_stripe_exponent;
2541 stripe_unit = physLBA / sc->DD_num_phys_disks;
2542 column = physLBA % sc->DD_num_phys_disks;
2543 pIO_req->DevHandle =
2544 htole16(sc->DD_column_map[column].dev_handle);
2545 /* XXX(review): is this endian safe?  DevHandle above is htole16()'d; confirm the descriptor copy needs the same. */
2546 cm->cm_desc.SCSIIO.DevHandle =
2549 physLBA = (stripe_unit <<
2550 sc->DD_stripe_exponent) + stripe_offset;
2551 ptrLBA = &pIO_req->CDB.CDB32[1];
2552 physLBA_byte = (uint8_t)(physLBA >> 16);
2553 *ptrLBA = physLBA_byte;
2554 ptrLBA = &pIO_req->CDB.CDB32[2];
2555 physLBA_byte = (uint8_t)(physLBA >> 8);
2556 *ptrLBA = physLBA_byte;
2557 ptrLBA = &pIO_req->CDB.CDB32[3];
2558 physLBA_byte = (uint8_t)physLBA;
2559 *ptrLBA = physLBA_byte;
2562 * Set flag that Direct Drive I/O is
2565 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2572 * Handle 10, 12 or 16 byte CDBs.
2574 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2575 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2576 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2577 (CDB[0] == WRITE_12))) {
2579 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2580 * are 0. If not, this is accessing beyond 2TB so handle it in
2581 * the else section. 10-byte and 12-byte CDB's are OK.
2582 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2583 * ready to accept 12byte CDB for Direct IOs.
2585 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2586 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2587 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2589 * Get the transfer size in blocks.
2591 io_size = (cm->cm_length >> sc->DD_block_exponent);
2594 * Get virtual LBA. Point to correct lower 4 bytes of
2595 * LBA in the CDB depending on command.
2597 lba_idx = ((CDB[0] == READ_12) ||
2598 (CDB[0] == WRITE_12) ||
2599 (CDB[0] == READ_10) ||
2600 (CDB[0] == WRITE_10))? 2 : 6;
2601 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2602 ((uint64_t)CDB[lba_idx + 1] << 16) |
2603 ((uint64_t)CDB[lba_idx + 2] << 8) |
2604 (uint64_t)CDB[lba_idx + 3];
2607 * Check that LBA range for I/O does not exceed volume's
2610 if ((virtLBA + (uint64_t)io_size - 1) <=
2613 * Check if the I/O crosses a stripe boundary.
2614 * If not, translate the virtual LBA to a
2615 * physical LBA and set the DevHandle for the
2616 * PhysDisk to be used. If it does cross a
2617 * boundary, do normal I/O. To get the right
2618 * DevHandle to use, get the map number for the
2619 * column, then use that map number to look up
2620 * the DevHandle of the PhysDisk.
2622 stripe_offset = (uint32_t)virtLBA &
2623 (sc->DD_stripe_size - 1);
2624 if ((stripe_offset + io_size) <=
2625 sc->DD_stripe_size) {
2626 physLBA = (uint32_t)virtLBA >>
2627 sc->DD_stripe_exponent;
2628 stripe_unit = physLBA /
2629 sc->DD_num_phys_disks;
2631 sc->DD_num_phys_disks;
2632 pIO_req->DevHandle =
2633 htole16(sc->DD_column_map[column].
2635 cm->cm_desc.SCSIIO.DevHandle =
2638 physLBA = (stripe_unit <<
2639 sc->DD_stripe_exponent) +
2642 &pIO_req->CDB.CDB32[lba_idx];
2643 physLBA_byte = (uint8_t)(physLBA >> 24);
2644 *ptrLBA = physLBA_byte;
2646 &pIO_req->CDB.CDB32[lba_idx + 1];
2647 physLBA_byte = (uint8_t)(physLBA >> 16);
2648 *ptrLBA = physLBA_byte;
2650 &pIO_req->CDB.CDB32[lba_idx + 2];
2651 physLBA_byte = (uint8_t)(physLBA >> 8);
2652 *ptrLBA = physLBA_byte;
2654 &pIO_req->CDB.CDB32[lba_idx + 3];
2655 physLBA_byte = (uint8_t)physLBA;
2656 *ptrLBA = physLBA_byte;
2659 * Set flag that Direct Drive I/O is
2662 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2667 * 16-byte CDB and the upper 4 bytes of the CDB are not
2668 * 0. Get the transfer size in blocks.
2670 io_size = (cm->cm_length >> sc->DD_block_exponent);
/* NOTE(review): shifting CDB[2] by 54 looks wrong for assembling a big-endian 64-bit LBA (byte 2 should shift by 56) — confirm against upstream. */
2675 virtLBA = ((uint64_t)CDB[2] << 54) |
2676 ((uint64_t)CDB[3] << 48) |
2677 ((uint64_t)CDB[4] << 40) |
2678 ((uint64_t)CDB[5] << 32) |
2679 ((uint64_t)CDB[6] << 24) |
2680 ((uint64_t)CDB[7] << 16) |
2681 ((uint64_t)CDB[8] << 8) |
2685 * Check that LBA range for I/O does not exceed volume's
2688 if ((virtLBA + (uint64_t)io_size - 1) <=
2691 * Check if the I/O crosses a stripe boundary.
2692 * If not, translate the virtual LBA to a
2693 * physical LBA and set the DevHandle for the
2694 * PhysDisk to be used. If it does cross a
2695 * boundary, do normal I/O. To get the right
2696 * DevHandle to use, get the map number for the
2697 * column, then use that map number to look up
2698 * the DevHandle of the PhysDisk.
2700 stripe_offset = (uint32_t)virtLBA &
2701 (sc->DD_stripe_size - 1);
2702 if ((stripe_offset + io_size) <=
2703 sc->DD_stripe_size) {
2704 physLBA = (uint32_t)(virtLBA >>
2705 sc->DD_stripe_exponent);
2706 stripe_unit = physLBA /
2707 sc->DD_num_phys_disks;
2709 sc->DD_num_phys_disks;
2710 pIO_req->DevHandle =
2711 htole16(sc->DD_column_map[column].
2713 cm->cm_desc.SCSIIO.DevHandle =
2716 physLBA = (stripe_unit <<
2717 sc->DD_stripe_exponent) +
2721 * Set upper 4 bytes of LBA to 0. We
2722 * assume that the phys disks are less
2723 * than 2 TB's in size. Then, set the
2726 pIO_req->CDB.CDB32[2] = 0;
2727 pIO_req->CDB.CDB32[3] = 0;
2728 pIO_req->CDB.CDB32[4] = 0;
2729 pIO_req->CDB.CDB32[5] = 0;
2730 ptrLBA = &pIO_req->CDB.CDB32[6];
2731 physLBA_byte = (uint8_t)(physLBA >> 24);
2732 *ptrLBA = physLBA_byte;
2733 ptrLBA = &pIO_req->CDB.CDB32[7];
2734 physLBA_byte = (uint8_t)(physLBA >> 16);
2735 *ptrLBA = physLBA_byte;
2736 ptrLBA = &pIO_req->CDB.CDB32[8];
2737 physLBA_byte = (uint8_t)(physLBA >> 8);
2738 *ptrLBA = physLBA_byte;
2739 ptrLBA = &pIO_req->CDB.CDB32[9];
2740 physLBA_byte = (uint8_t)physLBA;
2741 *ptrLBA = physLBA_byte;
2744 * Set flag that Direct Drive I/O is
2747 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2754 #if __FreeBSD_version >= 900026
/*
 * mpssas_smpio_complete: completion callback for SMP passthrough
 * commands sent via mpssas_send_smpcmd().  Validates the reply
 * (chain-frame error flags, NULL reply, IOCStatus/SASStatus, and the
 * SMP function-result byte in smp_response[2]), sets the CCB status
 * accordingly, then syncs/unloads the data map and frees the command.
 */
2756 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2758 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2759 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2763 ccb = cm->cm_complete_data;
2766 * Currently there should be no way we can hit this case. It only
2767 * happens when we have a failure to allocate chain frames, and SMP
2768 * commands require two S/G elements only. That should be handled
2769 * in the standard request size.
2771 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2772 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2773 __func__, cm->cm_flags);
2774 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2778 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2780 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2781 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2785 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
/* Reassemble the 64-bit SAS address from the little-endian request (for logging below). */
2786 sasaddr = le32toh(req->SASAddress.Low);
2787 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2789 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2790 MPI2_IOCSTATUS_SUCCESS ||
2791 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2792 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2793 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2794 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2798 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2799 "%#jx completed successfully\n", __func__,
2800 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2802 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2803 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2805 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2809 * We sync in both directions because we had DMAs in the S/G list
2810 * in both directions.
2812 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2813 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2814 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2815 mps_free_command(sc, cm);
/*
 * mpssas_send_smpcmd: build and dispatch an MPI2 SMP passthrough request
 * for an XPT_SMP_IO CCB addressed to 'sasaddr'.  Virtual addresses
 * (flat buffer or a single-segment S/G list) are supported; physical
 * addresses and multi-segment buffers are rejected with CAM_REQ_INVALID.
 * The request and response buffers are handed to mps_map_command() via a
 * two-entry uio, and completion is routed to mpssas_smpio_complete().
 */
2820 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2822 struct mps_command *cm;
2823 uint8_t *request, *response;
2824 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2825 struct mps_softc *sc;
2832 * XXX We don't yet support physical addresses here.
2834 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2835 case CAM_DATA_PADDR:
2836 case CAM_DATA_SG_PADDR:
2837 mps_dprint(sc, MPS_ERROR,
2838 "%s: physical addresses not supported\n", __func__);
2839 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2844 * The chip does not support more than one buffer for the
2845 * request or response.
2847 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2848 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2849 mps_dprint(sc, MPS_ERROR,
2850 "%s: multiple request or response "
2851 "buffer segments not supported for SMP\n",
2853 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2859 * The CAM_SCATTER_VALID flag was originally implemented
2860 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2861 * We have two. So, just take that flag to mean that we
2862 * might have S/G lists, and look at the S/G segment count
2863 * to figure out whether that is the case for each individual
2866 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2867 bus_dma_segment_t *req_sg;
2869 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2870 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2872 request = ccb->smpio.smp_request;
2874 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2875 bus_dma_segment_t *rsp_sg;
2877 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2878 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2880 response = ccb->smpio.smp_response;
2882 case CAM_DATA_VADDR:
2883 request = ccb->smpio.smp_request;
2884 response = ccb->smpio.smp_response;
2887 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2892 cm = mps_alloc_command(sc);
2894 mps_dprint(sc, MPS_ERROR,
2895 "%s: cannot allocate command\n", __func__);
2896 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2901 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2902 bzero(req, sizeof(*req));
2903 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2905 /* Allow the chip to use any route to this SAS address. */
2906 req->PhysicalPort = 0xff;
2908 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2910 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2912 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2913 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2915 mpi_init_sge(cm, req, &req->SGL);
2918 * Set up a uio to pass into mps_map_command(). This allows us to
2919 * do one map command, and one busdma call in there.
2921 cm->cm_uio.uio_iov = cm->cm_iovec;
2922 cm->cm_uio.uio_iovcnt = 2;
2923 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2926 * The read/write flag isn't used by busdma, but set it just in
2927 * case. This isn't exactly accurate, either, since we're going in
2930 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] carries the SMP request, iovec[1] the response buffer. */
2932 cm->cm_iovec[0].iov_base = request;
2933 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2934 cm->cm_iovec[1].iov_base = response;
2935 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2937 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2938 cm->cm_iovec[1].iov_len;
2941 * Trigger a warning message in mps_data_cb() for the user if we
2942 * wind up exceeding two S/G segments. The chip expects one
2943 * segment for the request and another for the response.
2945 cm->cm_max_segs = 2;
2947 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2948 cm->cm_complete = mpssas_smpio_complete;
2949 cm->cm_complete_data = ccb;
2952 * Tell the mapping code that we're using a uio, and that this is
2953 * an SMP passthrough request. There is a little special-case
2954 * logic there (in mps_data_cb()) to handle the bidirectional
2957 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2958 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2960 /* The chip data format is little endian. */
2961 req->SASAddress.High = htole32(sasaddr >> 32);
2962 req->SASAddress.Low = htole32(sasaddr);
2965 * XXX Note that we don't have a timeout/abort mechanism here.
2966 * From the manual, it looks like task management requests only
2967 * work for SCSI IO and SATA passthrough requests. We may need to
2968 * have a mechanism to retry requests in the event of a chip reset
2969 * at least. Hopefully the chip will ensure that any errors short
2970 * of that are relayed back to the driver.
2972 error = mps_map_command(sc, cm);
2973 if ((error != 0) && (error != EINPROGRESS)) {
2974 mps_dprint(sc, MPS_ERROR,
2975 "%s: error %d returned from mps_map_command()\n",
2983 mps_free_command(sc, cm);
2984 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * mpssas_action_smpio: handle an XPT_SMP_IO CCB.  Resolve the SAS
 * address that should receive the SMP request: the target itself when
 * it embeds an SMP target, otherwise the target's parent device (the
 * expander), then hand off to mpssas_send_smpcmd().  Fails the CCB with
 * CAM_SEL_TIMEOUT / CAM_DEV_NOT_THERE when no usable address exists.
 */
2991 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2993 struct mps_softc *sc;
2994 struct mpssas_target *targ;
2995 uint64_t sasaddr = 0;
3000 * Make sure the target exists.
3002 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3003 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3004 targ = &sassc->targets[ccb->ccb_h.target_id];
3005 if (targ->handle == 0x0) {
3006 mps_dprint(sc, MPS_ERROR,
3007 "%s: target %d does not exist!\n", __func__,
3008 ccb->ccb_h.target_id);
3009 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3015 * If this device has an embedded SMP target, we'll talk to it
3017 * figure out what the expander's address is.
3019 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3020 sasaddr = targ->sasaddr;
3023 * If we don't have a SAS address for the expander yet, try
3024 * grabbing it from the page 0x83 information cached in the
3025 * transport layer for this target. LSI expanders report the
3026 * expander SAS address as the port-associated SAS address in
3027 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3030 * XXX KDM disable this for now, but leave it commented out so that
3031 * it is obvious that this is another possible way to get the SAS
3034 * The parent handle method below is a little more reliable, and
3035 * the other benefit is that it works for devices other than SES
3036 * devices. So you can send a SMP request to a da(4) device and it
3037 * will get routed to the expander that device is attached to.
3038 * (Assuming the da(4) device doesn't contain an SMP target...)
3042 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3046 * If we still don't have a SAS address for the expander, look for
3047 * the parent device of this device, which is probably the expander.
3050 #ifdef OLD_MPS_PROBE
3051 struct mpssas_target *parent_target;
3054 if (targ->parent_handle == 0x0) {
3055 mps_dprint(sc, MPS_ERROR,
3056 "%s: handle %d does not have a valid "
3057 "parent handle!\n", __func__, targ->handle);
3058 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3061 #ifdef OLD_MPS_PROBE
3062 parent_target = mpssas_find_target_by_handle(sassc, 0,
3063 targ->parent_handle);
3065 if (parent_target == NULL) {
3066 mps_dprint(sc, MPS_ERROR,
3067 "%s: handle %d does not have a valid "
3068 "parent target!\n", __func__, targ->handle);
3069 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3073 if ((parent_target->devinfo &
3074 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3075 mps_dprint(sc, MPS_ERROR,
3076 "%s: handle %d parent %d does not "
3077 "have an SMP target!\n", __func__,
3078 targ->handle, parent_target->handle);
3079 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3084 sasaddr = parent_target->sasaddr;
3085 #else /* OLD_MPS_PROBE */
3086 if ((targ->parent_devinfo &
3087 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3088 mps_dprint(sc, MPS_ERROR,
3089 "%s: handle %d parent %d does not "
3090 "have an SMP target!\n", __func__,
3091 targ->handle, targ->parent_handle);
3092 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3096 if (targ->parent_sasaddr == 0x0) {
3097 mps_dprint(sc, MPS_ERROR,
3098 "%s: handle %d parent handle %d does "
3099 "not have a valid SAS address!\n",
3100 __func__, targ->handle, targ->parent_handle);
3101 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3105 sasaddr = targ->parent_sasaddr;
3106 #endif /* OLD_MPS_PROBE */
3111 mps_dprint(sc, MPS_INFO,
3112 "%s: unable to find SAS address for handle %d\n",
3113 __func__, targ->handle);
3114 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3117 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3125 #endif //__FreeBSD_version >= 900026
/*
 * mpssas_action_resetdev: handle XPT_RESET_DEV by issuing a SCSI task
 * management Target Reset (with SAS hard link / SATA link reset flags)
 * to the target's DevHandle.  The target is marked INRESET and the
 * result is delivered via mpssas_resetdev_complete().
 *
 * NOTE(review): the assignment of 'sc' (presumably sc = sassc->sc) is on
 * a line elided from this chunk.
 */
3128 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3130 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3131 struct mps_softc *sc;
3132 struct mps_command *tm;
3133 struct mpssas_target *targ;
3135 MPS_FUNCTRACE(sassc->sc);
3136 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3138 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3139 ("Target %d out of bounds in XPT_RESET_DEV\n",
3140 ccb->ccb_h.target_id));
3142 tm = mps_alloc_command(sc);
3144 mps_dprint(sc, MPS_ERROR,
3145 "command alloc failure in mpssas_action_resetdev\n");
3146 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3151 targ = &sassc->targets[ccb->ccb_h.target_id];
3152 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3153 req->DevHandle = htole16(targ->handle);
3154 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3155 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3157 /* SAS Hard Link Reset / SATA Link Reset */
3158 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3161 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3162 tm->cm_complete = mpssas_resetdev_complete;
3163 tm->cm_complete_data = ccb;
/* Block new I/O to this target until the reset completes. */
3165 targ->flags |= MPSSAS_TARGET_INRESET;
3167 mps_map_command(sc, tm);
/*
 * mpssas_resetdev_complete: completion callback for the target-reset
 * task management request issued by mpssas_action_resetdev().  On
 * MPI2_SCSITASKMGMT_RSP_TM_COMPLETE the CCB is completed successfully
 * and AC_SENT_BDR is announced for the target; otherwise the CCB is
 * failed with CAM_REQ_CMP_ERR.  The TM command is freed on exit.
 */
3171 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3173 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3177 mtx_assert(&sc->mps_mtx, MA_OWNED);
3179 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3180 ccb = tm->cm_complete_data;
3183 * Currently there should be no way we can hit this case. It only
3184 * happens when we have a failure to allocate chain frames, and
3185 * task management commands don't have S/G lists.
3187 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3188 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3190 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3192 mps_dprint(sc, MPS_ERROR,
3193 "%s: cm_flags = %#x for reset of handle %#04x! "
3194 "This should not happen!\n", __func__, tm->cm_flags,
3196 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3200 mps_dprint(sc, MPS_XINFO,
3201 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3202 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3204 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3205 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3206 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3210 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3214 mpssas_free_tm(sc, tm);
/*
 * mpssas_poll: CAM SIM poll routine.  Clears the MPS_TRACE debug flag
 * when set (frequent trace output during polled operation, e.g. a
 * panic, is too slow) and services controller interrupts inline via
 * mps_intr_locked().
 */
3219 mpssas_poll(struct cam_sim *sim)
3221 struct mpssas_softc *sassc;
3223 sassc = cam_sim_softc(sim);
3225 if (sassc->sc->mps_debug & MPS_TRACE) {
3226 /* frequent debug messages during a panic just slow
3227 * everything down too much.
3229 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3230 sassc->sc->mps_debug &= ~MPS_TRACE;
3233 mps_intr_locked(sassc->sc);
/*
 * mpssas_async: CAM asynchronous event callback.  On AC_ADVINFO_CHANGED
 * (only compiled on FreeBSD versions that support it — see the #if
 * guards) with a CDAI_TYPE_RCAPLONG buffer, re-read the long READ
 * CAPACITY data via an XPT_DEV_ADVINFO CCB and refresh the per-LUN EEDP
 * (protection information) state.  On AC_FOUND_DEVICE, kick off the
 * EEDP probe via mpssas_check_eedp().
 */
3237 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3240 struct mps_softc *sc;
3242 sc = (struct mps_softc *)callback_arg;
3245 #if (__FreeBSD_version >= 1000006) || \
3246 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3247 case AC_ADVINFO_CHANGED: {
3248 struct mpssas_target *target;
3249 struct mpssas_softc *sassc;
3250 struct scsi_read_capacity_data_long rcap_buf;
3251 struct ccb_dev_advinfo cdai;
3252 struct mpssas_lun *lun;
3257 buftype = (uintptr_t)arg;
3263 * We're only interested in read capacity data changes.
3265 if (buftype != CDAI_TYPE_RCAPLONG)
3269 * We should have a handle for this, but check to make sure.
3271 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3272 ("Target %d out of bounds in mpssas_async\n",
3273 xpt_path_target_id(path)));
3274 target = &sassc->targets[xpt_path_target_id(path)];
3275 if (target->handle == 0)
3278 lunid = xpt_path_lun_id(path);
/* Find the LUN record for this path; create one if it is missing. */
3280 SLIST_FOREACH(lun, &target->luns, lun_link) {
3281 if (lun->lun_id == lunid) {
3287 if (found_lun == 0) {
3288 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3291 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3292 "LUN for EEDP support.\n");
3295 lun->lun_id = lunid;
3296 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data through XPT_DEV_ADVINFO. */
3299 bzero(&rcap_buf, sizeof(rcap_buf));
3300 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3301 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3302 cdai.ccb_h.flags = CAM_DIR_IN;
3303 cdai.buftype = CDAI_TYPE_RCAPLONG;
3304 #if (__FreeBSD_version >= 1100061) || \
3305 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3306 cdai.flags = CDAI_FLAG_NONE;
3310 cdai.bufsiz = sizeof(rcap_buf);
3311 cdai.buf = (uint8_t *)&rcap_buf;
3312 xpt_action((union ccb *)&cdai);
3313 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3314 cam_release_devq(cdai.ccb_h.path,
3317 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3318 && (rcap_buf.prot & SRC16_PROT_EN)) {
3319 lun->eedp_formatted = TRUE;
3320 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3322 lun->eedp_formatted = FALSE;
3323 lun->eedp_block_size = 0;
3328 case AC_FOUND_DEVICE: {
3329 struct ccb_getdev *cgd;
3332 mpssas_check_eedp(sc, path, cgd);
3341 #if (__FreeBSD_version < 901503) || \
3342 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * mpssas_check_eedp: for a newly found device that advertises protection
 * information (SPC3_SID_PROTECT in its inquiry data), build and issue an
 * internal READ CAPACITY(16) command so that mpssas_read_cap_done() can
 * record whether each LUN is formatted for EEDP.  A LUN record is added
 * to the target's list if one does not already exist.  Only compiled on
 * FreeBSD versions without AC_ADVINFO_CHANGED (see the surrounding #if).
 */
3344 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3345 struct ccb_getdev *cgd)
3347 struct mpssas_softc *sassc = sc->sassc;
3348 struct ccb_scsiio *csio;
3349 struct scsi_read_capacity_16 *scsi_cmd;
3350 struct scsi_read_capacity_eedp *rcap_buf;
3352 target_id_t targetid;
3355 struct cam_path *local_path;
3356 struct mpssas_target *target;
3357 struct mpssas_lun *lun;
3362 pathid = cam_sim_path(sassc->sim);
3363 targetid = xpt_path_target_id(path);
3364 lunid = xpt_path_lun_id(path);
3366 KASSERT(targetid < sassc->maxtargets,
3367 ("Target %d out of bounds in mpssas_check_eedp\n",
3369 target = &sassc->targets[targetid];
3370 if (target->handle == 0x0)
3374 * Determine if the device is EEDP capable.
3376 * If this flag is set in the inquiry data,
3377 * the device supports protection information,
3378 * and must support the 16 byte read
3379 * capacity command, otherwise continue without
3380 * sending read cap 16
3382 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3386 * Issue a READ CAPACITY 16 command. This info
3387 * is used to determine if the LUN is formatted
3390 ccb = xpt_alloc_ccb_nowait();
3392 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3393 "for EEDP support.\n");
3397 if (xpt_create_path(&local_path, xpt_periph,
3398 pathid, targetid, lunid) != CAM_REQ_CMP) {
3399 mps_dprint(sc, MPS_ERROR, "Unable to create "
3400 "path for EEDP support\n");
3406 * If LUN is already in list, don't create a new
3410 SLIST_FOREACH(lun, &target->luns, lun_link) {
3411 if (lun->lun_id == lunid) {
3417 lun = malloc(sizeof(struct mpssas_lun),
3420 mps_dprint(sc, MPS_ERROR,
3421 "Unable to alloc LUN for EEDP support.\n");
3422 xpt_free_path(local_path);
3426 lun->lun_id = lunid;
3427 SLIST_INSERT_HEAD(&target->luns, lun,
3431 xpt_path_string(local_path, path_str, sizeof(path_str));
3433 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3434 path_str, target->handle);
3437 * Issue a READ CAPACITY 16 command for the LUN.
3438 * The mpssas_read_cap_done function will load
3439 * the read cap info into the LUN struct.
3441 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3442 M_MPT2, M_NOWAIT | M_ZERO);
3443 if (rcap_buf == NULL) {
3444 mps_dprint(sc, MPS_FAULT,
3445 "Unable to alloc read capacity buffer for EEDP support.\n");
3446 xpt_free_path(ccb->ccb_h.path);
/* Build the internal READ CAPACITY(16) CCB; completion frees rcap_buf. */
3450 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3452 csio->ccb_h.func_code = XPT_SCSI_IO;
3453 csio->ccb_h.flags = CAM_DIR_IN;
3454 csio->ccb_h.retry_count = 4;
3455 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3456 csio->ccb_h.timeout = 60000;
3457 csio->data_ptr = (uint8_t *)rcap_buf;
3458 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3459 csio->sense_len = MPS_SENSE_LEN;
3460 csio->cdb_len = sizeof(*scsi_cmd);
3461 csio->tag_action = MSG_SIMPLE_Q_TAG;
3463 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3464 bzero(scsi_cmd, sizeof(*scsi_cmd));
/* 0x9E = SERVICE ACTION IN(16); service action selects READ CAPACITY(16). */
3465 scsi_cmd->opcode = 0x9E;
3466 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3467 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3469 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * mpssas_read_cap_done: completion callback for the internal READ
 * CAPACITY(16) issued by mpssas_check_eedp().  Releases the devq freeze
 * taken for the internal command, looks up the LUN on the target's LUN
 * list, and records whether the LUN is EEDP formatted (protect bit in
 * the returned data) along with its block size.  Frees the capacity
 * buffer, the path, and the CCB before returning.
 */
3474 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3476 struct mpssas_softc *sassc;
3477 struct mpssas_target *target;
3478 struct mpssas_lun *lun;
3479 struct scsi_read_capacity_eedp *rcap_buf;
3481 if (done_ccb == NULL)
3484 /* The driver needs to release the devq if the SCSI command was
3485 * generated internally by the driver.
3486 * Currently there is a single place where the driver
3487 * issues a SCSI command internally. In future, if the driver
3488 * issues more SCSI commands internally, it needs to release the
3489 * devq internally, since those commands will not go back to
3492 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3493 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3494 xpt_release_devq(done_ccb->ccb_h.path,
3495 /*count*/ 1, /*run_queue*/TRUE);
3498 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3501 * Get the LUN ID for the path and look it up in the LUN list for the
3504 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3505 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3506 ("Target %d out of bounds in mpssas_read_cap_done\n",
3507 done_ccb->ccb_h.target_id));
3508 target = &sassc->targets[done_ccb->ccb_h.target_id];
3509 SLIST_FOREACH(lun, &target->luns, lun_link) {
3510 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3514 * Got the LUN in the target's LUN list. Fill it in
3515 * with EEDP info. If the READ CAP 16 command had some
3516 * SCSI error (common if command is not supported), mark
3517 * the lun as not supporting EEDP and set the block size
3520 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
3521 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3522 lun->eedp_formatted = FALSE;
3523 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte = PROT_EN: LUN is formatted with protection. */
3527 if (rcap_buf->protect & 0x01) {
3528 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3529 "target ID %d is formatted for EEDP "
3530 "support.\n", done_ccb->ccb_h.target_lun,
3531 done_ccb->ccb_h.target_id);
3532 lun->eedp_formatted = TRUE;
3533 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3538 // Finished with this CCB and path.
3539 free(rcap_buf, M_MPT2);
3540 xpt_free_path(done_ccb->ccb_h.path);
3541 xpt_free_ccb(done_ccb);
3543 #endif /* (__FreeBSD_version < 901503) || \
3544 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Prepare a task-management (TM) command before it is issued: allocate
 * a CCB and a CAM path for the target/LUN so the devq can be released
 * later, and mark the target as in-reset so no new I/O reaches it.
 */
3547 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3548 struct mpssas_target *target, lun_id_t lun_id)
/*
 * Set the INRESET flag for this target so that no I/O will be sent to
 * the target until the reset has completed.  If an I/O request does
 * happen, the devq will be frozen.  The CCB holds the path which is
 * used to release the devq.  The devq is released and the CCB is freed
 * when the TM completes.
 */
3560 ccb = xpt_alloc_ccb_nowait();
3562 path_id = cam_sim_path(sc->sassc->sim);
3563 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3564 target->tid, lun_id) != CAM_REQ_CMP) {
3568 tm->cm_targ = target;
3569 target->flags |= MPSSAS_TARGET_INRESET;
/*
 * Begin SAS discovery by asking the IOC to enable its ports.
 */
3575 mpssas_startup(struct mps_softc *sc)
/*
 * Send the port enable message and set the wait_for_port_enable flag.
 * This flag helps to keep the simq frozen until all discovery events
 * have been handled.
 */
3583 sc->wait_for_port_enable = 1;
3584 mpssas_send_portenable(sc);
/*
 * Build and queue an MPI2 PORT ENABLE request to the IOC.  Completion
 * is handled asynchronously by mpssas_portenable_complete().
 */
3589 mpssas_send_portenable(struct mps_softc *sc)
3591 MPI2_PORT_ENABLE_REQUEST *request;
3592 struct mps_command *cm;
/* Bail out if no free command frame is available right now. */
3596 if ((cm = mps_alloc_command(sc)) == NULL)
3598 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3599 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3600 request->MsgFlags = 0;
/* Default descriptor type; completion routed to our callback. */
3602 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3603 cm->cm_complete = mpssas_portenable_complete;
3607 mps_map_command(sc, cm);
3608 mps_dprint(sc, MPS_XINFO,
3609 "mps_send_portenable finished cm %p req %p complete %p\n",
3610 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT ENABLE request sent by
 * mpssas_send_portenable().  Checks the reply status, tears down the
 * boot-time config intrhook, fetches WarpDrive config pages, and
 * releases the startup simq freeze taken before port enable.
 */
3615 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3617 MPI2_PORT_ENABLE_REPLY *reply;
3618 struct mpssas_softc *sassc;
/*
 * Currently there should be no way we can hit this case.  It only
 * happens when we have a failure to allocate chain frames, and
 * port enable commands don't have S/G lists.
 */
3628 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3629 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3630 "This should not happen!\n", __func__, cm->cm_flags);
3633 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3635 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
/*
 * NOTE(review): the mask is applied before the little-endian byte
 * swap, so on a big-endian host this tests the wrong bits of
 * IOCStatus.  The conventional form used elsewhere is
 * (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) — confirm and fix
 * in a separate change.
 */
3636 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3637 MPI2_IOCSTATUS_SUCCESS)
3638 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3640 mps_free_command(sc, cm);
/* Boot-time setup is finished: drop the config intrhook, if still set. */
3641 if (sc->mps_ich.ich_arg != NULL) {
3642 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3643 config_intrhook_disestablish(&sc->mps_ich);
3644 sc->mps_ich.ich_arg = NULL;
/*
 * Get WarpDrive info after discovery is complete but before the scan
 * starts.  At this point, all devices are ready to be exposed to the
 * OS.  If devices should be hidden instead, take them out of the
 * 'targets' array before the scan.  The devinfo for a disk will have
 * some info and a volume's will be 0.  Use that to remove disks.
 */
3654 mps_wd_config_pages(sc);
/*
 * Done waiting for port enable to complete.  Decrement the refcount.
 * If refcount is 0, discovery is complete and a rescan of the bus can
 * take place.  Since the simq was explicitly frozen before port
 * enable, it must be explicitly released here to keep the
 * freeze/release count in sync.
 */
3663 sc->wait_for_port_enable = 0;
3664 sc->port_enable_complete = 1;
3665 wakeup(&sc->port_enable_complete);
3666 mpssas_startup_decrement(sassc);
/*
 * Check whether a target id appears in the driver's exclude list.
 * The exclude_ids string is parsed as a comma-separated list of
 * numeric ids; empty fields are skipped.
 * NOTE(review): strsep() modifies the string it parses and advances
 * the 'ids' cursor — confirm sc->exclude_ids is a scratch copy (or is
 * re-initialized) before each call.
 */
3670 mpssas_check_id(struct mpssas_softc *sassc, int id)
3672 struct mps_softc *sc = sassc->sc;
3676 ids = &sc->exclude_ids[0];
3677 while((name = strsep(&ids, ",")) != NULL) {
/* Skip empty tokens produced by consecutive commas. */
3678 if (name[0] == '\0')
/* Base 0: accepts decimal, octal (0...), and hex (0x...) forms. */
3680 if (strtol(name, NULL, 0) == (long)id)
3688 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3690 struct mpssas_softc *sassc;
3691 struct mpssas_lun *lun, *lun_tmp;
3692 struct mpssas_target *targ;
3697 * The number of targets is based on IOC Facts, so free all of
3698 * the allocated LUNs for each target and then the target buffer
3701 for (i=0; i< maxtargets; i++) {
3702 targ = &sassc->targets[i];
3703 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3707 free(sassc->targets, M_MPT2);
3709 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3710 M_MPT2, M_WAITOK|M_ZERO);
3711 if (!sassc->targets) {
3712 panic("%s failed to alloc targets with error %d\n",