2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2015 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 /* Communications core for Avago Technologies (LSI) MPT2 */
38 /* TODO Move headers to mpsvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
48 #include <sys/malloc.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
57 #include <machine/bus.h>
58 #include <machine/resource.h>
61 #include <machine/stdarg.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_xpt.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #if __FreeBSD_version >= 900026
74 #include <cam/scsi/smp_all.h>
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
/*
 * Discovery watchdog tuning: each timeout tick is 20 seconds, and up to
 * 10 ticks are allowed (200 seconds total) before discovery is treated
 * as stuck.
 */
#define MPSSAS_DISCOVERY_TIMEOUT	20
#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * static array to check SCSI OpCode for EEDP protection bits.
 * PRO_R marks opcodes that check/remove protection info on reads,
 * PRO_W marks opcodes that insert it on writes, and PRO_V marks
 * verify-class opcodes that also insert it.
 */
#define	PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define	PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define	PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 static uint8_t op_code_prot[256] = {
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");

/*
 * Forward declarations for the CAM glue and completion handlers defined
 * later in this file.
 * NOTE(review): this excerpt is missing some lines — e.g. the second
 * parameter line of mpssas_send_smpcmd() and the closing #endif of the
 * EEDP-related #if block are not shown here; confirm against the full
 * source before building.
 */
static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
static void mpssas_poll(struct cam_sim *sim);
static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
    struct mps_command *cm);
static void mpssas_scsiio_timeout(void *data);
static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
    struct mps_command *cm, union ccb *ccb);
static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
#if __FreeBSD_version >= 900026
static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026
static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
static void mpssas_async(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
static int mpssas_send_portenable(struct mps_softc *sc);
static void mpssas_portenable_complete(struct mps_softc *sc,
    struct mps_command *cm);
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
154 struct mpssas_target *target;
157 for (i = start; i < sassc->maxtargets; i++) {
158 target = &sassc->targets[i];
159 if (target->handle == handle)
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery. Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
174 mpssas_startup_increment(struct mpssas_softc *sassc)
176 MPS_FUNCTRACE(sassc->sc);
178 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 if (sassc->startup_refcount++ == 0) {
180 /* just starting, freeze the simq */
181 mps_dprint(sassc->sc, MPS_INIT,
182 "%s freezing simq\n", __func__);
183 #if __FreeBSD_version >= 1000039
186 xpt_freeze_simq(sassc->sim, 1);
188 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189 sassc->startup_refcount);
194 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
196 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198 xpt_release_simq(sassc->sim, 1);
199 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
204 mpssas_startup_decrement(struct mpssas_softc *sassc)
206 MPS_FUNCTRACE(sassc->sc);
208 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
209 if (--sassc->startup_refcount == 0) {
210 /* finished all discovery-related actions, release
211 * the simq and rescan for the latest topology.
213 mps_dprint(sassc->sc, MPS_INIT,
214 "%s releasing simq\n", __func__);
215 sassc->flags &= ~MPSSAS_IN_STARTUP;
216 xpt_release_simq(sassc->sim, 1);
217 #if __FreeBSD_version >= 1000039
220 mpssas_rescan_target(sassc->sc, NULL);
223 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
224 sassc->startup_refcount);
/* The firmware requires us to stop sending commands when we're doing task
 * management, so refcount the TMs and keep the simq frozen when any are in
 * use.
 */
/*
 * Allocate a task-management command from the high-priority pool.
 * Returns NULL if the pool is exhausted.
 * (Fix: restored return-type line, braces and return statement truncated
 * in this excerpt.)
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	struct mps_command *tm;

	tm = mps_alloc_high_priority_command(sc);
	return tm;
}
242 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
244 int target_id = 0xFFFFFFFF;
250 * For TM's the devq is frozen for the device. Unfreeze it here and
251 * free the resources used for freezing the devq. Must clear the
252 * INRESET flag as well or scsi I/O will not work.
254 if (tm->cm_targ != NULL) {
255 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
256 target_id = tm->cm_targ->tid;
259 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
261 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
262 xpt_free_path(tm->cm_ccb->ccb_h.path);
263 xpt_free_ccb(tm->cm_ccb);
266 mps_free_high_priority_command(sc, tm);
270 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
272 struct mpssas_softc *sassc = sc->sassc;
274 target_id_t targetid;
278 pathid = cam_sim_path(sassc->sim);
280 targetid = CAM_TARGET_WILDCARD;
282 targetid = targ - sassc->targets;
285 * Allocate a CCB and schedule a rescan.
287 ccb = xpt_alloc_ccb_nowait();
289 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
293 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
294 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
295 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
300 if (targetid == CAM_TARGET_WILDCARD)
301 ccb->ccb_h.func_code = XPT_SCAN_BUS;
303 ccb->ccb_h.func_code = XPT_SCAN_TGT;
305 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
310 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
320 /* No need to be in here if debugging isn't enabled */
321 if ((cm->cm_sc->mps_debug & level) == 0)
324 sbuf_new(&sb, str, sizeof(str), 0);
328 if (cm->cm_ccb != NULL) {
329 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
331 sbuf_cat(&sb, path_str);
332 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
333 scsi_command_string(&cm->cm_ccb->csio, &sb);
334 sbuf_printf(&sb, "length %d ",
335 cm->cm_ccb->csio.dxfer_len);
339 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
340 cam_sim_name(cm->cm_sc->sassc->sim),
341 cam_sim_unit(cm->cm_sc->sassc->sim),
342 cam_sim_bus(cm->cm_sc->sassc->sim),
343 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
347 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
348 sbuf_vprintf(&sb, fmt, ap);
350 mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  Logs the outcome, frees the reply
 * frame, and on success clears the volatile identity fields of the
 * target so the slot can be re-used if the volume comes back.
 * NOTE(review): this excerpt is missing structural lines — the 'static
 * void' return type, braces, early returns, and local declarations such
 * as 'handle' — confirm against the full source.
 */
mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	struct mpssas_target *targ;

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* completion data carries the device handle being removed */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		mpssas_free_tm(sc, tm);

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		    "IOCStatus = 0x%x while resetting device 0x%x\n",
		    le16toh(reply->IOCStatus), handle);

	mps_dprint(sc, MPS_XINFO,
	    "Reset aborted %u commands\n", reply->TerminationCount);
	/* free the reply frame and make sure it cannot be freed twice */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	mps_dprint(sc, MPS_XINFO,
	    "clearing target %u handle 0x%04x\n", targ->tid, handle);

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->linkrate = 0x0;

	mpssas_free_tm(sc, tm);
/*
 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
 * Otherwise Volume Delete is same as Bare Drive Removal.
 *
 * Issues a target-reset TM for the volume's device handle; completion
 * is handled by mpssas_remove_volume().
 * NOTE(review): braces and several early-return lines (WD expose check,
 * invalid handle, TM alloc failure) are missing from this excerpt.
 */
mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	/*
	 * If this is a WD controller, determine if the disk should be exposed
	 * to the OS or not. If disk should be exposed, return from this
	 * function without doing anything.
	 */
	if (sc->WD_available && (sc->WD_hide_expose ==
	    MPS_WD_EXPOSE_ALWAYS)) {

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);

	/* keep new I/O away from the target while the removal runs */
	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	cm = mpssas_alloc_tm(sc);
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	req->DevHandle = targ->handle;
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* TM frames go through the high-priority queue */
	cm->cm_desc.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_volume;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
/*
 * The MPT2 firmware performs debounce on the link to avoid transient link
 * errors and false removals.  When it does decide that link has been lost
 * and a device need to go away, it expects that the host will perform a
 * target reset and then an op remove.  The reset has the side-effect of
 * aborting any outstanding requests for the device, which is required for
 * the op-remove to succeed.  It's not clear if the host should check for
 * the device coming back alive after the reset.
 *
 * NOTE(review): braces and several early-return lines (invalid handle,
 * TM alloc failure) are missing from this excerpt.
 */
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *cm;
	struct mpssas_target *targ = NULL;

	MPS_FUNCTRACE(sassc->sc);

	targ = mpssas_find_target_by_handle(sassc, 0, handle);
		/* FIXME: what is the action? */
		/* We don't know about this device? */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);

	/* keep new I/O away from the target while the removal runs */
	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	cm = mpssas_alloc_tm(sc);
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* TM frames go through the high-priority queue; completion chains
	 * to mpssas_remove_device() which issues the actual op-remove. */
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
	    __func__, targ->tid);
	mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);

	mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove().  On a usable reply it reuses the same command
 * frame to send the SAS_IO_UNIT_CONTROL "remove device" operation, then
 * completes any commands still queued on the target with
 * CAM_DEV_NOT_THERE.
 * NOTE(review): the 'static void' return type, braces, early returns and
 * some local declarations ('handle', 'ccb') are missing from this
 * excerpt.
 */
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	/* completion data carries the device handle being removed */
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,

		/* XXX retry the remove after the diag reset completes? */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply resetting device 0x%04x\n", __func__,
		mpssas_free_tm(sc, tm);

	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_ERROR,
		    "IOCStatus = 0x%x while resetting device 0x%x\n",
		    le16toh(reply->IOCStatus), handle);

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	/* free the reply frame and make sure it cannot be freed twice */
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);

	/* op-remove is a normal (default-queue) request, not a TM */
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
	/* fail any commands still queued on the departing target */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
		mpssas_scsiio_complete(sc, tm);
/*
 * Completion handler for the SAS_IO_UNIT_CONTROL "remove device"
 * operation sent by mpssas_remove_device().  On success, clears the
 * target's volatile identity fields and frees its LUN list; the TM
 * command is returned to the pool in all cases.
 * NOTE(review): the 'static void' return type, braces, early returns and
 * the 'handle' declaration are missing from this excerpt.
 */
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		mpssas_free_tm(sc, tm);

		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
	    MPI2_IOCSTATUS_SUCCESS) {
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->linkrate = 0x0;

		/* drain and free the per-target LUN list */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);

	mpssas_free_tm(sc, tm);
690 mpssas_register_events(struct mps_softc *sc)
692 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
695 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 setbit(events, MPI2_EVENT_IR_VOLUME);
704 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
708 mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the SAS softc and target array,
 * create the CAM simq/SIM and the firmware-event taskqueue, register the
 * SCSI bus, freeze the simq until initial discovery completes, set up the
 * async (EEDP) handler, and finally enable firmware events.
 * NOTE(review): the 'int' return type, braces, error-unwind paths and the
 * trailing return statement are missing from this excerpt.
 */
mps_attach_sas(struct mps_softc *sc)
	struct mpssas_softc *sassc;

	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",

	/*
	 * XXX MaxTargets could change during a reinit.  Since we don't
	 * resize the targets[] array during such an event, cache the value
	 * of MaxTargets here so that we don't get into trouble later.  This
	 * should move into the reinit logic.
	 */
	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);
	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",

	/*
	 * Assume that discovery events will start right away.
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
		event = AC_FOUND_DEVICE;
		status = xpt_register_async(event, mpssas_async, sc,
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);

	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");

	mpssas_register_events(sc);
/*
 * Tear down the SAS/CAM layer in reverse of mps_attach_sas(): deregister
 * firmware events, drain and free the event taskqueue, remove the async
 * handler and path, release the simq if it was still frozen from
 * startup, deregister/free the SIM and simq, then free per-target LUN
 * lists and the target array.  A NULL sc->sassc means attach never
 * completed and there is nothing to do.
 * NOTE(review): the 'int' return type, braces, lock acquire/release and
 * the final return are missing from this excerpt.
 */
mps_detach_sas(struct mps_softc *sc)
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;

	if (sc->sassc == NULL)

	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early. */

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);

	/* simq was frozen at attach; release it if startup never finished */
	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* free each target's LUN list before freeing the array itself */
	for(i=0; i< sassc->maxtargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {

	free(sassc->targets, M_MPT2);
/*
 * Called when the firmware signals end-of-discovery: cancel any pending
 * discovery timeout and, if mapping-event tracking is enabled, arm the
 * device-check callout to update missing-device counts.
 * NOTE(review): the 'static void' return type, braces and the tail of
 * the callout_reset() argument list are missing from this excerpt.
 */
mpssas_discovery_end(struct mpssas_softc *sassc)
	struct mps_softc *sc = sassc->sc;

	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

	/*
	 * After discovery has completed, check the mapping table for any
	 * missing devices and update their missing counts.  Only do this once
	 * whenever the driver is initialized so that missing counts aren't
	 * updated unnecessarily.  Note that just because discovery has
	 * completed doesn't mean that events have been processed yet.  The
	 * check_devices function is a callout timer that checks if ALL devices
	 * are missing.  If so, it will wait a little longer for events to
	 * complete and keep resetting itself until some device in the mapping
	 * table is not missing, meaning that event processing has started.
	 */
	if (sc->track_mapping_events) {
		mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
		    "completed. Check for missing devices in the mapping "
		callout_reset(&sc->device_check_callout,
		    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
/*
 * Main CAM action entry point for the mps SIM.  Dispatches on the CCB
 * function code: answers path inquiries (XPT_PATH_INQ), reports transfer
 * settings, computes geometry, performs device resets, fakes success for
 * abort/bus-reset requests the hardware handles itself, and hands SCSI
 * and SMP I/O to their dedicated handlers.  Must be called with the mps
 * mutex held.
 * NOTE(review): this excerpt is missing the 'static void' return type,
 * several 'case XPT_xxx:' labels, 'break' statements, an '#else/#endif'
 * pair, and the closing xpt_done()/braces — confirm against the full
 * source.
 */
mpssas_action(struct cam_sim *sim, union ccb *ccb)
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
		/* NOTE(review): 'case XPT_PATH_INQ:' label missing here */
		struct ccb_pathinq *cpi = &ccb->cpi;
		struct mps_softc *sc = sassc->sc;
		uint8_t sges_per_frame;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if __FreeBSD_version >= 1000039
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->maxtargets - 1;
		/*
		 * initiator_id is set here to an ID outside the set of valid
		 * target IDs (including volumes).
		 */
		cpi->initiator_id = sassc->maxtargets;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
		/*
		 * Max IO Size is Page Size * the following:
		 * ((SGEs per frame - 1 for chain element) *
		 * Max Chain Depth) + 1 for no chain needed in last frame
		 *
		 * If user suggests a Max IO size to use, use the smaller of the
		 * user's value and the calculated value as long as the user's
		 * value is larger than 0.  The user's value is in pages.
		 */
		sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
		    sizeof(MPI2_SGE_SIMPLE64)) - 1;
		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
		cpi->maxio *= PAGE_SIZE;
		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	case XPT_GET_TRAN_SETTINGS:
		struct ccb_trans_settings	*cts;
		struct ccb_trans_settings_sas	*sas;
		struct ccb_trans_settings_scsi	*scsi;
		struct mpssas_target *targ;

		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
		    ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
		    cts->ccb_h.target_id));
		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle 0 means the slot holds no discovered device */
		if (targ->handle == 0x0) {
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* NOTE(review): 'case' labels for the link rates missing */
		switch (targ->linkrate) {
			sas->bitrate = 150000;
			sas->bitrate = 300000;
			sas->bitrate = 600000;

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* NOTE(review): 'case XPT_RESET_DEV:' label missing here */
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		/* NOTE(review): abort/bus-reset case labels missing here */
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* NOTE(review): 'case XPT_SCSI_IO:' label missing here */
		mpssas_action_scsiio(sassc, ccb);
#if __FreeBSD_version >= 900026
		/* NOTE(review): 'case XPT_SMP_IO:' label missing here */
		mpssas_action_smpio(sassc, ccb);
		/* default: unsupported function codes */
		mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1078 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1079 target_id_t target_id, lun_id_t lun_id)
1081 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1082 struct cam_path *path;
1084 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1085 ac_code, target_id, (uintmax_t)lun_id);
1087 if (xpt_create_path(&path, NULL,
1088 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1089 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1094 xpt_async(ac_code, path, NULL);
1095 xpt_free_path(path);
/*
 * After a diag reset, force-complete every in-flight command with a NULL
 * reply: invoke completion callbacks, wake up synchronous waiters, and
 * resynchronize the active-I/O accounting.  Must be called with the mps
 * mutex held.
 * NOTE(review): the 'static void' return type, braces, the 'completed'
 * local and its per-command reset, the wakeup() call and the state
 * checks surrounding these fragments are missing from this excerpt.
 */
mpssas_complete_all_commands(struct mps_softc *sc)
	struct mps_command *cm;

	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;

		/* polled commands just need the COMPLETE flag set */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			cm->cm_complete(sc, cm);

		/* synchronous submitters are sleeping on the command */
		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

		if (cm->cm_sc->io_cmds_active != 0) {
			cm->cm_sc->io_cmds_active--;
			mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
			    "io_cmds_active is out of sync - resynching to "

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Recovery path run after a controller diag reset: re-enter startup mode
 * (refreezing the simq), announce a bus reset to CAM, force-complete all
 * outstanding commands, and zero every cached device handle so discovery
 * can repopulate them.
 * NOTE(review): the 'void' return type, braces and the 'int i'
 * declaration are missing from this excerpt.
 */
mpssas_handle_reinit(struct mps_softc *sc)
	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered all
	 * targets and found the proper device handles for them.
	 *
	 * After the reset, portenable will trigger discovery, and after all
	 * discovery-related activities have finished, the simq will be
	 * released.
	 */
	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
	sc->sassc->flags |= MPSSAS_IN_STARTUP;
	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
	mpssas_startup_increment(sc->sassc);

	/* notify CAM of a bus reset */
	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,

	/* complete and cleanup after all outstanding commands */
	mpssas_complete_all_commands(sc);

	mps_dprint(sc, MPS_INIT,
	    "%s startup %u after command completion\n", __func__,
	    sc->sassc->startup_refcount);

	/* zero all the target handles, since they may change after the
	 * reset, and we have to rediscover all the targets and use the new
	 * handles.
	 */
	for (i = 0; i < sc->sassc->maxtargets; i++) {
		if (sc->sassc->targets[i].outstanding != 0)
			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
			    i, sc->sassc->targets[i].outstanding);
		sc->sassc->targets[i].handle = 0x0;
		sc->sassc->targets[i].exp_dev_handle = 0x0;
		sc->sassc->targets[i].outstanding = 0;
		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * mpssas_tm_timeout() -- callout handler fired when a task-management
 * command receives no completion within its timeout (see MPS_RESET_TIMEOUT /
 * MPS_ABORT_TIMEOUT at the callout_reset() call sites).  Runs with the
 * softc mutex held, as asserted below.  The recovery action taken after the
 * log message (presumably escalation to a diag reset) is on source lines
 * elided from this extract -- TODO confirm against the full file.
 */
1197 mpssas_tm_timeout(void *data)
1199 struct mps_command *tm = data;
1200 struct mps_softc *sc = tm->cm_sc;
1202 mtx_assert(&sc->mps_mtx, MA_OWNED);
1204 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1205 "task mgmt %p timed out\n", tm);
/*
 * mpssas_logical_unit_reset_complete() -- completion handler for a
 * LOGICAL_UNIT_RESET task-management command issued by mpssas_send_reset().
 *
 * Stops the TM timeout callout, then:
 *  - bails out (freeing the tm) if the TM itself hit a chain-frame error;
 *  - treats a NULL reply as a diag-reset side effect and just cleans up;
 *  - otherwise counts commands still outstanding for this LUN: if none
 *    remain, recovery for the LUN is done -- announce AC_SENT_BDR and either
 *    start aborting the next timed-out command on the target or free the tm;
 *    if commands remain, the reset effectively failed and we escalate to a
 *    full target reset regardless of the reported IOCStatus.
 */
1210 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1212 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1213 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1214 unsigned int cm_count = 0;
1215 struct mps_command *cm;
1216 struct mpssas_target *targ;
/* The TM completed; cancel the pending mpssas_tm_timeout() callout. */
1218 callout_stop(&tm->cm_callout);
1220 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1221 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1225 * Currently there should be no way we can hit this case. It only
1226 * happens when we have a failure to allocate chain frames, and
1227 * task management commands don't have S/G lists.
1228 * XXXSL So should it be an assertion?
1230 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1231 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1232 "This should not happen!\n", __func__, tm->cm_flags);
1233 mpssas_free_tm(sc, tm);
1237 if (reply == NULL) {
1238 mpssas_log_command(tm, MPS_RECOVERY,
1239 "NULL reset reply for tm %p\n", tm);
1240 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1241 /* this completion was due to a reset, just cleanup */
1243 mpssas_free_tm(sc, tm);
1246 /* we should have gotten a reply. */
1252 mpssas_log_command(tm, MPS_RECOVERY,
1253 "logical unit reset status 0x%x code 0x%x count %u\n",
1254 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1255 le32toh(reply->TerminationCount))\u003b
1257 /* See if there are any outstanding commands for this LUN.
1258 * This could be made more efficient by using a per-LU data
1259 * structure of some sort.
1261 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1262 if (cm->cm_lun == tm->cm_lun)
1266 if (cm_count == 0) {
1267 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1268 "logical unit %u finished recovery after reset\n",
/* Tell CAM a device reset (BDR) was delivered to this target. */
1271 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1274 /* we've finished recovery for this logical unit. check and
1275 * see if some other logical unit has a timedout command
1276 * that needs to be processed.
1278 cm = TAILQ_FIRST(&targ->timedout_commands);
/* Reuse this tm to abort the next timed-out command, if any. */
1280 mpssas_send_abort(sc, tm, cm);
1284 mpssas_free_tm(sc, tm);
1288 /* if we still have commands for this LUN, the reset
1289 * effectively failed, regardless of the status reported.
1290 * Escalate to a target reset.
1292 mpssas_log_command(tm, MPS_RECOVERY,
1293 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1295 mpssas_send_reset(sc, tm,
1296 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * mpssas_target_reset_complete() -- completion handler for a TARGET_RESET
 * task-management command (the escalation step after a failed LUN reset,
 * see mpssas_send_reset()).
 *
 * Mirrors mpssas_logical_unit_reset_complete(): stop the timeout callout,
 * bail on a chain-frame error, treat a NULL reply during diag reset as
 * cleanup-only.  If the target now has no outstanding commands, recovery is
 * finished -- announce AC_SENT_BDR and free the tm.  If commands are still
 * outstanding the reset effectively failed; the further escalation path is
 * on lines elided from this extract (presumably a diag reset -- TODO
 * confirm against the full file).
 */
1301 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1303 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1304 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1305 struct mpssas_target *targ;
/* TM completed; cancel the pending mpssas_tm_timeout() callout. */
1307 callout_stop(&tm->cm_callout);
1309 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1310 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1314 * Currently there should be no way we can hit this case. It only
1315 * happens when we have a failure to allocate chain frames, and
1316 * task management commands don't have S/G lists.
1318 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1319 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1320 "This should not happen!\n", __func__, tm->cm_flags);
1321 mpssas_free_tm(sc, tm);
1325 if (reply == NULL) {
1326 mpssas_log_command(tm, MPS_RECOVERY,
1327 "NULL reset reply for tm %p\n", tm);
1328 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1329 /* this completion was due to a reset, just cleanup */
1331 mpssas_free_tm(sc, tm);
1334 /* we should have gotten a reply. */
1340 mpssas_log_command(tm, MPS_RECOVERY,
1341 "target reset status 0x%x code 0x%x count %u\n",
1342 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1343 le32toh(reply->TerminationCount));
1345 if (targ->outstanding == 0) {
1346 /* we've finished recovery for this target and all
1347 * of its logical units.
1349 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1350 "recovery finished after target reset\n");
1352 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1356 mpssas_free_tm(sc, tm);
1359 /* after a target reset, if this target still has
1360 * outstanding commands, the reset effectively failed,
1361 * regardless of the status reported. escalate.
1363 mpssas_log_command(tm, MPS_RECOVERY,
1364 "target reset complete for tm %p, but still have %u command(s)\n",
1365 tm, targ->outstanding);
/* Seconds to wait for a reset TM to complete before mpssas_tm_timeout(). */
1370 #define MPS_RESET_TIMEOUT 30
/*
 * mpssas_send_reset() -- build and issue a SCSI task-management reset
 * against tm->cm_targ.  'type' selects LOGICAL_UNIT_RESET (scoped to
 * tm->cm_lun, completion handled by mpssas_logical_unit_reset_complete) or
 * TARGET_RESET (link-reset method, LUN wildcard, completion handled by
 * mpssas_target_reset_complete); any other type is rejected.  Arms a
 * MPS_RESET_TIMEOUT callout, then queues the TM as a high-priority request
 * via mps_map_command().  Returns the mps_map_command() error on failure
 * (return statements sit on lines elided from this extract).
 */
1373 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1375 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1376 struct mpssas_target *target;
1379 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1380 if (target->handle == 0) {
1381 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1382 __func__, target->tid);
1386 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1387 req->DevHandle = htole16(target->handle);
1388 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1389 req->TaskType = type;
1391 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1392 /* XXX Need to handle invalid LUNs */
1393 MPS_SET_LUN(req->LUN, tm->cm_lun);
1394 tm->cm_targ->logical_unit_resets++;
1395 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1396 "sending logical unit reset\n");
1397 tm->cm_complete = mpssas_logical_unit_reset_complete;
1398 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1400 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1402 * Target reset method =
1403 * SAS Hard Link Reset / SATA Link Reset
1405 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1406 tm->cm_targ->target_resets++;
1407 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1408 "sending target reset\n");
1409 tm->cm_complete = mpssas_target_reset_complete;
1410 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1413 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TMs go on the high-priority queue; the firmware reserves credits. */
1418 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1419 tm->cm_complete_data = (void *)tm;
1421 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1422 mpssas_tm_timeout, tm);
1424 err = mps_map_command(sc, tm);
1426 mpssas_log_command(tm, MPS_RECOVERY,
1427 "error %d sending reset type %u\n",
/*
 * mpssas_abort_complete() -- completion handler for an ABORT_TASK TM sent
 * by mpssas_send_abort().
 *
 * Stops the timeout callout, bails on a chain-frame error, and treats a
 * NULL reply during diag reset as cleanup-only.  Otherwise inspects the
 * target's timedout_commands list to decide how recovery proceeds:
 *  - list empty: the aborted command completed; recovery for this target
 *    is finished, free the tm;
 *  - head of list is a different command than the one we aborted (SMID
 *    mismatch): the abort worked, keep going by aborting the next one;
 *  - head of list is still the command we tried to abort: the abort failed,
 *    escalate to a logical unit reset.
 */
1435 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1437 struct mps_command *cm;
1438 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1439 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1440 struct mpssas_target *targ;
/* TM completed; cancel the pending mpssas_tm_timeout() callout. */
1442 callout_stop(&tm->cm_callout);
1444 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1445 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1449 * Currently there should be no way we can hit this case. It only
1450 * happens when we have a failure to allocate chain frames, and
1451 * task management commands don't have S/G lists.
1453 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1454 mpssas_log_command(tm, MPS_RECOVERY,
1455 "cm_flags = %#x for abort %p TaskMID %u!\n",
1456 tm->cm_flags, tm, le16toh(req->TaskMID));
1457 mpssas_free_tm(sc, tm);
1461 if (reply == NULL) {
1462 mpssas_log_command(tm, MPS_RECOVERY,
1463 "NULL abort reply for tm %p TaskMID %u\n",
1464 tm, le16toh(req->TaskMID));
1465 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1466 /* this completion was due to a reset, just cleanup */
1468 mpssas_free_tm(sc, tm);
1471 /* we should have gotten a reply. */
1477 mpssas_log_command(tm, MPS_RECOVERY,
1478 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1479 le16toh(req->TaskMID),
1480 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1481 le32toh(reply->TerminationCount));
1483 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1485 /* if there are no more timedout commands, we're done with
1486 * error recovery for this target.
1488 mpssas_log_command(tm, MPS_RECOVERY,
1489 "finished recovery after aborting TaskMID %u\n",
1490 le16toh(req->TaskMID));
1493 mpssas_free_tm(sc, tm);
1495 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1496 /* abort success, but we have more timedout commands to abort */
1497 mpssas_log_command(tm, MPS_RECOVERY,
1498 "continuing recovery after aborting TaskMID %u\n",
1499 le16toh(req->TaskMID));
1501 mpssas_send_abort(sc, tm, cm);
1504 /* we didn't get a command completion, so the abort
1505 * failed as far as we're concerned. escalate.
1507 mpssas_log_command(tm, MPS_RECOVERY,
1508 "abort failed for TaskMID %u tm %p\n",
1509 le16toh(req->TaskMID), tm);
1511 mpssas_send_reset(sc, tm,
1512 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to wait for an ABORT_TASK TM before mpssas_tm_timeout() fires. */
1516 #define MPS_ABORT_TIMEOUT 5
/*
 * mpssas_send_abort() -- build and issue an ABORT_TASK task-management
 * command ('tm') targeting the timed-out I/O command 'cm'.  The TM carries
 * the victim's LUN and SMID (TaskMID), completes via
 * mpssas_abort_complete(), and is queued high-priority through
 * mps_map_command() after arming a MPS_ABORT_TIMEOUT callout.  Returns the
 * mps_map_command() error on failure (return statements sit on lines
 * elided from this extract).
 */
1519 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1521 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1522 struct mpssas_target *targ;
/* A zero devhandle means the target is gone; nothing to abort against. */
1526 if (targ->handle == 0) {
1527 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1528 __func__, cm->cm_ccb->ccb_h.target_id);
1532 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1533 "Aborting command %p\n", cm);
1535 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1536 req->DevHandle = htole16(targ->handle);
1537 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1538 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1540 /* XXX Need to handle invalid LUNs */
1541 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the exact request (by SMID) the IOC should abort. */
1543 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1546 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1547 tm->cm_complete = mpssas_abort_complete;
1548 tm->cm_complete_data = (void *)tm;
1549 tm->cm_targ = cm->cm_targ;
1550 tm->cm_lun = cm->cm_lun;
1552 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1553 mpssas_tm_timeout, tm);
/* NOTE(review): message says "reset" but this path sends an abort TM. */
1557 mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1558 __func__, targ->tid);
1559 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1561 err = mps_map_command(sc, tm);
1563 mps_dprint(sc, MPS_RECOVERY,
1564 "error %d sending abort for cm %p SMID %u\n",
1565 err, cm, req->TaskMID);
/*
 * mpssas_scsiio_timeout() -- callout handler for a SCSI I/O command that
 * exceeded its CCB timeout (armed in mpssas_action_scsiio()).  Runs with
 * the softc mutex held, as asserted below.
 *
 * First polls the interrupt handler in case the completion is merely
 * pending; if the command is already free, it "almost timed out" and no
 * recovery is needed.  Otherwise the command is marked CAM_CMD_TIMEOUT,
 * moved to the target's timedout_commands list, and recovery is started:
 * either the target already has a TM in flight (just queue behind it), or
 * a TM is allocated and an ABORT_TASK is sent, or -- if the firmware's
 * high-priority credits are exhausted -- the timeout is only logged.
 */
1570 mpssas_scsiio_timeout(void *data)
1572 struct mps_softc *sc;
1573 struct mps_command *cm;
1574 struct mpssas_target *targ;
1576 cm = (struct mps_command *)data;
1580 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* BUG FIX: format names the cm, but 'sc' was being passed; print cm. */
1582 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", cm);
1585 * Run the interrupt handler to make sure it's not pending. This
1586 * isn't perfect because the command could have already completed
1587 * and been re-used, though this is unlikely.
1589 mps_intr_locked(sc);
1590 if (cm->cm_state == MPS_CM_STATE_FREE) {
1591 mpssas_log_command(cm, MPS_XINFO,
1592 "SCSI command %p almost timed out\n", cm);
1596 if (cm->cm_ccb == NULL) {
1597 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1604 mpssas_log_command(cm, MPS_ERROR, "command timeout %d cm %p target "
1605 "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
1608 /* XXX first, check the firmware state, to see if it's still
1609 * operational. if not, do a diag reset.
1611 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1612 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1613 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1615 if (targ->tm != NULL) {
1616 /* target already in recovery, just queue up another
1617 * timedout command to be processed later.
1619 mps_dprint(sc, MPS_RECOVERY,
1620 "queued timedout cm %p for processing by tm %p\n",
1623 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1624 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1627 /* start recovery by aborting the first timedout command */
1628 mpssas_send_abort(sc, targ->tm, cm);
1631 /* XXX queue this target up for recovery once a TM becomes
1632 * available. The firmware only has a limited number of
1633 * HighPriority credits for the high priority requests used
1634 * for task management, and we ran out.
1636 * Isilon: don't worry about this for now, since we have
1637 * more credits than disks in an enclosure, and limit
1638 * ourselves to one TM per target for recovery.
1640 mps_dprint(sc, MPS_RECOVERY,
1641 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * mpssas_action_scsiio() -- XPT_SCSI_IO action handler: translate a CAM
 * ccb_scsiio into an MPI2 SCSI_IO request and hand it to the hardware.
 *
 * Early-out paths (status set on the ccb): target gone or a RAID component
 * (CAM_DEV_NOT_THERE), ccb no longer in progress, target being removed
 * (volumes complete CAM_REQ_CMP, others CAM_SEL_TIMEOUT), driver shutting
 * down, or target mid-reset (freeze the devq, CAM_BUSY).  Command-frame
 * allocation failure freezes the simq and requeues the ccb.
 *
 * Otherwise fills in the request: devhandle, sense buffer, data length and
 * direction, tag-action queueing bits, per-target TLR bits, LUN, CDB
 * (pointer or inline), and -- when EEDP is enabled and the LUN is EEDP
 * formatted -- protection-information fields derived from the CDB's LBA.
 * Finally wires up the completion callback, arms the per-command timeout
 * callout, accounts the command on the target, and maps/queues it.
 */
1647 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1649 MPI2_SCSI_IO_REQUEST *req;
1650 struct ccb_scsiio *csio;
1651 struct mps_softc *sc;
1652 struct mpssas_target *targ;
1653 struct mpssas_lun *lun;
1654 struct mps_command *cm;
1655 uint8_t i, lba_byte, *ref_tag_addr;
1656 uint16_t eedp_flags;
1657 uint32_t mpi_control;
1661 mtx_assert(&sc->mps_mtx, MA_OWNED);
1664 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1665 ("Target %d out of bounds in XPT_SCSI_IO\n",
1666 csio->ccb_h.target_id));
1667 targ = &sassc->targets[csio->ccb_h.target_id];
1668 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1669 if (targ->handle == 0x0) {
1670 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1671 __func__, csio->ccb_h.target_id);
1672 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID member disks are addressed through the volume, never directly. */
1676 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1677 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1678 "supported %u\n", __func__, csio->ccb_h.target_id);
1679 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1684 * Sometimes, it is possible to get a command that is not "In
1685 * Progress" and was actually aborted by the upper layer. Check for
1686 * this here and complete the command without error.
1688 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1689 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1690 "target %u\n", __func__, csio->ccb_h.target_id);
1695 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1696 * that the volume has timed out. We want volumes to be enumerated
1697 * until they are deleted/removed, not just failed.
1699 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1700 if (targ->devinfo == 0)
1701 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1703 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1708 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1709 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1710 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1716 * If target has a reset in progress, freeze the devq and return. The
1717 * devq will be released when the TM reset is finished.
1719 if (targ->flags & MPSSAS_TARGET_INRESET) {
1720 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1721 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1722 __func__, targ->tid);
1723 xpt_freeze_devq(ccb->ccb_h.path, 1);
1728 cm = mps_alloc_command(sc);
/* No free command frame (or a diag reset raced us): requeue via CAM. */
1729 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1731 mps_free_command(sc, cm);
1733 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1734 xpt_freeze_simq(sassc->sim, 1);
1735 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1737 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1738 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1743 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1744 bzero(req, sizeof(*req));
1745 req->DevHandle = htole16(targ->handle);
1746 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1748 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1749 req->SenseBufferLength = MPS_SENSE_LEN;
1751 req->ChainOffset = 0;
1752 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1757 req->DataLength = htole32(csio->dxfer_len);
1758 req->BidirectionalDataLength = 0;
1759 req->IoFlags = htole16(csio->cdb_len);
1762 /* Note: BiDirectional transfers are not supported */
1763 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1765 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1766 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1769 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1770 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1774 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field (units of 4 bytes). */
1778 if (csio->cdb_len == 32)
1779 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1781 * It looks like the hardware doesn't require an explicit tag
1782 * number for each transaction. SAM Task Management not supported
1785 switch (csio->tag_action) {
1786 case MSG_HEAD_OF_Q_TAG:
1787 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1789 case MSG_ORDERED_Q_TAG:
1790 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1793 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1795 case CAM_TAG_ACTION_NONE:
1796 case MSG_SIMPLE_Q_TAG:
1798 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Per-target Transport Layer Retries setting, maintained elsewhere. */
1801 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1802 req->Control = htole32(mpi_control);
1803 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1804 mps_free_command(sc, cm);
1805 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
1810 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1811 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1813 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1814 req->IoFlags = htole16(csio->cdb_len);
1817 * Check if EEDP is supported and enabled. If it is then check if the
1818 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1819 * is formatted for EEDP support. If all of this is true, set CDB up
1820 * for EEDP transfer.
1822 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1823 if (sc->eedp_enabled && eedp_flags) {
1824 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1825 if (lun->lun_id == csio->ccb_h.target_lun) {
1830 if ((lun != NULL) && (lun->eedp_formatted)) {
1831 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1832 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1833 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1834 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1835 req->EEDPFlags = htole16(eedp_flags);
1838 * If CDB less than 32, fill in Primary Ref Tag with
1839 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1840 * already there. Also, set protection bit. FreeBSD
1841 * currently does not support CDBs bigger than 16, but
1842 * the code doesn't hurt, and will be here for the
1845 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 in 16-byte CDBs, byte 2 in 10/12-byte ones. */
1846 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1847 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1848 PrimaryReferenceTag;
1849 for (i = 0; i < 4; i++) {
1851 req->CDB.CDB32[lba_byte + i];
1854 req->CDB.EEDP32.PrimaryReferenceTag =
1855 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1856 req->CDB.EEDP32.PrimaryApplicationTagMask =
1858 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1862 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1863 req->EEDPFlags = htole16(eedp_flags);
1864 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1870 cm->cm_length = csio->dxfer_len;
1871 if (cm->cm_length != 0) {
1873 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
/* SGL begins at 32-bit word 24 of the frame; see SGLOffset0 above. */
1877 cm->cm_sge = &req->SGL;
1878 cm->cm_sglsize = (32 - 24) * 4;
1879 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1880 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1881 cm->cm_complete = mpssas_scsiio_complete;
1882 cm->cm_complete_data = ccb;
1884 cm->cm_lun = csio->ccb_h.target_lun;
1888 * If HBA is a WD and the command is not for a retry, try to build a
1889 * direct I/O message. If failed, or the command is for a retry, send
1890 * the I/O to the IR volume itself.
1892 if (sc->WD_valid_config) {
1893 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1894 mpssas_direct_drive_io(sassc, cm, ccb);
1896 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
/* Per-command timeout; fires mpssas_scsiio_timeout() on expiry. */
1900 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1901 mpssas_scsiio_timeout, cm, 0);
1904 targ->outstanding++;
1905 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1906 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1908 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1909 __func__, cm, ccb, targ->outstanding);
1911 mps_map_command(sc, cm);
/*
 * mps_response_code() -- decode an MPI2 task-management response code into
 * a human-readable string and emit it at MPS_XINFO debug level.  Purely
 * diagnostic; has no effect on driver state.
 */
1916 mps_response_code(struct mps_softc *sc, u8 response_code)
1920 switch (response_code) {
1921 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1922 desc = "task management request completed";
1924 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1925 desc = "invalid frame";
1927 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1928 desc = "task management request not supported";
1930 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1931 desc = "task management request failed";
1933 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1934 desc = "task management request succeeded";
1936 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1937 desc = "invalid lun";
1940 desc = "overlapped tag attempted";
1942 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1943 desc = "task queued, however not sent to target";
1949 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1950 response_code, desc);
1953 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Decodes the IOCStatus, SCSIStatus and SCSIState of a failed SCSI_IO
 * reply into readable strings and logs them at MPS_XINFO; also prints the
 * sense buffer when autosense data is valid, and the TM response code when
 * response info is present.  Diagnostic only.
 */
1956 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1957 Mpi2SCSIIOReply_t *mpi_reply)
1961 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1962 MPI2_IOCSTATUS_MASK;
1963 u8 scsi_state = mpi_reply->SCSIState;
1964 u8 scsi_status = mpi_reply->SCSIStatus;
1965 char *desc_ioc_state = NULL;
1966 char *desc_scsi_status = NULL;
/* scsi_state string is built up by strcat() into the softc scratch buf. */
1967 char *desc_scsi_state = sc->tmp_string;
1968 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is filtered out; presumably a known-noisy loginfo code --
 * TODO confirm against the firmware loginfo documentation. */
1970 if (log_info == 0x31170000)
1973 switch (ioc_status) {
1974 case MPI2_IOCSTATUS_SUCCESS:
1975 desc_ioc_state = "success";
1977 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1978 desc_ioc_state = "invalid function";
1980 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1981 desc_ioc_state = "scsi recovered error";
1983 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1984 desc_ioc_state = "scsi invalid dev handle";
1986 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1987 desc_ioc_state = "scsi device not there";
1989 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1990 desc_ioc_state = "scsi data overrun";
1992 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1993 desc_ioc_state = "scsi data underrun";
1995 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1996 desc_ioc_state = "scsi io data error";
1998 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1999 desc_ioc_state = "scsi protocol error";
2001 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2002 desc_ioc_state = "scsi task terminated";
2004 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2005 desc_ioc_state = "scsi residual mismatch";
2007 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2008 desc_ioc_state = "scsi task mgmt failed";
2010 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2011 desc_ioc_state = "scsi ioc terminated";
2013 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2014 desc_ioc_state = "scsi ext terminated";
2016 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2017 desc_ioc_state = "eedp guard error";
2019 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2020 desc_ioc_state = "eedp ref tag error";
2022 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2023 desc_ioc_state = "eedp app tag error";
2026 desc_ioc_state = "unknown";
2030 switch (scsi_status) {
2031 case MPI2_SCSI_STATUS_GOOD:
2032 desc_scsi_status = "good";
2034 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2035 desc_scsi_status = "check condition";
2037 case MPI2_SCSI_STATUS_CONDITION_MET:
2038 desc_scsi_status = "condition met";
2040 case MPI2_SCSI_STATUS_BUSY:
2041 desc_scsi_status = "busy";
2043 case MPI2_SCSI_STATUS_INTERMEDIATE:
2044 desc_scsi_status = "intermediate";
2046 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2047 desc_scsi_status = "intermediate condmet";
2049 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2050 desc_scsi_status = "reservation conflict";
2052 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2053 desc_scsi_status = "command terminated";
2055 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2056 desc_scsi_status = "task set full";
2058 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2059 desc_scsi_status = "aca active";
2061 case MPI2_SCSI_STATUS_TASK_ABORTED:
2062 desc_scsi_status = "task aborted";
2065 desc_scsi_status = "unknown";
2069 desc_scsi_state[0] = '\0';
/* NOTE(review): an `if (!scsi_state)` guard appears to sit on a line
 * elided from this extract; the " " assignment below only makes sense
 * under that guard (strcat into a literal would otherwise be UB) --
 * verify against the full file. */
2071 desc_scsi_state = " ";
2072 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2073 strcat(desc_scsi_state, "response info ");
2074 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2075 strcat(desc_scsi_state, "state terminated ");
2076 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2077 strcat(desc_scsi_state, "no status ");
2078 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2079 strcat(desc_scsi_state, "autosense failed ");
2080 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2081 strcat(desc_scsi_state, "autosense valid ");
2083 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2084 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2085 /* We can add more detail about underflow data here
2088 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2089 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2090 desc_scsi_state, scsi_state);
2092 if (sc->mps_debug & MPS_XINFO &&
2093 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2094 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2095 scsi_sense_print(csio);
2096 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2099 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
/* First byte of ResponseInfo carries the TM response code. */
2100 response_info = le32toh(mpi_reply->ResponseInfo);
2101 response_bytes = (u8 *)&response_info;
2102 mps_response_code(sc,response_bytes[0]);
2107 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2109 MPI2_SCSI_IO_REPLY *rep;
2111 struct ccb_scsiio *csio;
2112 struct mpssas_softc *sassc;
2113 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2114 u8 *TLR_bits, TLR_on;
2117 struct mpssas_target *target;
2118 target_id_t target_id;
2121 mps_dprint(sc, MPS_TRACE,
2122 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2123 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2124 cm->cm_targ->outstanding);
2126 callout_stop(&cm->cm_callout);
2127 mtx_assert(&sc->mps_mtx, MA_OWNED);
2130 ccb = cm->cm_complete_data;
2132 target_id = csio->ccb_h.target_id;
2133 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2135 * XXX KDM if the chain allocation fails, does it matter if we do
2136 * the sync and unload here? It is simpler to do it in every case,
2137 * assuming it doesn't cause problems.
2139 if (cm->cm_data != NULL) {
2140 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2141 dir = BUS_DMASYNC_POSTREAD;
2142 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2143 dir = BUS_DMASYNC_POSTWRITE;
2144 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2145 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2148 cm->cm_targ->completed++;
2149 cm->cm_targ->outstanding--;
2150 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2151 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2153 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2154 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2155 if (cm->cm_reply != NULL)
2156 mpssas_log_command(cm, MPS_RECOVERY,
2157 "completed timedout cm %p ccb %p during recovery "
2158 "ioc %x scsi %x state %x xfer %u\n",
2160 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2161 le32toh(rep->TransferCount));
2163 mpssas_log_command(cm, MPS_RECOVERY,
2164 "completed timedout cm %p ccb %p during recovery\n",
2166 } else if (cm->cm_targ->tm != NULL) {
2167 if (cm->cm_reply != NULL)
2168 mpssas_log_command(cm, MPS_RECOVERY,
2169 "completed cm %p ccb %p during recovery "
2170 "ioc %x scsi %x state %x xfer %u\n",
2172 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2173 le32toh(rep->TransferCount));
2175 mpssas_log_command(cm, MPS_RECOVERY,
2176 "completed cm %p ccb %p during recovery\n",
2178 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2179 mpssas_log_command(cm, MPS_RECOVERY,
2180 "reset completed cm %p ccb %p\n",
2184 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2186 * We ran into an error after we tried to map the command,
2187 * so we're getting a callback without queueing the command
2188 * to the hardware. So we set the status here, and it will
2189 * be retained below. We'll go through the "fast path",
2190 * because there can be no reply when we haven't actually
2191 * gone out to the hardware.
2193 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2196 * Currently the only error included in the mask is
2197 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2198 * chain frames. We need to freeze the queue until we get
2199 * a command that completed without this error, which will
2200 * hopefully have some chain frames attached that we can
2201 * use. If we wanted to get smarter about it, we would
2202 * only unfreeze the queue in this condition when we're
2203 * sure that we're getting some chain frames back. That's
2204 * probably unnecessary.
2206 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2207 xpt_freeze_simq(sassc->sim, 1);
2208 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2209 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2210 "freezing SIM queue\n");
2215 * If this is a Start Stop Unit command and it was issued by the driver
2216 * during shutdown, decrement the refcount to account for all of the
2217 * commands that were sent. All SSU commands should be completed before
2218 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2221 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2222 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2226 /* Take the fast path to completion */
2227 if (cm->cm_reply == NULL) {
2228 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2229 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2230 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2232 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2233 ccb->csio.scsi_status = SCSI_STATUS_OK;
2235 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2236 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2237 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2238 mps_dprint(sc, MPS_XINFO,
2239 "Unfreezing SIM queue\n");
2244 * There are two scenarios where the status won't be
2245 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2246 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2248 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2250 * Freeze the dev queue so that commands are
2251 * executed in the correct order after error
2254 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2255 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2257 mps_free_command(sc, cm);
2262 mpssas_log_command(cm, MPS_XINFO,
2263 "ioc %x scsi %x state %x xfer %u\n",
2264 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2265 le32toh(rep->TransferCount));
2268 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2269 * Volume if an error occurred (normal I/O retry). Use the original
2270 * CCB, but set a flag that this will be a retry so that it's sent to
2271 * the original volume. Free the command but reuse the CCB.
2273 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2274 mps_free_command(sc, cm);
2275 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2276 mpssas_action_scsiio(sassc, ccb);
2279 ccb->ccb_h.sim_priv.entries[0].field = 0;
2281 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2282 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2283 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2285 case MPI2_IOCSTATUS_SUCCESS:
2286 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2288 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2289 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2290 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2292 /* Completion failed at the transport level. */
2293 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2294 MPI2_SCSI_STATE_TERMINATED)) {
2295 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2299 /* In a modern packetized environment, an autosense failure
2300 * implies that there's not much else that can be done to
2301 * recover the command.
2303 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2304 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2309 * CAM doesn't care about SAS Response Info data, but if this is
2310 * the state check if TLR should be done. If not, clear the
2311 * TLR_bits for the target.
2313 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2314 ((le32toh(rep->ResponseInfo) &
2315 MPI2_SCSI_RI_MASK_REASONCODE) ==
2316 MPS_SCSI_RI_INVALID_FRAME)) {
2317 sc->mapping_table[target_id].TLR_bits =
2318 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2322 * Intentionally override the normal SCSI status reporting
2323 * for these two cases. These are likely to happen in a
2324 * multi-initiator environment, and we want to make sure that
2325 * CAM retries these commands rather than fail them.
2327 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2328 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2329 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2333 /* Handle normal status and sense */
2334 csio->scsi_status = rep->SCSIStatus;
2335 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2336 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2338 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2340 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2341 int sense_len, returned_sense_len;
2343 returned_sense_len = min(le32toh(rep->SenseCount),
2344 sizeof(struct scsi_sense_data));
2345 if (returned_sense_len < ccb->csio.sense_len)
2346 ccb->csio.sense_resid = ccb->csio.sense_len -
2349 ccb->csio.sense_resid = 0;
2351 sense_len = min(returned_sense_len,
2352 ccb->csio.sense_len - ccb->csio.sense_resid);
2353 bzero(&ccb->csio.sense_data,
2354 sizeof(ccb->csio.sense_data));
2355 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2356 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2360 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2361 * and it's page code 0 (Supported Page List), and there is
2362 * inquiry data, and this is for a sequential access device, and
2363 * the device is an SSP target, and TLR is supported by the
2364 * controller, turn the TLR_bits value ON if page 0x90 is
2367 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2368 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2369 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2370 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2371 (csio->data_ptr != NULL) &&
2372 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2373 (sc->control_TLR) &&
2374 (sc->mapping_table[target_id].device_info &
2375 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2376 vpd_list = (struct scsi_vpd_supported_page_list *)
2378 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2379 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2380 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2381 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2382 csio->cdb_io.cdb_bytes[4];
2383 alloc_len -= csio->resid;
2384 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2385 if (vpd_list->list[i] == 0x90) {
2393 * If this is a SATA direct-access end device, mark it so that
2394 * a SCSI StartStopUnit command will be sent to it when the
2395 * driver is being shutdown.
2397 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2398 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2399 (sc->mapping_table[target_id].device_info &
2400 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2401 ((sc->mapping_table[target_id].device_info &
2402 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2403 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2404 target = &sassc->targets[target_id];
2405 target->supports_SSU = TRUE;
2406 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2410 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2411 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2413 * If devinfo is 0 this will be a volume. In that case don't
2414 * tell CAM that the volume is not there. We want volumes to
2415 * be enumerated until they are deleted/removed, not just
2418 if (cm->cm_targ->devinfo == 0)
2419 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2421 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2423 case MPI2_IOCSTATUS_INVALID_SGL:
2424 mps_print_scsiio_cmd(sc, cm);
2425 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2427 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2429 * This is one of the responses that comes back when an I/O
2430 * has been aborted. If it is because of a timeout that we
2431 * initiated, just set the status to CAM_CMD_TIMEOUT.
2432 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2433 * command is the same (it gets retried, subject to the
2434 * retry counter), the only difference is what gets printed
2437 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2438 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2440 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2442 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2443 /* resid is ignored for this condition */
2445 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2447 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2448 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2450 * These can sometimes be transient transport-related
2451 * errors, and sometimes persistent drive-related errors.
2452 * We used to retry these without decrementing the retry
2453 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2454 * we hit a persistent drive problem that returns one of
2455 * these error codes, we would retry indefinitely. So,
2456 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2457 * count and avoid infinite retries. We're taking the
2458 * potential risk of flagging false failures in the event
2459 * of a topology-related error (e.g. a SAS expander problem
2460 * causes a command addressed to a drive to fail), but
2461 * avoiding getting into an infinite retry loop.
2463 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2464 mpssas_log_command(cm, MPS_INFO,
2465 "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2466 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2467 rep->SCSIStatus, rep->SCSIState,
2468 le32toh(rep->TransferCount));
2470 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2471 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2472 case MPI2_IOCSTATUS_INVALID_VPID:
2473 case MPI2_IOCSTATUS_INVALID_FIELD:
2474 case MPI2_IOCSTATUS_INVALID_STATE:
2475 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2476 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2477 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2478 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2479 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2481 mpssas_log_command(cm, MPS_XINFO,
2482 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2483 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2484 rep->SCSIStatus, rep->SCSIState,
2485 le32toh(rep->TransferCount));
2486 csio->resid = cm->cm_length;
2487 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2491 mps_sc_failed_io_info(sc,csio,rep);
2493 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2494 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2495 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2496 mps_dprint(sc, MPS_XINFO, "Command completed, "
2497 "unfreezing SIM queue\n");
2500 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2501 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2502 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2505 mps_free_command(sc, cm);
2509 /* All Request reached here are Endian safe */
/*
 * Attempt to convert a SCSI READ/WRITE aimed at an IR (WarpDrive) volume
 * into a "direct drive" I/O addressed straight at the member physical disk.
 *
 * Only READ/WRITE 6/10/12/16 CDBs whose entire transfer lies within a single
 * stripe are converted: the request's DevHandle is replaced with the
 * PhysDisk's handle and the LBA bytes in the CDB are rewritten to the
 * physical LBA.  MPS_CM_FLAGS_DD_IO is set so that, on error, completion
 * code can retry the command against the original volume.  Everything else
 * is left untouched and goes to the volume itself.
 *
 * NOTE(review): this listing is missing interleaved lines (braces, else
 * arms); comments below describe only the visible code.
 */
2511 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2513 pMpi2SCSIIORequest_t pIO_req;
2514 struct mps_softc *sc = sassc->sc;
2516 uint32_t physLBA, stripe_offset, stripe_unit;
2517 uint32_t io_size, column;
2518 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2521 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2522 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2523 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2524 * bit different than the 10/16 CDBs, handle them separately.
2526 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2527 CDB = pIO_req->CDB.CDB32;
2530 * Handle 6 byte CDBs.
2532 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2533 (CDB[0] == WRITE_6))) {
2535 * Get the transfer size in blocks.
2537 io_size = (cm->cm_length >> sc->DD_block_exponent);
2540 * Get virtual LBA given in the CDB.
/* 6-byte CDB carries a 21-bit LBA: low 5 bits of byte 1, then bytes 2-3. */
2542 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2543 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2546 * Check that LBA range for I/O does not exceed volume's
2549 if ((virtLBA + (uint64_t)io_size - 1) <=
2552 * Check if the I/O crosses a stripe boundary. If not,
2553 * translate the virtual LBA to a physical LBA and set
2554 * the DevHandle for the PhysDisk to be used. If it
2555 * does cross a boundry, do normal I/O. To get the
2556 * right DevHandle to use, get the map number for the
2557 * column, then use that map number to look up the
2558 * DevHandle of the PhysDisk.
/* Masking with (size - 1) assumes DD_stripe_size is a power of two,
 * consistent with DD_stripe_exponent being used as a shift count. */
2560 stripe_offset = (uint32_t)virtLBA &
2561 (sc->DD_stripe_size - 1);
2562 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2563 physLBA = (uint32_t)virtLBA >>
2564 sc->DD_stripe_exponent;
2565 stripe_unit = physLBA / sc->DD_num_phys_disks;
2566 column = physLBA % sc->DD_num_phys_disks;
2567 pIO_req->DevHandle =
2568 htole16(sc->DD_column_map[column].dev_handle);
2569 /* ???? Is this endian safe*/
2570 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the 21-bit LBA in CDB bytes 1-3 with the physical LBA. */
2573 physLBA = (stripe_unit <<
2574 sc->DD_stripe_exponent) + stripe_offset;
2575 ptrLBA = &pIO_req->CDB.CDB32[1];
2576 physLBA_byte = (uint8_t)(physLBA >> 16);
2577 *ptrLBA = physLBA_byte;
2578 ptrLBA = &pIO_req->CDB.CDB32[2];
2579 physLBA_byte = (uint8_t)(physLBA >> 8);
2580 *ptrLBA = physLBA_byte;
2581 ptrLBA = &pIO_req->CDB.CDB32[3];
2582 physLBA_byte = (uint8_t)physLBA;
2583 *ptrLBA = physLBA_byte;
2586 * Set flag that Direct Drive I/O is
2589 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2596 * Handle 10, 12 or 16 byte CDBs.
2598 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2599 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2600 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2601 (CDB[0] == WRITE_12))) {
2603 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2604 * are 0. If not, this is accessing beyond 2TB so handle it in
2605 * the else section. 10-byte and 12-byte CDB's are OK.
2606 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2607 * ready to accept 12byte CDB for Direct IOs.
2609 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2610 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2611 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2613 * Get the transfer size in blocks.
2615 io_size = (cm->cm_length >> sc->DD_block_exponent);
2618 * Get virtual LBA. Point to correct lower 4 bytes of
2619 * LBA in the CDB depending on command.
/* LBA starts at byte 2 for 10/12-byte CDBs; for 16-byte CDBs the
 * low 32 bits of the LBA start at byte 6. */
2621 lba_idx = ((CDB[0] == READ_12) ||
2622 (CDB[0] == WRITE_12) ||
2623 (CDB[0] == READ_10) ||
2624 (CDB[0] == WRITE_10))? 2 : 6;
2625 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2626 ((uint64_t)CDB[lba_idx + 1] << 16) |
2627 ((uint64_t)CDB[lba_idx + 2] << 8) |
2628 (uint64_t)CDB[lba_idx + 3];
2631 * Check that LBA range for I/O does not exceed volume's
2634 if ((virtLBA + (uint64_t)io_size - 1) <=
2637 * Check if the I/O crosses a stripe boundary.
2638 * If not, translate the virtual LBA to a
2639 * physical LBA and set the DevHandle for the
2640 * PhysDisk to be used. If it does cross a
2641 * boundry, do normal I/O. To get the right
2642 * DevHandle to use, get the map number for the
2643 * column, then use that map number to look up
2644 * the DevHandle of the PhysDisk.
2646 stripe_offset = (uint32_t)virtLBA &
2647 (sc->DD_stripe_size - 1);
2648 if ((stripe_offset + io_size) <=
2649 sc->DD_stripe_size) {
2650 physLBA = (uint32_t)virtLBA >>
2651 sc->DD_stripe_exponent;
2652 stripe_unit = physLBA /
2653 sc->DD_num_phys_disks;
2655 sc->DD_num_phys_disks;
2656 pIO_req->DevHandle =
2657 htole16(sc->DD_column_map[column].
2659 cm->cm_desc.SCSIIO.DevHandle =
2662 physLBA = (stripe_unit <<
2663 sc->DD_stripe_exponent) +
2666 &pIO_req->CDB.CDB32[lba_idx];
2667 physLBA_byte = (uint8_t)(physLBA >> 24);
2668 *ptrLBA = physLBA_byte;
2670 &pIO_req->CDB.CDB32[lba_idx + 1];
2671 physLBA_byte = (uint8_t)(physLBA >> 16);
2672 *ptrLBA = physLBA_byte;
2674 &pIO_req->CDB.CDB32[lba_idx + 2];
2675 physLBA_byte = (uint8_t)(physLBA >> 8);
2676 *ptrLBA = physLBA_byte;
2678 &pIO_req->CDB.CDB32[lba_idx + 3];
2679 physLBA_byte = (uint8_t)physLBA;
2680 *ptrLBA = physLBA_byte;
2683 * Set flag that Direct Drive I/O is
2686 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2691 * 16-byte CDB and the upper 4 bytes of the CDB are not
2692 * 0. Get the transfer size in blocks.
2694 io_size = (cm->cm_length >> sc->DD_block_exponent);
/* NOTE(review): byte 2 of a 16-byte CDB holds bits 56..63 of the
 * 64-bit LBA, so "<< 54" looks wrong — presumably "<< 56".  Verify
 * against upstream before changing; this path only runs for LBAs
 * beyond 2TB (upper CDB bytes nonzero). */
2699 virtLBA = ((uint64_t)CDB[2] << 54) |
2700 ((uint64_t)CDB[3] << 48) |
2701 ((uint64_t)CDB[4] << 40) |
2702 ((uint64_t)CDB[5] << 32) |
2703 ((uint64_t)CDB[6] << 24) |
2704 ((uint64_t)CDB[7] << 16) |
2705 ((uint64_t)CDB[8] << 8) |
2709 * Check that LBA range for I/O does not exceed volume's
2712 if ((virtLBA + (uint64_t)io_size - 1) <=
2715 * Check if the I/O crosses a stripe boundary.
2716 * If not, translate the virtual LBA to a
2717 * physical LBA and set the DevHandle for the
2718 * PhysDisk to be used. If it does cross a
2719 * boundry, do normal I/O. To get the right
2720 * DevHandle to use, get the map number for the
2721 * column, then use that map number to look up
2722 * the DevHandle of the PhysDisk.
2724 stripe_offset = (uint32_t)virtLBA &
2725 (sc->DD_stripe_size - 1);
2726 if ((stripe_offset + io_size) <=
2727 sc->DD_stripe_size) {
2728 physLBA = (uint32_t)(virtLBA >>
2729 sc->DD_stripe_exponent);
2730 stripe_unit = physLBA /
2731 sc->DD_num_phys_disks;
2733 sc->DD_num_phys_disks;
2734 pIO_req->DevHandle =
2735 htole16(sc->DD_column_map[column].
2737 cm->cm_desc.SCSIIO.DevHandle =
2740 physLBA = (stripe_unit <<
2741 sc->DD_stripe_exponent) +
2745 * Set upper 4 bytes of LBA to 0. We
2746 * assume that the phys disks are less
2747 * than 2 TB's in size. Then, set the
2750 pIO_req->CDB.CDB32[2] = 0;
2751 pIO_req->CDB.CDB32[3] = 0;
2752 pIO_req->CDB.CDB32[4] = 0;
2753 pIO_req->CDB.CDB32[5] = 0;
2754 ptrLBA = &pIO_req->CDB.CDB32[6];
2755 physLBA_byte = (uint8_t)(physLBA >> 24);
2756 *ptrLBA = physLBA_byte;
2757 ptrLBA = &pIO_req->CDB.CDB32[7];
2758 physLBA_byte = (uint8_t)(physLBA >> 16);
2759 *ptrLBA = physLBA_byte;
2760 ptrLBA = &pIO_req->CDB.CDB32[8];
2761 physLBA_byte = (uint8_t)(physLBA >> 8);
2762 *ptrLBA = physLBA_byte;
2763 ptrLBA = &pIO_req->CDB.CDB32[9];
2764 physLBA_byte = (uint8_t)physLBA;
2765 *ptrLBA = physLBA_byte;
2768 * Set flag that Direct Drive I/O is
2771 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2778 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request issued by
 * mpssas_send_smpcmd().  Validates the command flags and the firmware
 * reply, copies status into the CAM CCB, syncs/unloads the DMA map,
 * and frees the command.  The associated CCB is completed via the
 * (not-visible-here) bailout path.
 */
2780 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2782 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2783 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2787 ccb = cm->cm_complete_data;
2790 * Currently there should be no way we can hit this case. It only
2791 * happens when we have a failure to allocate chain frames, and SMP
2792 * commands require two S/G elements only. That should be handled
2793 * in the standard request size.
2795 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2796 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2797 __func__, cm->cm_flags);
2798 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* A NULL reply means the firmware never produced one; fail the CCB. */
2802 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2804 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2805 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request. */
2809 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2810 sasaddr = le32toh(req->SASAddress.Low);
2811 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2813 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2814 MPI2_IOCSTATUS_SUCCESS ||
2815 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2816 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2817 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2818 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2822 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2823 "%#jx completed successfully\n", __func__,
2824 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2826 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2827 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2829 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2833 * We sync in both directions because we had DMAs in the S/G list
2834 * in both directions.
2836 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2837 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2838 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2839 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the given SAS address on
 * behalf of an XPT_SMP_IO CCB.  Supports virtual addresses and
 * single-segment S/G lists only; physical addresses are rejected with
 * CAM_REQ_INVALID.  The request and response buffers are described with a
 * two-entry uio so mps_map_command() can map both in one busdma call.
 * Completion is handled by mpssas_smpio_complete().
 *
 * NOTE(review): this listing is missing interleaved lines (braces,
 * break/goto statements); comments describe only the visible code.
 */
2844 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2846 struct mps_command *cm;
2847 uint8_t *request, *response;
2848 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2849 struct mps_softc *sc;
2856 * XXX We don't yet support physical addresses here.
2858 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2859 case CAM_DATA_PADDR:
2860 case CAM_DATA_SG_PADDR:
2861 mps_dprint(sc, MPS_ERROR,
2862 "%s: physical addresses not supported\n", __func__);
2863 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2868 * The chip does not support more than one buffer for the
2869 * request or response.
2871 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2872 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2873 mps_dprint(sc, MPS_ERROR,
2874 "%s: multiple request or response "
2875 "buffer segments not supported for SMP\n",
2877 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2883 * The CAM_SCATTER_VALID flag was originally implemented
2884 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2885 * We have two. So, just take that flag to mean that we
2886 * might have S/G lists, and look at the S/G segment count
2887 * to figure out whether that is the case for each individual
2890 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2891 bus_dma_segment_t *req_sg;
2893 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2894 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2896 request = ccb->smpio.smp_request;
2898 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2899 bus_dma_segment_t *rsp_sg;
2901 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2902 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2904 response = ccb->smpio.smp_response;
2906 case CAM_DATA_VADDR:
2907 request = ccb->smpio.smp_request;
2908 response = ccb->smpio.smp_response;
2911 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2916 cm = mps_alloc_command(sc);
2918 mps_dprint(sc, MPS_ERROR,
2919 "%s: cannot allocate command\n", __func__);
2920 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the MPI2 SMP passthrough request frame. */
2925 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2926 bzero(req, sizeof(*req));
2927 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2929 /* Allow the chip to use any route to this SAS address. */
2930 req->PhysicalPort = 0xff;
2932 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2934 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2936 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2937 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2939 mpi_init_sge(cm, req, &req->SGL);
2942 * Set up a uio to pass into mps_map_command(). This allows us to
2943 * do one map command, and one busdma call in there.
2945 cm->cm_uio.uio_iov = cm->cm_iovec;
2946 cm->cm_uio.uio_iovcnt = 2;
2947 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2950 * The read/write flag isn't used by busdma, but set it just in
2951 * case. This isn't exactly accurate, either, since we're going in
2954 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 carries the outbound request, iovec 1 the inbound response. */
2956 cm->cm_iovec[0].iov_base = request;
2957 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2958 cm->cm_iovec[1].iov_base = response;
2959 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2961 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2962 cm->cm_iovec[1].iov_len;
2965 * Trigger a warning message in mps_data_cb() for the user if we
2966 * wind up exceeding two S/G segments. The chip expects one
2967 * segment for the request and another for the response.
2969 cm->cm_max_segs = 2;
2971 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2972 cm->cm_complete = mpssas_smpio_complete;
2973 cm->cm_complete_data = ccb;
2976 * Tell the mapping code that we're using a uio, and that this is
2977 * an SMP passthrough request. There is a little special-case
2978 * logic there (in mps_data_cb()) to handle the bidirectional
2981 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2982 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2984 /* The chip data format is little endian. */
2985 req->SASAddress.High = htole32(sasaddr >> 32);
2986 req->SASAddress.Low = htole32(sasaddr);
2989 * XXX Note that we don't have a timeout/abort mechanism here.
2990 * From the manual, it looks like task management requests only
2991 * work for SCSI IO and SATA passthrough requests. We may need to
2992 * have a mechanism to retry requests in the event of a chip reset
2993 * at least. Hopefully the chip will insure that any errors short
2994 * of that are relayed back to the driver.
2996 error = mps_map_command(sc, cm);
2997 if ((error != 0) && (error != EINPROGRESS)) {
2998 mps_dprint(sc, MPS_ERROR,
2999 "%s: error %d returned from mps_map_command()\n",
/* Error path: release the command and fail the CCB. */
3007 mps_free_command(sc, cm);
3008 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: work out which SAS address the SMP request
 * should be routed to, then hand off to mpssas_send_smpcmd().
 *
 * Address resolution order (per the visible code): the target's own SAS
 * address if it embeds an SMP target; otherwise the parent device's
 * (usually the expander's) SAS address, via either the OLD_MPS_PROBE
 * lookup-by-handle path or the cached parent_* fields.  Failure to find
 * an address fails the CCB with CAM_DEV_NOT_THERE.
 */
3015 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
3017 struct mps_softc *sc;
3018 struct mpssas_target *targ;
3019 uint64_t sasaddr = 0;
3024 * Make sure the target exists.
3026 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3027 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3028 targ = &sassc->targets[ccb->ccb_h.target_id];
3029 if (targ->handle == 0x0) {
3030 mps_dprint(sc, MPS_ERROR,
3031 "%s: target %d does not exist!\n", __func__,
3032 ccb->ccb_h.target_id);
3033 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3039 * If this device has an embedded SMP target, we'll talk to it
3041 * figure out what the expander's address is.
3043 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3044 sasaddr = targ->sasaddr;
3047 * If we don't have a SAS address for the expander yet, try
3048 * grabbing it from the page 0x83 information cached in the
3049 * transport layer for this target. LSI expanders report the
3050 * expander SAS address as the port-associated SAS address in
3051 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3054 * XXX KDM disable this for now, but leave it commented out so that
3055 * it is obvious that this is another possible way to get the SAS
3058 * The parent handle method below is a little more reliable, and
3059 * the other benefit is that it works for devices other than SES
3060 * devices. So you can send a SMP request to a da(4) device and it
3061 * will get routed to the expander that device is attached to.
3062 * (Assuming the da(4) device doesn't contain an SMP target...)
3066 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3070 * If we still don't have a SAS address for the expander, look for
3071 * the parent device of this device, which is probably the expander.
3074 #ifdef OLD_MPS_PROBE
3075 struct mpssas_target *parent_target;
3078 if (targ->parent_handle == 0x0) {
3079 mps_dprint(sc, MPS_ERROR,
3080 "%s: handle %d does not have a valid "
3081 "parent handle!\n", __func__, targ->handle);
3082 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3085 #ifdef OLD_MPS_PROBE
/* Legacy path: look the parent target up by firmware handle. */
3086 parent_target = mpssas_find_target_by_handle(sassc, 0,
3087 targ->parent_handle);
3089 if (parent_target == NULL) {
3090 mps_dprint(sc, MPS_ERROR,
3091 "%s: handle %d does not have a valid "
3092 "parent target!\n", __func__, targ->handle);
3093 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3097 if ((parent_target->devinfo &
3098 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3099 mps_dprint(sc, MPS_ERROR,
3100 "%s: handle %d parent %d does not "
3101 "have an SMP target!\n", __func__,
3102 targ->handle, parent_target->handle);
3103 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3108 sasaddr = parent_target->sasaddr;
3109 #else /* OLD_MPS_PROBE */
/* Current path: use the parent info cached on the target itself. */
3110 if ((targ->parent_devinfo &
3111 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3112 mps_dprint(sc, MPS_ERROR,
3113 "%s: handle %d parent %d does not "
3114 "have an SMP target!\n", __func__,
3115 targ->handle, targ->parent_handle);
3116 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3120 if (targ->parent_sasaddr == 0x0) {
3121 mps_dprint(sc, MPS_ERROR,
3122 "%s: handle %d parent handle %d does "
3123 "not have a valid SAS address!\n",
3124 __func__, targ->handle, targ->parent_handle);
3125 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3129 sasaddr = targ->parent_sasaddr;
3130 #endif /* OLD_MPS_PROBE */
/* No SAS address could be resolved at all. */
3135 mps_dprint(sc, MPS_INFO,
3136 "%s: unable to find SAS address for handle %d\n",
3137 __func__, targ->handle);
3138 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3141 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3149 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: build a SCSI task-management Target Reset
 * request for the addressed target (with a hard link reset) and submit it
 * at high priority.  Completion is handled by mpssas_resetdev_complete().
 */
3152 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3154 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3155 struct mps_softc *sc;
3156 struct mps_command *tm;
3157 struct mpssas_target *targ;
3159 MPS_FUNCTRACE(sassc->sc);
3160 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3162 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3163 ("Target %d out of bounds in XPT_RESET_DEV\n",
3164 ccb->ccb_h.target_id));
/* NOTE(review): the assignment of 'sc' (presumably sc = sassc->sc) is not
 * visible in this listing — confirm it exists before first use here. */
3166 tm = mps_alloc_command(sc);
3168 mps_dprint(sc, MPS_ERROR,
3169 "command alloc failure in mpssas_action_resetdev\n");
3170 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3175 targ = &sassc->targets[ccb->ccb_h.target_id];
3176 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3177 req->DevHandle = htole16(targ->handle);
3178 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3179 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3181 /* SAS Hard Link Reset / SATA Link Reset */
3182 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3185 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3186 tm->cm_complete = mpssas_resetdev_complete;
3187 tm->cm_complete_data = ccb;
/* Flag the target so in-flight I/O handling knows a reset is pending. */
3189 targ->flags |= MPSSAS_TARGET_INRESET;
3191 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task-management request issued
 * by mpssas_action_resetdev().  Translates the firmware response code into
 * a CAM status on the originating CCB and frees the TM command.
 */
3195 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3197 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3201 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* NOTE(review): 'resp' is dereferenced below without a visible NULL
 * check — confirm the reply is guaranteed here or guarded off-listing. */
3203 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3204 ccb = tm->cm_complete_data;
3207 * Currently there should be no way we can hit this case. It only
3208 * happens when we have a failure to allocate chain frames, and
3209 * task management commands don't have S/G lists.
3211 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3212 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3214 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3216 mps_dprint(sc, MPS_ERROR,
3217 "%s: cm_flags = %#x for reset of handle %#04x! "
3218 "This should not happen!\n", __func__, tm->cm_flags,
3220 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3224 mps_dprint(sc, MPS_XINFO,
3225 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3226 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* On success, tell CAM a bus-device-reset was delivered to the target. */
3228 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3229 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3230 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3234 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3238 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll routine: service the controller by calling the interrupt
 * handler directly (used when interrupts are unavailable, e.g. during
 * kernel dumps).  TRACE-level debugging is disabled first because its
 * output volume would make polling unusably slow.
 */
3243 mpssas_poll(struct cam_sim *sim)
3245 struct mpssas_softc *sassc;
3247 sassc = cam_sim_softc(sim);
3249 if (sassc->sc->mps_debug & MPS_TRACE) {
3250 /* frequent debug messages during a panic just slow
3251 * everything down too much.
3253 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3254 sassc->sc->mps_debug &= ~MPS_TRACE;
3257 mps_intr_locked(sassc->sc);
/*
 * CAM async event callback.  Two events are handled in the visible code:
 *
 *  AC_ADVINFO_CHANGED - when the long read-capacity data for a LUN changes,
 *      fetch it via an XPT_DEV_ADVINFO CCB and record whether the LUN is
 *      EEDP (protection-information) formatted and its block size.
 *  AC_FOUND_DEVICE - a new device appeared; run the EEDP probe
 *      (mpssas_check_eedp()).
 */
3261 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3264 struct mps_softc *sc;
3266 sc = (struct mps_softc *)callback_arg;
3269 #if (__FreeBSD_version >= 1000006) || \
3270 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3271 case AC_ADVINFO_CHANGED: {
3272 struct mpssas_target *target;
3273 struct mpssas_softc *sassc;
3274 struct scsi_read_capacity_data_long rcap_buf;
3275 struct ccb_dev_advinfo cdai;
3276 struct mpssas_lun *lun;
3281 buftype = (uintptr_t)arg;
3287 * We're only interested in read capacity data changes.
3289 if (buftype != CDAI_TYPE_RCAPLONG)
3293 * We should have a handle for this, but check to make sure.
3295 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3296 ("Target %d out of bounds in mpssas_async\n",
3297 xpt_path_target_id(path)));
3298 target = &sassc->targets[xpt_path_target_id(path)];
3299 if (target->handle == 0)
/* Find (or lazily create) the per-LUN record on this target. */
3302 lunid = xpt_path_lun_id(path);
3304 SLIST_FOREACH(lun, &target->luns, lun_link) {
3305 if (lun->lun_id == lunid) {
3311 if (found_lun == 0) {
3312 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3315 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3316 "LUN for EEDP support.\n");
3319 lun->lun_id = lunid;
3320 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Pull the cached long read-capacity data out of the transport layer. */
3323 bzero(&rcap_buf, sizeof(rcap_buf));
3324 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3325 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3326 cdai.ccb_h.flags = CAM_DIR_IN;
3327 cdai.buftype = CDAI_TYPE_RCAPLONG;
3328 #if (__FreeBSD_version >= 1100061) || \
3329 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3330 cdai.flags = CDAI_FLAG_NONE;
3334 cdai.bufsiz = sizeof(rcap_buf);
3335 cdai.buf = (uint8_t *)&rcap_buf;
3336 xpt_action((union ccb *)&cdai);
3337 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3338 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN set in the returned data means the LUN is
 * formatted with protection information (EEDP). */
3341 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3342 && (rcap_buf.prot & SRC16_PROT_EN)) {
3343 lun->eedp_formatted = TRUE;
3344 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3346 lun->eedp_formatted = FALSE;
3347 lun->eedp_block_size = 0;
3352 case AC_FOUND_DEVICE: {
3353 struct ccb_getdev *cgd;
3356 mpssas_check_eedp(sc, path, cgd);
3365 #if (__FreeBSD_version < 901503) || \
3366 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Probe a newly-found device for EEDP (end-to-end data protection)
 * support.  If the inquiry data advertises protection (SPC3_SID_PROTECT),
 * issue an internally-generated READ CAPACITY(16) to the LUN; the
 * completion callback (mpssas_read_cap_done) records whether the LUN is
 * protection-formatted.  Devices without the bit are skipped.
 *
 * NOTE(review): this listing is missing interleaved lines (braces,
 * early-return statements); comments describe only the visible code.
 */
3368 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3369 struct ccb_getdev *cgd)
3371 struct mpssas_softc *sassc = sc->sassc;
3372 struct ccb_scsiio *csio;
3373 struct scsi_read_capacity_16 *scsi_cmd;
3374 struct scsi_read_capacity_eedp *rcap_buf;
3376 target_id_t targetid;
3379 struct cam_path *local_path;
3380 struct mpssas_target *target;
3381 struct mpssas_lun *lun;
3386 pathid = cam_sim_path(sassc->sim);
3387 targetid = xpt_path_target_id(path);
3388 lunid = xpt_path_lun_id(path);
3390 KASSERT(targetid < sassc->maxtargets,
3391 ("Target %d out of bounds in mpssas_check_eedp\n",
3393 target = &sassc->targets[targetid];
3394 if (target->handle == 0x0)
3398 * Determine if the device is EEDP capable.
3400 * If this flag is set in the inquiry data,
3401 * the device supports protection information,
3402 * and must support the 16 byte read
3403 * capacity command, otherwise continue without
3404 * sending read cap 16
3406 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3410 * Issue a READ CAPACITY 16 command. This info
3411 * is used to determine if the LUN is formatted
3414 ccb = xpt_alloc_ccb_nowait();
3416 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3417 "for EEDP support.\n");
3421 if (xpt_create_path(&local_path, xpt_periph,
3422 pathid, targetid, lunid) != CAM_REQ_CMP) {
3423 mps_dprint(sc, MPS_ERROR, "Unable to create "
3424 "path for EEDP support\n");
3430 * If LUN is already in list, don't create a new
3434 SLIST_FOREACH(lun, &target->luns, lun_link) {
3435 if (lun->lun_id == lunid) {
3441 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3444 mps_dprint(sc, MPS_ERROR,
3445 "Unable to alloc LUN for EEDP support.\n");
3446 xpt_free_path(local_path);
3450 lun->lun_id = lunid;
3451 SLIST_INSERT_HEAD(&target->luns, lun,
3455 xpt_path_string(local_path, path_str, sizeof(path_str));
3457 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3458 path_str, target->handle);
3461 * Issue a READ CAPACITY 16 command for the LUN.
3462 * The mpssas_read_cap_done function will load
3463 * the read cap info into the LUN struct.
/* rcap_buf is heap-allocated because the command completes
 * asynchronously; mpssas_read_cap_done owns/frees it. */
3465 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3466 M_MPT2, M_NOWAIT | M_ZERO);
3467 if (rcap_buf == NULL) {
3468 mps_dprint(sc, MPS_FAULT,
3469 "Unable to alloc read capacity buffer for EEDP support.\n");
3470 xpt_free_path(ccb->ccb_h.path);
3474 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3476 csio->ccb_h.func_code = XPT_SCSI_IO;
3477 csio->ccb_h.flags = CAM_DIR_IN;
3478 csio->ccb_h.retry_count = 4;
3479 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3480 csio->ccb_h.timeout = 60000;
3481 csio->data_ptr = (uint8_t *)rcap_buf;
3482 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3483 csio->sense_len = MPS_SENSE_LEN;
3484 csio->cdb_len = sizeof(*scsi_cmd);
3485 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E/0x10 = SERVICE ACTION IN / READ CAPACITY(16); byte 13 is the
 * allocation length (low byte). */
3487 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3488 bzero(scsi_cmd, sizeof(*scsi_cmd));
3489 scsi_cmd->opcode = 0x9E;
3490 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3491 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3493 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * mpssas_read_cap_done -- completion callback for the READ CAPACITY 16
 * command the driver issues internally while probing a LUN for EEDP
 * (end-to-end data protection) support.
 *
 * Looks up the addressed LUN in the target's LUN list and records whether
 * the LUN is formatted with protection information and, if so, its logical
 * block size.  Any CAM or SCSI error simply marks the LUN as not
 * EEDP-formatted.  The read-capacity buffer, the path, and the CCB are all
 * freed on the way out.
 */
3498 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3500 struct mpssas_softc *sassc;
3501 struct mpssas_target *target;
3502 struct mpssas_lun *lun;
3503 struct scsi_read_capacity_eedp *rcap_buf;
/* Nothing to do without a completed CCB. */
3505 if (done_ccb == NULL)
3508 /* Driver need to release devq, it Scsi command is
3509 * generated by driver internally.
3510 * Currently there is a single place where driver
3511 * calls scsi command internally. In future if driver
3512 * calls more scsi command internally, it needs to release
3513 * devq internally, since those command will not go back to
3516 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3517 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3518 xpt_release_devq(done_ccb->ccb_h.path,
3519 /*count*/ 1, /*run_queue*/TRUE);
3522 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3525 * Get the LUN ID for the path and look it up in the LUN list for the
/* The softc was stashed in ppriv_ptr1 when the command was issued. */
3528 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
/* Guard against a completion for a target id beyond the current table. */
3529 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3530 ("Target %d out of bounds in mpssas_read_cap_done\n",
3531 done_ccb->ccb_h.target_id));
3532 target = &sassc->targets[done_ccb->ccb_h.target_id];
3533 SLIST_FOREACH(lun, &target->luns, lun_link) {
3534 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3538 * Got the LUN in the target's LUN list. Fill it in
3539 * with EEDP info. If the READ CAP 16 command had some
3540 * SCSI error (common if command is not supported), mark
3541 * the lun as not supporting EEDP and set the block size
3544 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
3545 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3546 lun->eedp_formatted = FALSE;
3547 lun->eedp_block_size = 0;
/*
 * PROT_EN bit (bit 0 of the protect byte) set means the LUN was
 * formatted with protection information (SBC-3 READ CAPACITY 16 data).
 */
3551 if (rcap_buf->protect & 0x01) {
3552 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3553 "target ID %d is formatted for EEDP "
3554 "support.\n", done_ccb->ccb_h.target_lun,
3555 done_ccb->ccb_h.target_id);
3556 lun->eedp_formatted = TRUE;
3557 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3562 // Finished with this CCB and path.
3563 free(rcap_buf, M_MPT2);
3564 xpt_free_path(done_ccb->ccb_h.path);
3565 xpt_free_ccb(done_ccb);
3567 #endif /* (__FreeBSD_version < 901503) || \
3568 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * mpssas_prepare_for_tm -- set up driver and CAM state before a task
 * management request (e.g. a LUN or target reset) is sent for the given
 * target/LUN: mark the target in-reset and attach a path/CCB that the TM
 * completion will later use to release the devq.
 */
3571 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3572 struct mpssas_target *target, lun_id_t lun_id)
3578 * Set the INRESET flag for this target so that no I/O will be sent to
3579 * the target until the reset has completed. If an I/O request does
3580 * happen, the devq will be frozen. The CCB holds the path which is
3581 * used to release the devq. The devq is released and the CCB is freed
3582 * when the TM completes.
/* Allocate a CCB whose path is kept solely for the later devq release. */
3584 ccb = xpt_alloc_ccb_nowait();
3586 path_id = cam_sim_path(sc->sassc->sim);
3587 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3588 target->tid, lun_id) != CAM_REQ_CMP) {
/* Tie the TM command to the target and block further I/O to it. */
3592 tm->cm_targ = target;
3593 target->flags |= MPSSAS_TARGET_INRESET;
/*
 * mpssas_startup -- begin SAS topology discovery by sending the IOC a
 * PORT_ENABLE request; the wait_for_port_enable flag keeps the simq
 * frozen until discovery completes.
 */
3599 mpssas_startup(struct mps_softc *sc)
3603 * Send the port enable message and set the wait_for_port_enable flag.
3604 * This flag helps to keep the simq frozen until all discovery events
3607 sc->wait_for_port_enable = 1;
3608 mpssas_send_portenable(sc);
/*
 * mpssas_send_portenable -- build and queue an MPI2 PORT_ENABLE request.
 * Completion is handled asynchronously by mpssas_portenable_complete().
 */
3613 mpssas_send_portenable(struct mps_softc *sc)
3615 MPI2_PORT_ENABLE_REQUEST *request;
3616 struct mps_command *cm;
/* Bail out if no free command frame is available right now. */
3620 if ((cm = mps_alloc_command(sc)) == NULL)
3622 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3623 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3624 request->MsgFlags = 0;
3626 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3627 cm->cm_complete = mpssas_portenable_complete;
/* Hand the request to the IOC; completion will fire cm_complete. */
3631 mps_map_command(sc, cm);
3632 mps_dprint(sc, MPS_XINFO,
3633 "mps_send_portenable finished cm %p req %p complete %p\n",
3634 cm, cm->cm_req, cm->cm_complete);
/*
 * mpssas_portenable_complete -- completion handler for the PORT_ENABLE
 * request sent by mpssas_send_portenable().  Logs failures, frees the
 * command, tears down the boot-time config intrhook, pulls WarpDrive
 * config pages before the bus scan, and releases the startup simq freeze.
 */
3639 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3641 MPI2_PORT_ENABLE_REPLY *reply;
3642 struct mpssas_softc *sassc;
3648 * Currently there should be no way we can hit this case. It only
3649 * happens when we have a failure to allocate chain frames, and
3650 * port enable commands don't have S/G lists.
3652 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3653 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3654 "This should not happen!\n", __func__, cm->cm_flags);
3657 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3659 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
/*
 * NOTE(review): the mask is applied before the byte swap here; on a
 * big-endian host that masks the wrong byte of the little-endian
 * IOCStatus field.  The conventional form is
 * (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) -- confirm and fix.
 */
3660 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3661 MPI2_IOCSTATUS_SUCCESS)
3662 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3664 mps_free_command(sc, cm);
/* Discovery has started; the boot-time config intrhook can go away. */
3665 if (sc->mps_ich.ich_arg != NULL) {
3666 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3667 config_intrhook_disestablish(&sc->mps_ich);
3668 sc->mps_ich.ich_arg = NULL;
3672 * Get WarpDrive info after discovery is complete but before the scan
3673 * starts. At this point, all devices are ready to be exposed to the
3674 * OS. If devices should be hidden instead, take them out of the
3675 * 'targets' array before the scan. The devinfo for a disk will have
3676 * some info and a volume's will be 0. Use that to remove disks.
3678 mps_wd_config_pages(sc);
3681 * Done waiting for port enable to complete. Decrement the refcount.
3682 * If refcount is 0, discovery is complete and a rescan of the bus can
3683 * take place. Since the simq was explicitly frozen before port
3684 * enable, it must be explicitly released here to keep the
3685 * freeze/release count in sync.
3687 sc->wait_for_port_enable = 0;
3688 sc->port_enable_complete = 1;
/* Wake any thread sleeping on port_enable_complete. */
3689 wakeup(&sc->port_enable_complete);
3690 mpssas_startup_decrement(sassc);
/*
 * mpssas_check_id -- check the given target id against the driver's
 * comma-separated exclude_ids string (return statements are not visible
 * in this view; presumably nonzero when the id is excluded -- confirm).
 * Note that strsep() consumes/modifies the exclude_ids buffer in place.
 */
3694 mpssas_check_id(struct mpssas_softc *sassc, int id)
3696 struct mps_softc *sc = sassc->sc;
3700 ids = &sc->exclude_ids[0];
/* Walk the comma-separated list, skipping empty tokens. */
3701 while((name = strsep(&ids, ",")) != NULL) {
3702 if (name[0] == '\0')
3704 if (strtol(name, NULL, 0) == (long)id)
3712 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3714 struct mpssas_softc *sassc;
3715 struct mpssas_lun *lun, *lun_tmp;
3716 struct mpssas_target *targ;
3721 * The number of targets is based on IOC Facts, so free all of
3722 * the allocated LUNs for each target and then the target buffer
3725 for (i=0; i< maxtargets; i++) {
3726 targ = &sassc->targets[i];
3727 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3731 free(sassc->targets, M_MPT2);
3733 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3734 M_MPT2, M_WAITOK|M_ZERO);
3735 if (!sassc->targets) {
3736 panic("%s failed to alloc targets with error %d\n",