2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2015 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 /* Communications core for Avago Technologies (LSI) MPT2 */
38 /* TODO Move headers to mpsvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
48 #include <sys/malloc.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
57 #include <machine/bus.h>
58 #include <machine/resource.h>
61 #include <machine/stdarg.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_xpt.h>
66 #include <cam/cam_debug.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_periph.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
73 #if __FreeBSD_version >= 900026
74 #include <cam/scsi/smp_all.h>
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
89 #define MPSSAS_DISCOVERY_TIMEOUT 20
90 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
93 * static array to check SCSI OpCode for EEDP protection bits
/*
 * PRO_R: check-and-remove protection info on reads; PRO_W/PRO_V: insert
 * protection info on writes/verify-class commands.  Note PRO_V aliases the
 * same MPI2 insert flag as PRO_W.
 */
95 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Table indexed by SCSI CDB opcode (0x00-0xFF); nonzero entries give the
 * EEDP flag bits to apply for that opcode.  The populated slots (0x28/0x2A/
 * 0x2E/0x2F, 0x41, 0x88/0x8A/0x8E/0x8F, 0x93, 0xA8/0xAA/0xAE/0xAF) line up
 * with the READ/WRITE/WRITE-AND-VERIFY/VERIFY 6-16 byte variants and WRITE
 * SAME — presumably intentional; verify against the SCSI opcode list.
 * NOTE(review): the closing "};" of this initializer is not visible in this
 * listing (original line numbering skips here) — confirm against upstream.
 */
98 static uint8_t op_code_prot[256] = {
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Malloc type tag used for all allocations made by this SAS layer. */
117 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
/*
 * Forward declarations for the CAM action handlers, command-completion
 * callbacks, and EEDP/port-enable helpers defined later in this file.
 * The SMP I/O entry points are only compiled on FreeBSD >= 900026, and the
 * EEDP check path only on releases that lack AC_ADVINFO_CHANGED support.
 */
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
124 struct mps_command *cm);
125 static void mpssas_scsiio_timeout(void *data);
126 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
127 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
128 struct mps_command *cm, union ccb *ccb);
129 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
130 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
131 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
132 #if __FreeBSD_version >= 900026
133 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
134 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
136 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
137 #endif //FreeBSD_version >= 900026
138 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149 struct mps_command *cm);
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
154 struct mpssas_target *target;
157 for (i = start; i < sassc->maxtargets; i++) {
158 target = &sassc->targets[i];
159 if (target->handle == handle)
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery. Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
/*
 * Increment the discovery refcount.  The 0->1 transition (only while
 * MPSSAS_IN_STARTUP is set) freezes the SIM queue so CAM queues rather than
 * fails I/O until discovery completes.
 * NOTE(review): the #if branch body, #endif and closing braces are elided
 * in this listing — confirm against upstream.
 */
174 mpssas_startup_increment(struct mpssas_softc *sassc)
176 MPS_FUNCTRACE(sassc->sc);
178 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 if (sassc->startup_refcount++ == 0) {
180 /* just starting, freeze the simq */
181 mps_dprint(sassc->sc, MPS_INIT,
182 "%s freezing simq\n", __func__);
183 #if __FreeBSD_version >= 1000039
186 xpt_freeze_simq(sassc->sim, 1);
188 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189 sassc->startup_refcount);
/*
 * If this SAS instance froze the SIM queue (MPSSAS_QUEUE_FROZEN), clear the
 * flag and release the queue so CAM resumes dispatching I/O.  Used on the
 * reinit path.
 */
194 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
196 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
197 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
198 xpt_release_simq(sassc->sim, 1);
199 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
/*
 * Decrement the discovery refcount.  On the 1->0 transition, discovery is
 * done: clear MPSSAS_IN_STARTUP, release the SIM queue frozen by
 * mpssas_startup_increment(), and rescan the whole bus for the current
 * topology.  Counterpart of mpssas_startup_increment().
 */
204 mpssas_startup_decrement(struct mpssas_softc *sassc)
206 MPS_FUNCTRACE(sassc->sc);
208 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
209 if (--sassc->startup_refcount == 0) {
210 /* finished all discovery-related actions, release
211 * the simq and rescan for the latest topology.
213 mps_dprint(sassc->sc, MPS_INIT,
214 "%s releasing simq\n", __func__);
215 sassc->flags &= ~MPSSAS_IN_STARTUP;
216 xpt_release_simq(sassc->sim, 1);
217 #if __FreeBSD_version >= 1000039
/* NULL target => wildcard rescan of the entire bus */
220 mpssas_rescan_target(sassc->sc, NULL);
223 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
224 sassc->startup_refcount);
228 /* The firmware requires us to stop sending commands when we're doing task
229 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a task-management command from the high-priority pool.
 * NOTE(review): the return of 'tm' is elided in this listing — presumably
 * the function returns the allocated command (possibly NULL); confirm.
 */
233 mpssas_alloc_tm(struct mps_softc *sc)
235 struct mps_command *tm;
237 tm = mps_alloc_high_priority_command(sc);
/*
 * Release a task-management command back to the high-priority pool.
 * Clears the target's INRESET flag and, when a CCB/path was attached for
 * the TM, releases the per-device queue freeze and frees the path and CCB.
 */
242 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
244 int target_id = 0xFFFFFFFF;
250 * For TM's the devq is frozen for the device. Unfreeze it here and
251 * free the resources used for freezing the devq. Must clear the
252 * INRESET flag as well or scsi I/O will not work.
254 if (tm->cm_targ != NULL) {
255 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
256 target_id = tm->cm_targ->tid;
259 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
261 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
262 xpt_free_path(tm->cm_ccb->ccb_h.path);
263 xpt_free_ccb(tm->cm_ccb);
266 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when 'targ' is
 * NULL (target id becomes CAM_TARGET_WILDCARD).  Allocates a CCB and a
 * path; on allocation/path failure it logs and bails out without scanning.
 * NOTE(review): the xpt_rescan()/error-return lines are elided in this
 * listing — confirm against upstream.
 */
270 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
272 struct mpssas_softc *sassc = sc->sassc;
274 target_id_t targetid;
278 pathid = cam_sim_path(sassc->sim);
280 targetid = CAM_TARGET_WILDCARD;
/* target index in sassc->targets[] doubles as the CAM target id */
282 targetid = targ - sassc->targets;
285 * Allocate a CCB and schedule a rescan.
287 ccb = xpt_alloc_ccb_nowait();
289 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
293 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
294 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
295 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
300 if (targetid == CAM_TARGET_WILDCARD)
301 ccb->ccb_h.func_code = XPT_SCAN_BUS;
303 ccb->ccb_h.func_code = XPT_SCAN_TGT;
305 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logger for a command.  Emits nothing unless 'level'
 * is enabled in sc->mps_debug.  Builds a line in an sbuf containing the
 * CAM path (or a "noperiph" sim/bus/target tuple when no CCB is attached),
 * the SCSI CDB and transfer length for XPT_SCSI_IO, the SMID, and the
 * caller's formatted message.
 */
310 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
320 /* No need to be in here if debugging isn't enabled */
321 if ((cm->cm_sc->mps_debug & level) == 0)
324 sbuf_new(&sb, str, sizeof(str), 0);
328 if (cm->cm_ccb != NULL) {
329 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
331 sbuf_cat(&sb, path_str);
332 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
333 scsi_command_string(&cm->cm_ccb->csio, &sb);
334 sbuf_printf(&sb, "length %d ",
335 cm->cm_ccb->csio.dxfer_len);
/* no CCB: identify the command by sim/bus/target instead of a path */
339 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
340 cam_sim_name(cm->cm_sc->sassc->sim),
341 cam_sim_unit(cm->cm_sc->sassc->sim),
342 cam_sim_bus(cm->cm_sc->sassc->sim),
343 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
347 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
348 sbuf_vprintf(&sb, fmt, ap);
350 mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  Unlike bare-drive removal, volumes do
 * not get a follow-up SAS_OP_REMOVE_DEVICE; on success the target's handle
 * bookkeeping is cleared here directly.  The device handle travels in
 * cm_complete_data.
 */
357 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
359 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
360 struct mpssas_target *targ;
365 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
366 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
/* NULL reply: the controller was diag-reset underneath us */
370 /* XXX retry the remove after the diag reset completes? */
371 mps_dprint(sc, MPS_FAULT,
372 "%s NULL reply resetting device 0x%04x\n", __func__,
374 mpssas_free_tm(sc, tm);
378 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
379 MPI2_IOCSTATUS_SUCCESS) {
380 mps_dprint(sc, MPS_ERROR,
381 "IOCStatus = 0x%x while resetting device 0x%x\n",
382 le16toh(reply->IOCStatus), handle);
385 mps_dprint(sc, MPS_XINFO,
386 "Reset aborted %u commands\n", reply->TerminationCount);
387 mps_free_reply(sc, tm->cm_reply_data);
388 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
390 mps_dprint(sc, MPS_XINFO,
391 "clearing target %u handle 0x%04x\n", targ->tid, handle);
394 * Don't clear target if remove fails because things will get confusing.
395 * Leave the devname and sasaddr intact so that we know to avoid reusing
396 * this target id if possible, and so we can assign the same target id
397 * to this device if it comes back in the future.
399 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
400 MPI2_IOCSTATUS_SUCCESS) {
403 targ->encl_handle = 0x0;
404 targ->encl_slot = 0x0;
405 targ->exp_dev_handle = 0x0;
407 targ->linkrate = 0x0;
412 mpssas_free_tm(sc, tm);
417 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
418 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by its device handle: mark the
 * target INREMOVAL, rescan so CAM notices, then send a target-reset TM
 * whose completion (mpssas_remove_volume) clears the target.  On WD
 * controllers configured to always expose disks, this is a no-op.
 */
421 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
423 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424 struct mps_softc *sc;
425 struct mps_command *cm;
426 struct mpssas_target *targ = NULL;
428 MPS_FUNCTRACE(sassc->sc);
433 * If this is a WD controller, determine if the disk should be exposed
434 * to the OS or not. If disk should be exposed, return from this
435 * function without doing anything.
437 if (sc->WD_available && (sc->WD_hide_expose ==
438 MPS_WD_EXPOSE_ALWAYS)) {
443 targ = mpssas_find_target_by_handle(sassc, 0, handle);
445 /* FIXME: what is the action? */
446 /* We don't know about this device? */
447 mps_dprint(sc, MPS_ERROR,
448 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
452 targ->flags |= MPSSAS_TARGET_INREMOVAL;
454 cm = mpssas_alloc_tm(sc);
456 mps_dprint(sc, MPS_ERROR,
457 "%s: command alloc failure\n", __func__);
461 mpssas_rescan_target(sc, targ);
463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
464 req->DevHandle = targ->handle;
465 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
466 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
468 /* SAS Hard Link Reset / SATA Link Reset */
469 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* route through the high-priority queue; handle rides in complete_data */
473 cm->cm_desc.HighPriority.RequestFlags =
474 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
475 cm->cm_complete = mpssas_remove_volume;
476 cm->cm_complete_data = (void *)(uintptr_t)handle;
478 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
479 __func__, targ->tid);
480 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
482 mps_map_command(sc, cm);
486 * The MPT2 firmware performs debounce on the link to avoid transient link
487 * errors and false removals. When it does decide that link has been lost
488 * and a device need to go away, it expects that the host will perform a
489 * target reset and then an op remove. The reset has the side-effect of
490 * aborting any outstanding requests for the device, which is required for
491 * the op-remove to succeed. It's not clear if the host should check for
492 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device identified by its handle: mark the target
 * INREMOVAL, rescan, and send a target-reset TM.  Its completion
 * (mpssas_remove_device) then issues the SAS_OP_REMOVE_DEVICE step.
 * Note: unlike the volume variant, DevHandle is byte-swapped with htole16
 * and the request is zeroed first.
 */
495 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
497 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
498 struct mps_softc *sc;
499 struct mps_command *cm;
500 struct mpssas_target *targ = NULL;
502 MPS_FUNCTRACE(sassc->sc);
506 targ = mpssas_find_target_by_handle(sassc, 0, handle);
508 /* FIXME: what is the action? */
509 /* We don't know about this device? */
510 mps_dprint(sc, MPS_ERROR,
511 "%s : invalid handle 0x%x \n", __func__, handle);
515 targ->flags |= MPSSAS_TARGET_INREMOVAL;
517 cm = mpssas_alloc_tm(sc);
519 mps_dprint(sc, MPS_ERROR,
520 "%s: command alloc failure\n", __func__);
524 mpssas_rescan_target(sc, targ);
526 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
527 memset(req, 0, sizeof(*req));
528 req->DevHandle = htole16(targ->handle);
529 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
530 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
532 /* SAS Hard Link Reset / SATA Link Reset */
533 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
537 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
538 cm->cm_complete = mpssas_remove_device;
539 cm->cm_complete_data = (void *)(uintptr_t)handle;
541 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
542 __func__, targ->tid);
543 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
545 mps_map_command(sc, cm);
/*
 * Completion of the target-reset TM from mpssas_prepare_remove().  On a
 * good reply, the same command is reused in place as a SAS IO Unit Control
 * request (MPI2_SAS_OP_REMOVE_DEVICE) and re-mapped, chaining to
 * mpssas_remove_complete.  Any commands still queued on the target are
 * completed back to CAM as CAM_DEV_NOT_THERE.
 */
549 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
551 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
552 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
553 struct mpssas_target *targ;
554 struct mps_command *next_cm;
559 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
560 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
564 * Currently there should be no way we can hit this case. It only
565 * happens when we have a failure to allocate chain frames, and
566 * task management commands don't have S/G lists.
568 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
569 mps_dprint(sc, MPS_ERROR,
570 "%s: cm_flags = %#x for remove of handle %#04x! "
571 "This should not happen!\n", __func__, tm->cm_flags,
/* NULL reply: the controller was diag-reset underneath us */
576 /* XXX retry the remove after the diag reset completes? */
577 mps_dprint(sc, MPS_FAULT,
578 "%s NULL reply resetting device 0x%04x\n", __func__,
580 mpssas_free_tm(sc, tm);
584 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
585 MPI2_IOCSTATUS_SUCCESS) {
586 mps_dprint(sc, MPS_ERROR,
587 "IOCStatus = 0x%x while resetting device 0x%x\n",
588 le16toh(reply->IOCStatus), handle);
591 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
592 le32toh(reply->TerminationCount));
593 mps_free_reply(sc, tm->cm_reply_data);
594 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
596 /* Reuse the existing command */
597 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
598 memset(req, 0, sizeof(*req));
599 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
600 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
601 req->DevHandle = htole16(handle);
603 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
604 tm->cm_complete = mpssas_remove_complete;
605 tm->cm_complete_data = (void *)(uintptr_t)handle;
607 mps_map_command(sc, tm);
609 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
/* fail any I/O the reset left queued on this now-gone target */
611 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
614 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
615 ccb = tm->cm_complete_data;
616 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
617 mpssas_scsiio_complete(sc, tm);
/*
 * Final step of device removal: completion of the SAS_OP_REMOVE_DEVICE
 * request issued by mpssas_remove_device().  On success, clears the
 * target's handle bookkeeping (devname/sasaddr deliberately kept) and
 * frees any per-LUN state hanging off the target, then releases the TM.
 */
622 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
624 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
626 struct mpssas_target *targ;
627 struct mpssas_lun *lun;
631 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
632 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
635 * Currently there should be no way we can hit this case. It only
636 * happens when we have a failure to allocate chain frames, and
637 * task management commands don't have S/G lists.
639 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
640 mps_dprint(sc, MPS_XINFO,
641 "%s: cm_flags = %#x for remove of handle %#04x! "
642 "This should not happen!\n", __func__, tm->cm_flags,
644 mpssas_free_tm(sc, tm);
649 /* most likely a chip reset */
650 mps_dprint(sc, MPS_FAULT,
651 "%s NULL reply removing device 0x%04x\n", __func__, handle);
652 mpssas_free_tm(sc, tm);
656 mps_dprint(sc, MPS_XINFO,
657 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
658 handle, le16toh(reply->IOCStatus));
661 * Don't clear target if remove fails because things will get confusing.
662 * Leave the devname and sasaddr intact so that we know to avoid reusing
663 * this target id if possible, and so we can assign the same target id
664 * to this device if it comes back in the future.
666 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
667 MPI2_IOCSTATUS_SUCCESS) {
670 targ->encl_handle = 0x0;
671 targ->encl_slot = 0x0;
672 targ->exp_dev_handle = 0x0;
674 targ->linkrate = 0x0;
/* drain and free the target's LUN list */
678 while(!SLIST_EMPTY(&targ->luns)) {
679 lun = SLIST_FIRST(&targ->luns);
680 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
686 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (device status,
 * discovery, topology, enclosure, IR/volume and log events) and register
 * mpssas_evt_handler for them, saving the handle in sassc->mpssas_eh.
 */
690 mpssas_register_events(struct mps_softc *sc)
692 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
695 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
696 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
697 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
698 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
700 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
701 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
703 setbit(events, MPI2_EVENT_IR_VOLUME);
704 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
705 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
706 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
708 mps_register_events(sc, events, mpssas_evt_handler, NULL,
709 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the per-instance softc and targets[]
 * array, create the SIMQ/SIM, start the event-handling taskqueue, register
 * the CAM bus, enter discovery (simq frozen via startup_increment), create
 * a wildcard path and hook the async handler used for EEDP detection, and
 * finally register for firmware events.
 * NOTE(review): error-unwind paths and several statements are elided in
 * this listing — confirm cleanup ordering against upstream.
 */
715 mps_attach_sas(struct mps_softc *sc)
717 struct mpssas_softc *sassc;
723 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
725 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
731 * XXX MaxTargets could change during a reinit. Since we don't
732 * resize the targets[] array during such an event, cache the value
733 * of MaxTargets here so that we don't get into trouble later. This
734 * should move into the reinit logic.
736 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
737 sassc->targets = malloc(sizeof(struct mpssas_target) *
738 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
739 if(!sassc->targets) {
740 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
748 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
749 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
754 unit = device_get_unit(sc->mps_dev);
755 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
756 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
757 if (sassc->sim == NULL) {
758 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
763 TAILQ_INIT(&sassc->ev_queue);
765 /* Initialize taskqueue for Event Handling */
766 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
767 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
768 taskqueue_thread_enqueue, &sassc->ev_tq);
769 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
770 device_get_nameunit(sc->mps_dev));
775 * XXX There should be a bus for every port on the adapter, but since
776 * we're just going to fake the topology for now, we'll pretend that
777 * everything is just a target on a single bus.
779 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
780 mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
787 * Assume that discovery events will start right away.
789 * Hold off boot until discovery is complete.
791 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
792 sc->sassc->startup_refcount = 0;
793 mpssas_startup_increment(sassc);
795 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
798 * Register for async events so we can determine the EEDP
799 * capabilities of devices.
801 status = xpt_create_path(&sassc->path, /*periph*/NULL,
802 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
804 if (status != CAM_REQ_CMP) {
805 mps_printf(sc, "Error %#x creating sim path\n", status);
/* newer releases get attribute-change notifications; older ones
 * fall back to AC_FOUND_DEVICE for EEDP probing */
810 #if (__FreeBSD_version >= 1000006) || \
811 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
812 event = AC_ADVINFO_CHANGED;
814 event = AC_FOUND_DEVICE;
816 status = xpt_register_async(event, mpssas_async, sc,
818 if (status != CAM_REQ_CMP) {
819 mps_dprint(sc, MPS_ERROR,
820 "Error %#x registering async handler for "
821 "AC_ADVINFO_CHANGED events\n", status);
822 xpt_free_path(sassc->path);
826 if (status != CAM_REQ_CMP) {
828 * EEDP use is the exception, not the rule.
829 * Warn the user, but do not fail to attach.
831 mps_printf(sc, "EEDP capabilities disabled.\n");
836 mpssas_register_events(sc);
/*
 * Tear down the SAS/CAM layer in reverse of mps_attach_sas(): deregister
 * firmware events, drain/free the event taskqueue (lock dropped to avoid
 * deadlock), detach the async handler and wildcard path, release any
 * startup simq freeze, deregister the bus, free SIM/SIMQ, then free all
 * per-target LUN state and the targets[] array.  Safe to call when attach
 * bailed early (sc->sassc NULL check).
 */
844 mps_detach_sas(struct mps_softc *sc)
846 struct mpssas_softc *sassc;
847 struct mpssas_lun *lun, *lun_tmp;
848 struct mpssas_target *targ;
853 if (sc->sassc == NULL)
857 mps_deregister_events(sc, sassc->mpssas_eh);
860 * Drain and free the event handling taskqueue with the lock
861 * unheld so that any parallel processing tasks drain properly
862 * without deadlocking.
864 if (sassc->ev_tq != NULL)
865 taskqueue_free(sassc->ev_tq);
867 /* Make sure CAM doesn't wedge if we had to bail out early. */
870 /* Deregister our async handler */
871 if (sassc->path != NULL) {
872 xpt_register_async(0, mpssas_async, sc, sassc->path);
873 xpt_free_path(sassc->path);
877 if (sassc->flags & MPSSAS_IN_STARTUP)
878 xpt_release_simq(sassc->sim, 1);
880 if (sassc->sim != NULL) {
881 xpt_bus_deregister(cam_sim_path(sassc->sim));
882 cam_sim_free(sassc->sim, FALSE);
887 if (sassc->devq != NULL)
888 cam_simq_free(sassc->devq);
890 for(i=0; i< sassc->maxtargets ;i++) {
891 targ = &sassc->targets[i];
892 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
896 free(sassc->targets, M_MPT2);
/*
 * Called when firmware discovery finishes: cancel any pending discovery
 * timeout and, if mapping-event tracking is on, arm the device-check
 * callout that updates missing-device counts once event processing has
 * caught up.
 */
904 mpssas_discovery_end(struct mpssas_softc *sassc)
906 struct mps_softc *sc = sassc->sc;
910 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
911 callout_stop(&sassc->discovery_callout);
914 * After discovery has completed, check the mapping table for any
915 * missing devices and update their missing counts. Only do this once
916 * whenever the driver is initialized so that missing counts aren't
917 * updated unnecessarily. Note that just because discovery has
918 * completed doesn't mean that events have been processed yet. The
919 * check_devices function is a callout timer that checks if ALL devices
920 * are missing. If so, it will wait a little longer for events to
921 * complete and keep resetting itself until some device in the mapping
922 * table is not missing, meaning that event processing has started.
924 if (sc->track_mapping_events) {
925 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
926 "completed. Check for missing devices in the mapping "
928 callout_reset(&sc->device_check_callout,
929 MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB
 * func_code: fills XPT_PATH_INQ (capabilities, maxio computed from SGEs
 * per frame * chain depth, optionally capped by the max_io_pages tunable),
 * answers XPT_GET_TRAN_SETTINGS from the target's cached link rate,
 * handles geometry/reset/scsiio/smpio, and fakes success for aborts and
 * term-I/O.  Must be called with the mps mutex held.
 */
935 mpssas_action(struct cam_sim *sim, union ccb *ccb)
937 struct mpssas_softc *sassc;
939 sassc = cam_sim_softc(sim);
941 MPS_FUNCTRACE(sassc->sc);
942 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
943 ccb->ccb_h.func_code);
944 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
946 switch (ccb->ccb_h.func_code) {
949 struct ccb_pathinq *cpi = &ccb->cpi;
950 struct mps_softc *sc = sassc->sc;
951 uint8_t sges_per_frame;
953 cpi->version_num = 1;
954 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
955 cpi->target_sprt = 0;
956 #if __FreeBSD_version >= 1000039
957 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
959 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
961 cpi->hba_eng_cnt = 0;
962 cpi->max_target = sassc->maxtargets - 1;
966 * initiator_id is set here to an ID outside the set of valid
967 * target IDs (including volumes).
969 cpi->initiator_id = sassc->maxtargets;
970 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
971 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
972 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
973 cpi->unit_number = cam_sim_unit(sim);
974 cpi->bus_id = cam_sim_bus(sim);
975 cpi->base_transfer_speed = 150000;
976 cpi->transport = XPORT_SAS;
977 cpi->transport_version = 0;
978 cpi->protocol = PROTO_SCSI;
979 cpi->protocol_version = SCSI_REV_SPC;
982 * Max IO Size is Page Size * the following:
983 * ((SGEs per frame - 1 for chain element) *
984 * Max Chain Depth) + 1 for no chain needed in last frame
986 * If user suggests a Max IO size to use, use the smaller of the
987 * user's value and the calculated value as long as the user's
988 * value is larger than 0. The user's value is in pages.
990 sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
991 sizeof(MPI2_SGE_SIMPLE64)) - 1;
992 cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
993 cpi->maxio *= PAGE_SIZE;
994 if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
996 cpi->maxio = sc->max_io_pages * PAGE_SIZE;
997 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1000 case XPT_GET_TRAN_SETTINGS:
1002 struct ccb_trans_settings *cts;
1003 struct ccb_trans_settings_sas *sas;
1004 struct ccb_trans_settings_scsi *scsi;
1005 struct mpssas_target *targ;
1008 sas = &cts->xport_specific.sas;
1009 scsi = &cts->proto_specific.scsi;
1011 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1012 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
1013 cts->ccb_h.target_id));
1014 targ = &sassc->targets[cts->ccb_h.target_id];
1015 if (targ->handle == 0x0) {
1016 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1020 cts->protocol_version = SCSI_REV_SPC2;
1021 cts->transport = XPORT_SAS;
1022 cts->transport_version = 0;
1024 sas->valid = CTS_SAS_VALID_SPEED;
/* map cached MPI link rate to a CAM bitrate in kb/s */
1025 switch (targ->linkrate) {
1027 sas->bitrate = 150000;
1030 sas->bitrate = 300000;
1033 sas->bitrate = 600000;
1039 cts->protocol = PROTO_SCSI;
1040 scsi->valid = CTS_SCSI_VALID_TQ;
1041 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1043 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1046 case XPT_CALC_GEOMETRY:
1047 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1048 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1051 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1052 mpssas_action_resetdev(sassc, ccb);
1057 mps_dprint(sassc->sc, MPS_XINFO,
1058 "mpssas_action faking success for abort or reset\n");
1059 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1062 mpssas_action_scsiio(sassc, ccb);
1064 #if __FreeBSD_version >= 900026
1066 mpssas_action_smpio(sassc, ccb);
1070 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Post a CAM async event (e.g. AC_BUS_RESET, AC_SENT_BDR) for the given
 * target/lun on this SIM's path.  Builds a temporary path, fires the
 * async, and frees the path; logs and returns on path-creation failure.
 */
1078 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1079 target_id_t target_id, lun_id_t lun_id)
1081 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1082 struct cam_path *path;
1084 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1085 ac_code, target_id, (uintmax_t)lun_id);
1087 if (xpt_create_path(&path, NULL,
1088 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1089 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1094 xpt_async(ac_code, path, NULL);
1095 xpt_free_path(path);
/*
 * Diag-reset cleanup: walk every command slot (index 0 is reserved, loop
 * starts at 1), null its reply, and complete it — via its completion
 * callback, a wakeup for sleeping waiters, or the COMPLETE flag for polled
 * commands — adjusting io_cmds_active.  Commands that were neither
 * completed nor free are logged as anomalies.  Must hold the mps mutex.
 */
1099 mpssas_complete_all_commands(struct mps_softc *sc)
1101 struct mps_command *cm;
1106 mtx_assert(&sc->mps_mtx, MA_OWNED);
1108 /* complete all commands with a NULL reply */
1109 for (i = 1; i < sc->num_reqs; i++) {
1110 cm = &sc->commands[i];
1111 cm->cm_reply = NULL;
1114 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1115 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1117 if (cm->cm_complete != NULL) {
1118 mpssas_log_command(cm, MPS_RECOVERY,
1119 "completing cm %p state %x ccb %p for diag reset\n",
1120 cm, cm->cm_state, cm->cm_ccb);
1122 cm->cm_complete(sc, cm);
1126 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1127 mpssas_log_command(cm, MPS_RECOVERY,
1128 "waking up cm %p state %x ccb %p for diag reset\n",
1129 cm, cm->cm_state, cm->cm_ccb);
1134 if (cm->cm_sc->io_cmds_active != 0)
1135 cm->cm_sc->io_cmds_active--;
1137 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1138 /* this should never happen, but if it does, log */
1139 mpssas_log_command(cm, MPS_RECOVERY,
1140 "cm %p state %x flags 0x%x ccb %p during diag "
1141 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset recovery: re-enter startup mode (freezes the simq via
 * startup_increment), announce a bus reset to CAM, complete every
 * outstanding command, then invalidate all cached target handles and mark
 * every target INDIAGRESET so discovery can repopulate them.
 */
1148 mpssas_handle_reinit(struct mps_softc *sc)
1152 /* Go back into startup mode and freeze the simq, so that CAM
1153 * doesn't send any commands until after we've rediscovered all
1154 * targets and found the proper device handles for them.
1156 * After the reset, portenable will trigger discovery, and after all
1157 * discovery-related activities have finished, the simq will be
1160 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1161 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1162 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1163 mpssas_startup_increment(sc->sassc);
1165 /* notify CAM of a bus reset */
1166 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1169 /* complete and cleanup after all outstanding commands */
1170 mpssas_complete_all_commands(sc);
1172 mps_dprint(sc, MPS_INIT,
1173 "%s startup %u after command completion\n", __func__,
1174 sc->sassc->startup_refcount);
1176 /* zero all the target handles, since they may change after the
1177 * reset, and we have to rediscover all the targets and use the new
1180 for (i = 0; i < sc->sassc->maxtargets; i++) {
1181 if (sc->sassc->targets[i].outstanding != 0)
1182 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1183 i, sc->sassc->targets[i].outstanding);
1184 sc->sassc->targets[i].handle = 0x0;
1185 sc->sassc->targets[i].exp_dev_handle = 0x0;
1186 sc->sassc->targets[i].outstanding = 0;
1187 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command does not complete in time;
 * logs the timed-out TM.  The recovery action (diag reset) is elided from
 * this listing — confirm against upstream.
 */
1192 mpssas_tm_timeout(void *data)
1194 struct mps_command *tm = data;
1195 struct mps_softc *sc = tm->cm_sc;
1197 mtx_assert(&sc->mps_mtx, MA_OWNED);
1199 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1200 "task mgmt %p timed out\n", tm);
/*
 * Completion callback for a LOGICAL_UNIT_RESET task-management command
 * (set by mpssas_send_reset()).  Cancels the TM timeout, validates the
 * reply, and then either finishes recovery for the LU, moves on to the
 * next timed-out command, or escalates to a target reset if commands
 * for this LUN are still outstanding.
 * NOTE(review): line-sampled excerpt; some statements/braces elided.
 */
1205 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1207 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1208 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1209 unsigned int cm_count = 0;
1210 struct mps_command *cm;
1211 struct mpssas_target *targ;
/* The TM completed one way or another; disarm mpssas_tm_timeout(). */
1213 callout_stop(&tm->cm_callout);
1215 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1216 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1220 * Currently there should be no way we can hit this case. It only
1221 * happens when we have a failure to allocate chain frames, and
1222 * task management commands don't have S/G lists.
1223 * XXXSL So should it be an assertion?
1225 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1226 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1227 "This should not happen!\n", __func__, tm->cm_flags);
1228 mpssas_free_tm(sc, tm);
/* A NULL reply is only expected when a diag reset flushed the TM. */
1232 if (reply == NULL) {
1233 mpssas_log_command(tm, MPS_RECOVERY,
1234 "NULL reset reply for tm %p\n", tm);
1235 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1236 /* this completion was due to a reset, just cleanup */
1238 mpssas_free_tm(sc, tm);
1241 /* we should have gotten a reply. */
1247 mpssas_log_command(tm, MPS_RECOVERY,
1248 "logical unit reset status 0x%x code 0x%x count %u\n",
1249 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1250 le32toh(reply->TerminationCount));
1252 /* See if there are any outstanding commands for this LUN.
1253 * This could be made more efficient by using a per-LU data
1254 * structure of some sort.
1256 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1257 if (cm->cm_lun == tm->cm_lun)
/* cm_count == 0: nothing left on this LUN, the reset worked. */
1261 if (cm_count == 0) {
1262 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1263 "logical unit %u finished recovery after reset\n",
/* Tell CAM a BDR (bus device reset) was delivered to this target. */
1266 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1269 /* we've finished recovery for this logical unit. check and
1270 * see if some other logical unit has a timedout command
1271 * that needs to be processed.
1273 cm = TAILQ_FIRST(&targ->timedout_commands);
/* Reuse this TM to abort the next timed-out command, if any... */
1275 mpssas_send_abort(sc, tm, cm);
/* ...otherwise recovery is done; release the TM. */
1279 mpssas_free_tm(sc, tm);
1283 /* if we still have commands for this LUN, the reset
1284 * effectively failed, regardless of the status reported.
1285 * Escalate to a target reset.
1287 mpssas_log_command(tm, MPS_RECOVERY,
1288 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1290 mpssas_send_reset(sc, tm,
1291 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion callback for a TARGET_RESET task-management command (set
 * by mpssas_send_reset()).  Mirrors the LUN-reset completion path: stop
 * the TM timeout, validate the reply, then declare recovery finished if
 * the target has no outstanding commands; otherwise the reset failed
 * and recovery escalates (escalation code elided from this excerpt).
 * NOTE(review): line-sampled excerpt; some statements/braces elided.
 */
1296 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1298 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1299 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1300 struct mpssas_target *targ;
/* Disarm mpssas_tm_timeout() now that the TM has completed. */
1302 callout_stop(&tm->cm_callout);
1304 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1305 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1309 * Currently there should be no way we can hit this case. It only
1310 * happens when we have a failure to allocate chain frames, and
1311 * task management commands don't have S/G lists.
1313 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1314 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1315 "This should not happen!\n", __func__, tm->cm_flags);
1316 mpssas_free_tm(sc, tm);
/* A NULL reply is only expected when a diag reset flushed the TM. */
1320 if (reply == NULL) {
1321 mpssas_log_command(tm, MPS_RECOVERY,
1322 "NULL reset reply for tm %p\n", tm);
1323 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1324 /* this completion was due to a reset, just cleanup */
1326 mpssas_free_tm(sc, tm);
1329 /* we should have gotten a reply. */
1335 mpssas_log_command(tm, MPS_RECOVERY,
1336 "target reset status 0x%x code 0x%x count %u\n",
1337 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1338 le32toh(reply->TerminationCount));
1340 if (targ->outstanding == 0) {
1341 /* we've finished recovery for this target and all
1342 * of its logical units.
1344 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1345 "recovery finished after target reset\n");
/* Tell CAM a BDR was delivered to this target. */
1347 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1351 mpssas_free_tm(sc, tm);
1354 /* after a target reset, if this target still has
1355 * outstanding commands, the reset effectively failed,
1356 * regardless of the status reported. escalate.
1358 mpssas_log_command(tm, MPS_RECOVERY,
1359 "target reset complete for tm %p, but still have %u command(s)\n",
1360 tm, targ->outstanding);
1365 #define MPS_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset of the given 'type'
 * (LOGICAL_UNIT_RESET or TARGET_RESET) for the target attached to 'tm'.
 * Arms a MPS_RESET_TIMEOUT-second callout (mpssas_tm_timeout) and wires
 * the matching completion handler before mapping the command.
 * NOTE(review): line-sampled excerpt; some statements/braces elided.
 */
1368 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1370 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1371 struct mpssas_target *target;
1374 target = tm->cm_targ;
/* Without a firmware device handle there is nothing to address. */
1375 if (target->handle == 0) {
1376 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1377 __func__, target->tid);
/* Fill in the MPI2 TM request frame; multi-byte fields go out LE. */
1381 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1382 req->DevHandle = htole16(target->handle);
1383 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1384 req->TaskType = type;
1386 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1387 /* XXX Need to handle invalid LUNs */
1388 MPS_SET_LUN(req->LUN, tm->cm_lun);
1389 tm->cm_targ->logical_unit_resets++;
1390 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1391 "sending logical unit reset\n");
1392 tm->cm_complete = mpssas_logical_unit_reset_complete;
1393 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1395 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1397 * Target reset method =
1398 * SAS Hard Link Reset / SATA Link Reset
1400 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1401 tm->cm_targ->target_resets++;
1402 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1403 "sending target reset\n");
1404 tm->cm_complete = mpssas_target_reset_complete;
/* A target reset applies to every LUN, hence the wildcard. */
1405 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1408 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TM requests use the high-priority request descriptor path. */
1413 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1414 tm->cm_complete_data = (void *)tm;
1416 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1417 mpssas_tm_timeout, tm);
1419 err = mps_map_command(sc, tm);
1421 mpssas_log_command(tm, MPS_RECOVERY,
1422 "error %d sending reset type %u\n",
/*
 * Completion callback for an ABORT_TASK task-management command (set by
 * mpssas_send_abort()).  Three outcomes: no timed-out commands remain
 * (recovery done), the aborted command completed but more timed-out
 * commands are queued (abort the next one), or the command we tried to
 * abort is still at the head of the queue (abort failed -- escalate to
 * a logical-unit reset).
 * NOTE(review): line-sampled excerpt; some statements/braces elided.
 */
1430 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1432 struct mps_command *cm;
1433 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1434 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1435 struct mpssas_target *targ;
/* Disarm mpssas_tm_timeout() now that the TM has completed. */
1437 callout_stop(&tm->cm_callout);
1439 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1440 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1444 * Currently there should be no way we can hit this case. It only
1445 * happens when we have a failure to allocate chain frames, and
1446 * task management commands don't have S/G lists.
1448 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1449 mpssas_log_command(tm, MPS_RECOVERY,
1450 "cm_flags = %#x for abort %p TaskMID %u!\n",
1451 tm->cm_flags, tm, le16toh(req->TaskMID));
1452 mpssas_free_tm(sc, tm);
/* A NULL reply is only expected when a diag reset flushed the TM. */
1456 if (reply == NULL) {
1457 mpssas_log_command(tm, MPS_RECOVERY,
1458 "NULL abort reply for tm %p TaskMID %u\n",
1459 tm, le16toh(req->TaskMID));
1460 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1461 /* this completion was due to a reset, just cleanup */
1463 mpssas_free_tm(sc, tm);
1466 /* we should have gotten a reply. */
1472 mpssas_log_command(tm, MPS_RECOVERY,
1473 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1474 le16toh(req->TaskMID),
1475 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1476 le32toh(reply->TerminationCount));
1478 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1480 /* if there are no more timedout commands, we're done with
1481 * error recovery for this target.
1483 mpssas_log_command(tm, MPS_RECOVERY,
1484 "finished recovery after aborting TaskMID %u\n",
1485 le16toh(req->TaskMID));
1488 mpssas_free_tm(sc, tm);
/* Head of the timed-out queue changed: the abort took effect. */
1490 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1491 /* abort success, but we have more timedout commands to abort */
1492 mpssas_log_command(tm, MPS_RECOVERY,
1493 "continuing recovery after aborting TaskMID %u\n",
1494 le16toh(req->TaskMID));
1496 mpssas_send_abort(sc, tm, cm);
1499 /* we didn't get a command completion, so the abort
1500 * failed as far as we're concerned. escalate.
1502 mpssas_log_command(tm, MPS_RECOVERY,
1503 "abort failed for TaskMID %u tm %p\n",
1504 le16toh(req->TaskMID), tm);
1506 mpssas_send_reset(sc, tm,
1507 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1511 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT_TASK task-management command for the timed-
 * out command 'cm', reusing TM command 'tm'.  Arms a MPS_ABORT_TIMEOUT-
 * second callout (mpssas_tm_timeout) and sets mpssas_abort_complete as
 * the completion handler.  The task to abort is identified by the SMID
 * of 'cm' placed in TaskMID.
 * NOTE(review): line-sampled excerpt; some statements/braces elided
 * (e.g. the assignment of 'targ' before the handle check).
 */
1514 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1516 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1517 struct mpssas_target *targ;
/* Without a firmware device handle there is nothing to address. */
1521 if (targ->handle == 0) {
1522 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1523 __func__, cm->cm_ccb->ccb_h.target_id);
1527 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1528 "Aborting command %p\n", cm);
/* Fill in the MPI2 TM request frame; multi-byte fields go out LE. */
1530 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1531 req->DevHandle = htole16(targ->handle);
1532 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1533 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1535 /* XXX Need to handle invalid LUNs */
1536 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID names the firmware request (by SMID) that must be aborted. */
1538 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests use the high-priority request descriptor path. */
1541 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1542 tm->cm_complete = mpssas_abort_complete;
1543 tm->cm_complete_data = (void *)tm;
1544 tm->cm_targ = cm->cm_targ;
1545 tm->cm_lun = cm->cm_lun;
1547 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1548 mpssas_tm_timeout, tm);
1552 mps_dprint(sc, MPS_INFO, "Sending reset from %s for target ID %d\n",
1553 __func__, targ->tid);
1554 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1556 err = mps_map_command(sc, tm);
1558 mps_dprint(sc, MPS_RECOVERY,
1559 "error %d sending abort for cm %p SMID %u\n",
1560 err, cm, req->TaskMID);
/*
 * Callout handler: a SCSI I/O armed in mpssas_action_scsiio() did not
 * complete within the CCB's timeout.  Re-runs the interrupt handler to
 * close the completion race, then marks the command timed out, queues
 * it on the target's recovery list, and starts (or joins) recovery by
 * sending an abort through a task-management command.
 * NOTE(review): line-sampled excerpt; some statements/braces elided.
 */
1565 mpssas_scsiio_timeout(void *data)
1567 struct mps_softc *sc;
1568 struct mps_command *cm;
1569 struct mpssas_target *targ;
1571 cm = (struct mps_command *)data;
1575 mtx_assert(&sc->mps_mtx, MA_OWNED);
1577 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1580 * Run the interrupt handler to make sure it's not pending. This
1581 * isn't perfect because the command could have already completed
1582 * and been re-used, though this is unlikely.
1584 mps_intr_locked(sc);
/* If the interrupt pass just completed it, there is nothing to do. */
1585 if (cm->cm_state == MPS_CM_STATE_FREE) {
1586 mpssas_log_command(cm, MPS_XINFO,
1587 "SCSI command %p almost timed out\n", cm);
1591 if (cm->cm_ccb == NULL) {
1592 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1599 mpssas_log_command(cm, MPS_ERROR, "command timeout %d cm %p target "
1600 "%u, handle(0x%04x)\n", cm->cm_ccb->ccb_h.timeout, cm, targ->tid,
1603 /* XXX first, check the firmware state, to see if it's still
1604 * operational. if not, do a diag reset.
1606 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1607 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
/* Recovery (TM completion handlers) drains this per-target queue. */
1608 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1610 if (targ->tm != NULL) {
1611 /* target already in recovery, just queue up another
1612 * timedout command to be processed later.
1614 mps_dprint(sc, MPS_RECOVERY,
1615 "queued timedout cm %p for processing by tm %p\n",
1618 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1619 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1622 /* start recovery by aborting the first timedout command */
1623 mpssas_send_abort(sc, targ->tm, cm);
1626 /* XXX queue this target up for recovery once a TM becomes
1627 * available. The firmware only has a limited number of
1628 * HighPriority credits for the high priority requests used
1629 * for task management, and we ran out.
1631 * Isilon: don't worry about this for now, since we have
1632 * more credits than disks in an enclosure, and limit
1633 * ourselves to one TM per target for recovery.
1635 mps_dprint(sc, MPS_RECOVERY,
1636 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI CCB into an MPI2
 * SCSI_IO request and queue it to the controller.  Validates the target,
 * handles shutdown/reset/removal corner cases, builds the request frame
 * (direction, tagging, LUN, CDB, optional EEDP/DIF fields), arms the
 * per-command timeout, and maps the command for DMA.
 * NOTE(review): line-sampled excerpt; some statements/braces elided.
 */
1642 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1644 MPI2_SCSI_IO_REQUEST *req;
1645 struct ccb_scsiio *csio;
1646 struct mps_softc *sc;
1647 struct mpssas_target *targ;
1648 struct mpssas_lun *lun;
1649 struct mps_command *cm;
1650 uint8_t i, lba_byte, *ref_tag_addr;
1651 uint16_t eedp_flags;
1652 uint32_t mpi_control;
1656 mtx_assert(&sc->mps_mtx, MA_OWNED);
1659 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1660 ("Target %d out of bounds in XPT_SCSI_IO\n",
1661 csio->ccb_h.target_id));
1662 targ = &sassc->targets[csio->ccb_h.target_id];
1663 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* handle == 0 means the target is not (or no longer) mapped by FW. */
1664 if (targ->handle == 0x0) {
1665 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1666 __func__, csio->ccb_h.target_id);
1667 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component members must be addressed via the volume, not raw. */
1671 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1672 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1673 "supported %u\n", __func__, csio->ccb_h.target_id);
1674 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1679 * Sometimes, it is possible to get a command that is not "In
1680 * Progress" and was actually aborted by the upper layer. Check for
1681 * this here and complete the command without error.
1683 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1684 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1685 "target %u\n", __func__, csio->ccb_h.target_id);
1690 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1691 * that the volume has timed out. We want volumes to be enumerated
1692 * until they are deleted/removed, not just failed.
1694 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1695 if (targ->devinfo == 0)
1696 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
1698 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
/* Reject new I/O once the driver has begun shutting down. */
1703 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1704 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1705 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1711 * If target has a reset in progress, freeze the devq and return. The
1712 * devq will be released when the TM reset is finished.
1714 if (targ->flags & MPSSAS_TARGET_INRESET) {
1715 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1716 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1717 __func__, targ->tid);
1718 xpt_freeze_devq(ccb->ccb_h.path, 1);
1723 cm = mps_alloc_command(sc);
/* No free command (or a diag reset hit): freeze the SIM queue and
 * ask CAM to requeue the CCB later. */
1724 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1726 mps_free_command(sc, cm);
1728 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1729 xpt_freeze_simq(sassc->sim, 1);
1730 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1732 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1733 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI_IO request frame from scratch. */
1738 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1739 bzero(req, sizeof(*req));
1740 req->DevHandle = htole16(targ->handle);
1741 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1743 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1744 req->SenseBufferLength = MPS_SENSE_LEN;
1746 req->ChainOffset = 0;
1747 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1752 req->DataLength = htole32(csio->dxfer_len);
1753 req->BidirectionalDataLength = 0;
1754 req->IoFlags = htole16(csio->cdb_len);
1757 /* Note: BiDirectional transfers are not supported */
1758 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1760 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1761 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1764 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1765 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1769 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field set. */
1773 if (csio->cdb_len == 32)
1774 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1776 * It looks like the hardware doesn't require an explicit tag
1777 * number for each transaction. SAM Task Management not supported
1780 switch (csio->tag_action) {
1781 case MSG_HEAD_OF_Q_TAG:
1782 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1784 case MSG_ORDERED_Q_TAG:
1785 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1788 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1790 case CAM_TAG_ACTION_NONE:
1791 case MSG_SIMPLE_Q_TAG:
1793 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Per-target TLR (transport layer retries) setting folds in here. */
1796 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1797 req->Control = htole32(mpi_control);
1798 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1799 mps_free_command(sc, cm);
1800 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* CDB may arrive by pointer or inline, depending on CCB flags. */
1805 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1806 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1808 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1809 req->IoFlags = htole16(csio->cdb_len);
1812 * Check if EEDP is supported and enabled. If it is then check if the
1813 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1814 * is formatted for EEDP support. If all of this is true, set CDB up
1815 * for EEDP transfer.
1817 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1818 if (sc->eedp_enabled && eedp_flags) {
1819 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1820 if (lun->lun_id == csio->ccb_h.target_lun) {
1825 if ((lun != NULL) && (lun->eedp_formatted)) {
1826 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1827 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1828 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1829 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1830 req->EEDPFlags = htole16(eedp_flags);
1833 * If CDB less than 32, fill in Primary Ref Tag with
1834 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1835 * already there. Also, set protection bit. FreeBSD
1836 * currently does not support CDBs bigger than 16, but
1837 * the code doesn't hurt, and will be here for the
1840 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 in 16-byte CDBs, byte 2 otherwise. */
1841 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1842 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1843 PrimaryReferenceTag;
1844 for (i = 0; i < 4; i++) {
1846 req->CDB.CDB32[lba_byte + i];
1849 req->CDB.EEDP32.PrimaryReferenceTag =
1850 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1851 req->CDB.EEDP32.PrimaryApplicationTagMask =
1853 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1857 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1858 req->EEDPFlags = htole16(eedp_flags);
1859 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Data phase: hand the CCB to the SGL builder via cm_data. */
1865 cm->cm_length = csio->dxfer_len;
1866 if (cm->cm_length != 0) {
1868 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1872 cm->cm_sge = &req->SGL;
1873 cm->cm_sglsize = (32 - 24) * 4;
1874 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1875 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1876 cm->cm_complete = mpssas_scsiio_complete;
1877 cm->cm_complete_data = ccb;
1879 cm->cm_lun = csio->ccb_h.target_lun;
1883 * If HBA is a WD and the command is not for a retry, try to build a
1884 * direct I/O message. If failed, or the command is for a retry, send
1885 * the I/O to the IR volume itself.
1887 if (sc->WD_valid_config) {
1888 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1889 mpssas_direct_drive_io(sassc, cm, ccb);
1891 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
/* Arm the per-I/O timeout; fires mpssas_scsiio_timeout(). */
1895 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1896 mpssas_scsiio_timeout, cm, 0);
1899 targ->outstanding++;
1900 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1901 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1903 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1904 __func__, cm, ccb, targ->outstanding);
1906 mps_map_command(sc, cm);
/*
 * Translate an MPI2 task-management response code into a human-readable
 * string and emit it at MPS_XINFO debug level.  Purely diagnostic.
 * NOTE(review): line-sampled excerpt; 'break's and the 'default' arm
 * are among the elided lines.
 */
1911 mps_response_code(struct mps_softc *sc, u8 response_code)
1915 switch (response_code) {
1916 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1917 desc = "task management request completed";
1919 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1920 desc = "invalid frame";
1922 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1923 desc = "task management request not supported";
1925 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1926 desc = "task management request failed";
1928 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1929 desc = "task management request succeeded";
1931 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1932 desc = "invalid lun";
1935 desc = "overlapped tag attempted";
1937 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1938 desc = "task queued, however not sent to target";
1944 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1945 response_code, desc);
1948 * mps_sc_failed_io_info - log decoded details of a non-successful SCSI_IO
/*
 * Decode the IOC status, SCSI status and SCSI state from a failed
 * SCSI_IO reply into readable strings and print them at MPS_XINFO debug
 * level, including autosense data and the TM response code when the
 * reply flags say they are valid.  Purely diagnostic; no state changes.
 * NOTE(review): line-sampled excerpt; 'break's and some braces elided.
 */
1951 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1952 Mpi2SCSIIOReply_t *mpi_reply)
1956 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1957 MPI2_IOCSTATUS_MASK;
1958 u8 scsi_state = mpi_reply->SCSIState;
1959 u8 scsi_status = mpi_reply->SCSIStatus;
1960 char *desc_ioc_state = NULL;
1961 char *desc_scsi_status = NULL;
/* scsi_state text is assembled by strcat() into the softc scratch
 * buffer (sc->tmp_string). */
1962 char *desc_scsi_state = sc->tmp_string;
1963 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is filtered out; its meaning isn't shown here -- TODO
 * confirm against the full source. */
1965 if (log_info == 0x31170000)
1968 switch (ioc_status) {
1969 case MPI2_IOCSTATUS_SUCCESS:
1970 desc_ioc_state = "success";
1972 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1973 desc_ioc_state = "invalid function";
1975 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1976 desc_ioc_state = "scsi recovered error";
1978 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1979 desc_ioc_state = "scsi invalid dev handle";
1981 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1982 desc_ioc_state = "scsi device not there";
1984 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1985 desc_ioc_state = "scsi data overrun";
1987 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1988 desc_ioc_state = "scsi data underrun";
1990 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1991 desc_ioc_state = "scsi io data error";
1993 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1994 desc_ioc_state = "scsi protocol error";
1996 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1997 desc_ioc_state = "scsi task terminated";
1999 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2000 desc_ioc_state = "scsi residual mismatch";
2002 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2003 desc_ioc_state = "scsi task mgmt failed";
2005 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2006 desc_ioc_state = "scsi ioc terminated";
2008 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2009 desc_ioc_state = "scsi ext terminated";
2011 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2012 desc_ioc_state = "eedp guard error";
2014 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2015 desc_ioc_state = "eedp ref tag error";
2017 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2018 desc_ioc_state = "eedp app tag error";
2021 desc_ioc_state = "unknown";
2025 switch (scsi_status) {
2026 case MPI2_SCSI_STATUS_GOOD:
2027 desc_scsi_status = "good";
2029 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2030 desc_scsi_status = "check condition";
2032 case MPI2_SCSI_STATUS_CONDITION_MET:
2033 desc_scsi_status = "condition met";
2035 case MPI2_SCSI_STATUS_BUSY:
2036 desc_scsi_status = "busy";
2038 case MPI2_SCSI_STATUS_INTERMEDIATE:
2039 desc_scsi_status = "intermediate";
2041 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2042 desc_scsi_status = "intermediate condmet";
2044 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2045 desc_scsi_status = "reservation conflict";
2047 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2048 desc_scsi_status = "command terminated";
2050 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2051 desc_scsi_status = "task set full";
2053 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2054 desc_scsi_status = "aca active";
2056 case MPI2_SCSI_STATUS_TASK_ABORTED:
2057 desc_scsi_status = "task aborted";
2060 desc_scsi_status = "unknown";
2064 desc_scsi_state[0] = '\0';
/* NOTE(review): the reassignment to a string-literal " " just before
 * strcat() calls looks suspicious if the literal ends up as the strcat
 * target (UB); intervening elided lines likely restructure this --
 * verify against the full source before drawing conclusions. */
2066 desc_scsi_state = " ";
2067 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2068 strcat(desc_scsi_state, "response info ");
2069 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2070 strcat(desc_scsi_state, "state terminated ");
2071 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2072 strcat(desc_scsi_state, "no status ");
2073 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2074 strcat(desc_scsi_state, "autosense failed ");
2075 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2076 strcat(desc_scsi_state, "autosense valid ");
2078 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2079 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2080 /* We can add more detail about underflow data here
2083 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2084 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2085 desc_scsi_state, scsi_state);
/* Dump decoded sense data only when debugging is on and sense is valid. */
2087 if (sc->mps_debug & MPS_XINFO &&
2088 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2089 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2090 scsi_sense_print(csio);
2091 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
/* Byte 0 of ResponseInfo carries the TM response code. */
2094 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2095 response_info = le32toh(mpi_reply->ResponseInfo);
2096 response_bytes = (u8 *)&response_info;
2097 mps_response_code(sc,response_bytes[0]);
2102 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2104 MPI2_SCSI_IO_REPLY *rep;
2106 struct ccb_scsiio *csio;
2107 struct mpssas_softc *sassc;
2108 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2109 u8 *TLR_bits, TLR_on;
2112 struct mpssas_target *target;
2113 target_id_t target_id;
2116 mps_dprint(sc, MPS_TRACE,
2117 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2118 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2119 cm->cm_targ->outstanding);
2121 callout_stop(&cm->cm_callout);
2122 mtx_assert(&sc->mps_mtx, MA_OWNED);
2125 ccb = cm->cm_complete_data;
2127 target_id = csio->ccb_h.target_id;
2128 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2130 * XXX KDM if the chain allocation fails, does it matter if we do
2131 * the sync and unload here? It is simpler to do it in every case,
2132 * assuming it doesn't cause problems.
2134 if (cm->cm_data != NULL) {
2135 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2136 dir = BUS_DMASYNC_POSTREAD;
2137 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2138 dir = BUS_DMASYNC_POSTWRITE;
2139 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2140 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2143 cm->cm_targ->completed++;
2144 cm->cm_targ->outstanding--;
2145 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2146 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2148 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2149 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2150 if (cm->cm_reply != NULL)
2151 mpssas_log_command(cm, MPS_RECOVERY,
2152 "completed timedout cm %p ccb %p during recovery "
2153 "ioc %x scsi %x state %x xfer %u\n",
2155 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2156 le32toh(rep->TransferCount));
2158 mpssas_log_command(cm, MPS_RECOVERY,
2159 "completed timedout cm %p ccb %p during recovery\n",
2161 } else if (cm->cm_targ->tm != NULL) {
2162 if (cm->cm_reply != NULL)
2163 mpssas_log_command(cm, MPS_RECOVERY,
2164 "completed cm %p ccb %p during recovery "
2165 "ioc %x scsi %x state %x xfer %u\n",
2167 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2168 le32toh(rep->TransferCount));
2170 mpssas_log_command(cm, MPS_RECOVERY,
2171 "completed cm %p ccb %p during recovery\n",
2173 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2174 mpssas_log_command(cm, MPS_RECOVERY,
2175 "reset completed cm %p ccb %p\n",
2179 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2181 * We ran into an error after we tried to map the command,
2182 * so we're getting a callback without queueing the command
2183 * to the hardware. So we set the status here, and it will
2184 * be retained below. We'll go through the "fast path",
2185 * because there can be no reply when we haven't actually
2186 * gone out to the hardware.
2188 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2191 * Currently the only error included in the mask is
2192 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2193 * chain frames. We need to freeze the queue until we get
2194 * a command that completed without this error, which will
2195 * hopefully have some chain frames attached that we can
2196 * use. If we wanted to get smarter about it, we would
2197 * only unfreeze the queue in this condition when we're
2198 * sure that we're getting some chain frames back. That's
2199 * probably unnecessary.
2201 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2202 xpt_freeze_simq(sassc->sim, 1);
2203 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2204 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2205 "freezing SIM queue\n");
2210 * If this is a Start Stop Unit command and it was issued by the driver
2211 * during shutdown, decrement the refcount to account for all of the
2212 * commands that were sent. All SSU commands should be completed before
2213 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2216 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2217 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2221 /* Take the fast path to completion */
2222 if (cm->cm_reply == NULL) {
2223 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2224 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2225 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2227 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2228 ccb->csio.scsi_status = SCSI_STATUS_OK;
2230 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2231 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2232 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2233 mps_dprint(sc, MPS_XINFO,
2234 "Unfreezing SIM queue\n");
2239 * There are two scenarios where the status won't be
2240 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2241 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2243 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2245 * Freeze the dev queue so that commands are
2246 * executed in the correct order after error
2249 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2250 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2252 mps_free_command(sc, cm);
2257 mpssas_log_command(cm, MPS_XINFO,
2258 "ioc %x scsi %x state %x xfer %u\n",
2259 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2260 le32toh(rep->TransferCount));
2263 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2264 * Volume if an error occurred (normal I/O retry). Use the original
2265 * CCB, but set a flag that this will be a retry so that it's sent to
2266 * the original volume. Free the command but reuse the CCB.
2268 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2269 mps_free_command(sc, cm);
2270 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2271 mpssas_action_scsiio(sassc, ccb);
2274 ccb->ccb_h.sim_priv.entries[0].field = 0;
2276 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2277 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2278 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2280 case MPI2_IOCSTATUS_SUCCESS:
2281 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2283 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2284 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2285 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2287 /* Completion failed at the transport level. */
2288 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2289 MPI2_SCSI_STATE_TERMINATED)) {
2290 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2294 /* In a modern packetized environment, an autosense failure
2295 * implies that there's not much else that can be done to
2296 * recover the command.
2298 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2299 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2304 * CAM doesn't care about SAS Response Info data, but if this is
2305 * the state check if TLR should be done. If not, clear the
2306 * TLR_bits for the target.
2308 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2309 ((le32toh(rep->ResponseInfo) &
2310 MPI2_SCSI_RI_MASK_REASONCODE) ==
2311 MPS_SCSI_RI_INVALID_FRAME)) {
2312 sc->mapping_table[target_id].TLR_bits =
2313 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2317 * Intentionally override the normal SCSI status reporting
2318 * for these two cases. These are likely to happen in a
2319 * multi-initiator environment, and we want to make sure that
2320 * CAM retries these commands rather than fail them.
2322 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2323 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2324 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2328 /* Handle normal status and sense */
2329 csio->scsi_status = rep->SCSIStatus;
2330 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2331 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2333 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2335 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2336 int sense_len, returned_sense_len;
2338 returned_sense_len = min(le32toh(rep->SenseCount),
2339 sizeof(struct scsi_sense_data));
2340 if (returned_sense_len < ccb->csio.sense_len)
2341 ccb->csio.sense_resid = ccb->csio.sense_len -
2344 ccb->csio.sense_resid = 0;
2346 sense_len = min(returned_sense_len,
2347 ccb->csio.sense_len - ccb->csio.sense_resid);
2348 bzero(&ccb->csio.sense_data,
2349 sizeof(ccb->csio.sense_data));
2350 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2351 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2355 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2356 * and it's page code 0 (Supported Page List), and there is
2357 * inquiry data, and this is for a sequential access device, and
2358 * the device is an SSP target, and TLR is supported by the
2359 * controller, turn the TLR_bits value ON if page 0x90 is
2362 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2363 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2364 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2365 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2366 (csio->data_ptr != NULL) &&
2367 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2368 (sc->control_TLR) &&
2369 (sc->mapping_table[target_id].device_info &
2370 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2371 vpd_list = (struct scsi_vpd_supported_page_list *)
2373 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2374 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2375 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2376 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2377 csio->cdb_io.cdb_bytes[4];
2378 alloc_len -= csio->resid;
2379 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2380 if (vpd_list->list[i] == 0x90) {
2388 * If this is a SATA direct-access end device, mark it so that
2389 * a SCSI StartStopUnit command will be sent to it when the
2390 * driver is being shutdown.
2392 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2393 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2394 (sc->mapping_table[target_id].device_info &
2395 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2396 ((sc->mapping_table[target_id].device_info &
2397 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2398 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2399 target = &sassc->targets[target_id];
2400 target->supports_SSU = TRUE;
2401 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2405 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2406 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2408 * If devinfo is 0 this will be a volume. In that case don't
2409 * tell CAM that the volume is not there. We want volumes to
2410 * be enumerated until they are deleted/removed, not just
2413 if (cm->cm_targ->devinfo == 0)
2414 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2416 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2418 case MPI2_IOCSTATUS_INVALID_SGL:
2419 mps_print_scsiio_cmd(sc, cm);
2420 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2422 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2424 * This is one of the responses that comes back when an I/O
2425 * has been aborted. If it is because of a timeout that we
2426 * initiated, just set the status to CAM_CMD_TIMEOUT.
2427 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2428 * command is the same (it gets retried, subject to the
2429 * retry counter), the only difference is what gets printed
2432 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2433 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2435 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2437 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2438 /* resid is ignored for this condition */
2440 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2442 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2443 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2445 * These can sometimes be transient transport-related
2446 * errors, and sometimes persistent drive-related errors.
2447 * We used to retry these without decrementing the retry
2448 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2449 * we hit a persistent drive problem that returns one of
2450 * these error codes, we would retry indefinitely. So,
2451 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2452 * count and avoid infinite retries. We're taking the
2453 * potential risk of flagging false failures in the event
2454 * of a topology-related error (e.g. a SAS expander problem
2455 * causes a command addressed to a drive to fail), but
2456 * avoiding getting into an infinite retry loop.
2458 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2459 mpssas_log_command(cm, MPS_INFO,
2460 "terminated ioc %x loginfo %x scsi %x state %x xfer %u\n",
2461 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2462 rep->SCSIStatus, rep->SCSIState,
2463 le32toh(rep->TransferCount));
2465 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2466 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2467 case MPI2_IOCSTATUS_INVALID_VPID:
2468 case MPI2_IOCSTATUS_INVALID_FIELD:
2469 case MPI2_IOCSTATUS_INVALID_STATE:
2470 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2471 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2472 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2473 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2474 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2476 mpssas_log_command(cm, MPS_XINFO,
2477 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2478 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2479 rep->SCSIStatus, rep->SCSIState,
2480 le32toh(rep->TransferCount));
2481 csio->resid = cm->cm_length;
2482 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2486 mps_sc_failed_io_info(sc,csio,rep);
2488 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2489 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2490 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2491 mps_dprint(sc, MPS_XINFO, "Command completed, "
2492 "unfreezing SIM queue\n");
2495 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2496 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2497 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2500 mps_free_command(sc, cm);
2504 /* All Request reached here are Endian safe */
2506 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2508 pMpi2SCSIIORequest_t pIO_req;
2509 struct mps_softc *sc = sassc->sc;
2511 uint32_t physLBA, stripe_offset, stripe_unit;
2512 uint32_t io_size, column;
2513 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2516 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2517 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2518 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2519 * bit different than the 10/16 CDBs, handle them separately.
2521 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2522 CDB = pIO_req->CDB.CDB32;
2525 * Handle 6 byte CDBs.
2527 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2528 (CDB[0] == WRITE_6))) {
2530 * Get the transfer size in blocks.
2532 io_size = (cm->cm_length >> sc->DD_block_exponent);
2535 * Get virtual LBA given in the CDB.
2537 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2538 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2541 * Check that LBA range for I/O does not exceed volume's
2544 if ((virtLBA + (uint64_t)io_size - 1) <=
2547 * Check if the I/O crosses a stripe boundary. If not,
2548 * translate the virtual LBA to a physical LBA and set
2549 * the DevHandle for the PhysDisk to be used. If it
2550 * does cross a boundry, do normal I/O. To get the
2551 * right DevHandle to use, get the map number for the
2552 * column, then use that map number to look up the
2553 * DevHandle of the PhysDisk.
2555 stripe_offset = (uint32_t)virtLBA &
2556 (sc->DD_stripe_size - 1);
2557 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2558 physLBA = (uint32_t)virtLBA >>
2559 sc->DD_stripe_exponent;
2560 stripe_unit = physLBA / sc->DD_num_phys_disks;
2561 column = physLBA % sc->DD_num_phys_disks;
2562 pIO_req->DevHandle =
2563 htole16(sc->DD_column_map[column].dev_handle);
2564 /* ???? Is this endian safe*/
2565 cm->cm_desc.SCSIIO.DevHandle =
2568 physLBA = (stripe_unit <<
2569 sc->DD_stripe_exponent) + stripe_offset;
2570 ptrLBA = &pIO_req->CDB.CDB32[1];
2571 physLBA_byte = (uint8_t)(physLBA >> 16);
2572 *ptrLBA = physLBA_byte;
2573 ptrLBA = &pIO_req->CDB.CDB32[2];
2574 physLBA_byte = (uint8_t)(physLBA >> 8);
2575 *ptrLBA = physLBA_byte;
2576 ptrLBA = &pIO_req->CDB.CDB32[3];
2577 physLBA_byte = (uint8_t)physLBA;
2578 *ptrLBA = physLBA_byte;
2581 * Set flag that Direct Drive I/O is
2584 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2591 * Handle 10, 12 or 16 byte CDBs.
2593 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2594 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2595 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2596 (CDB[0] == WRITE_12))) {
2598 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2599 * are 0. If not, this is accessing beyond 2TB so handle it in
2600 * the else section. 10-byte and 12-byte CDB's are OK.
2601 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2602 * ready to accept 12byte CDB for Direct IOs.
2604 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2605 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2606 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2608 * Get the transfer size in blocks.
2610 io_size = (cm->cm_length >> sc->DD_block_exponent);
2613 * Get virtual LBA. Point to correct lower 4 bytes of
2614 * LBA in the CDB depending on command.
2616 lba_idx = ((CDB[0] == READ_12) ||
2617 (CDB[0] == WRITE_12) ||
2618 (CDB[0] == READ_10) ||
2619 (CDB[0] == WRITE_10))? 2 : 6;
2620 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2621 ((uint64_t)CDB[lba_idx + 1] << 16) |
2622 ((uint64_t)CDB[lba_idx + 2] << 8) |
2623 (uint64_t)CDB[lba_idx + 3];
2626 * Check that LBA range for I/O does not exceed volume's
2629 if ((virtLBA + (uint64_t)io_size - 1) <=
2632 * Check if the I/O crosses a stripe boundary.
2633 * If not, translate the virtual LBA to a
2634 * physical LBA and set the DevHandle for the
2635 * PhysDisk to be used. If it does cross a
2636 * boundry, do normal I/O. To get the right
2637 * DevHandle to use, get the map number for the
2638 * column, then use that map number to look up
2639 * the DevHandle of the PhysDisk.
2641 stripe_offset = (uint32_t)virtLBA &
2642 (sc->DD_stripe_size - 1);
2643 if ((stripe_offset + io_size) <=
2644 sc->DD_stripe_size) {
2645 physLBA = (uint32_t)virtLBA >>
2646 sc->DD_stripe_exponent;
2647 stripe_unit = physLBA /
2648 sc->DD_num_phys_disks;
2650 sc->DD_num_phys_disks;
2651 pIO_req->DevHandle =
2652 htole16(sc->DD_column_map[column].
2654 cm->cm_desc.SCSIIO.DevHandle =
2657 physLBA = (stripe_unit <<
2658 sc->DD_stripe_exponent) +
2661 &pIO_req->CDB.CDB32[lba_idx];
2662 physLBA_byte = (uint8_t)(physLBA >> 24);
2663 *ptrLBA = physLBA_byte;
2665 &pIO_req->CDB.CDB32[lba_idx + 1];
2666 physLBA_byte = (uint8_t)(physLBA >> 16);
2667 *ptrLBA = physLBA_byte;
2669 &pIO_req->CDB.CDB32[lba_idx + 2];
2670 physLBA_byte = (uint8_t)(physLBA >> 8);
2671 *ptrLBA = physLBA_byte;
2673 &pIO_req->CDB.CDB32[lba_idx + 3];
2674 physLBA_byte = (uint8_t)physLBA;
2675 *ptrLBA = physLBA_byte;
2678 * Set flag that Direct Drive I/O is
2681 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2686 * 16-byte CDB and the upper 4 bytes of the CDB are not
2687 * 0. Get the transfer size in blocks.
2689 io_size = (cm->cm_length >> sc->DD_block_exponent);
2694 virtLBA = ((uint64_t)CDB[2] << 54) |
2695 ((uint64_t)CDB[3] << 48) |
2696 ((uint64_t)CDB[4] << 40) |
2697 ((uint64_t)CDB[5] << 32) |
2698 ((uint64_t)CDB[6] << 24) |
2699 ((uint64_t)CDB[7] << 16) |
2700 ((uint64_t)CDB[8] << 8) |
2704 * Check that LBA range for I/O does not exceed volume's
2707 if ((virtLBA + (uint64_t)io_size - 1) <=
2710 * Check if the I/O crosses a stripe boundary.
2711 * If not, translate the virtual LBA to a
2712 * physical LBA and set the DevHandle for the
2713 * PhysDisk to be used. If it does cross a
2714 * boundry, do normal I/O. To get the right
2715 * DevHandle to use, get the map number for the
2716 * column, then use that map number to look up
2717 * the DevHandle of the PhysDisk.
2719 stripe_offset = (uint32_t)virtLBA &
2720 (sc->DD_stripe_size - 1);
2721 if ((stripe_offset + io_size) <=
2722 sc->DD_stripe_size) {
2723 physLBA = (uint32_t)(virtLBA >>
2724 sc->DD_stripe_exponent);
2725 stripe_unit = physLBA /
2726 sc->DD_num_phys_disks;
2728 sc->DD_num_phys_disks;
2729 pIO_req->DevHandle =
2730 htole16(sc->DD_column_map[column].
2732 cm->cm_desc.SCSIIO.DevHandle =
2735 physLBA = (stripe_unit <<
2736 sc->DD_stripe_exponent) +
2740 * Set upper 4 bytes of LBA to 0. We
2741 * assume that the phys disks are less
2742 * than 2 TB's in size. Then, set the
2745 pIO_req->CDB.CDB32[2] = 0;
2746 pIO_req->CDB.CDB32[3] = 0;
2747 pIO_req->CDB.CDB32[4] = 0;
2748 pIO_req->CDB.CDB32[5] = 0;
2749 ptrLBA = &pIO_req->CDB.CDB32[6];
2750 physLBA_byte = (uint8_t)(physLBA >> 24);
2751 *ptrLBA = physLBA_byte;
2752 ptrLBA = &pIO_req->CDB.CDB32[7];
2753 physLBA_byte = (uint8_t)(physLBA >> 16);
2754 *ptrLBA = physLBA_byte;
2755 ptrLBA = &pIO_req->CDB.CDB32[8];
2756 physLBA_byte = (uint8_t)(physLBA >> 8);
2757 *ptrLBA = physLBA_byte;
2758 ptrLBA = &pIO_req->CDB.CDB32[9];
2759 physLBA_byte = (uint8_t)physLBA;
2760 *ptrLBA = physLBA_byte;
2763 * Set flag that Direct Drive I/O is
2766 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2773 #if __FreeBSD_version >= 900026
/*
 * mpssas_smpio_complete --
 *	Completion handler for an SMP passthrough request queued by
 *	mpssas_send_smpcmd().  Translates the MPI2 SMP reply into a CAM CCB
 *	status, syncs/unloads the DMA map (both directions — the request and
 *	response buffers were both in the S/G list) and frees the command.
 *
 * NOTE(review): this listing is missing interleaved lines (non-contiguous
 * embedded line numbers); goto/xpt_done paths and some declarations are not
 * visible here.
 */
2775 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2777 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2778 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2782 ccb = cm->cm_complete_data;
2785 * Currently there should be no way we can hit this case. It only
2786 * happens when we have a failure to allocate chain frames, and SMP
2787 * commands require two S/G elements only. That should be handled
2788 * in the standard request size.
2790 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2791 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2792 __func__, cm->cm_flags);
2793 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* A NULL reply means the firmware gave us nothing to interpret. */
2797 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2799 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2800 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Recover the (little-endian) SAS address from the original request. */
2804 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2805 sasaddr = le32toh(req->SASAddress.Low);
2806 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2808 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2809 MPI2_IOCSTATUS_SUCCESS ||
2810 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2811 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2812 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2813 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2817 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2818 "%#jx completed successfully\n", __func__,
2819 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2821 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2822 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
2824 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2828 * We sync in both directions because we had DMAs in the S/G list
2829 * in both directions.
2831 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2832 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2833 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2834 mps_free_command(sc, cm);
/*
 * mpssas_send_smpcmd --
 *	Build and queue an MPI2 SMP passthrough request for an XPT_SMP_IO CCB
 *	targeted at 'sasaddr'.  Request and response buffers are passed as a
 *	two-element uio/iovec so that one mps_map_command() call maps both;
 *	completion is handled by mpssas_smpio_complete().
 *	Physical-address CCBs and multi-segment S/G lists are rejected with
 *	CAM_REQ_INVALID (chip supports one segment each way).
 *
 * NOTE(review): this listing is missing interleaved lines; some 'break'/
 * 'return'/'else' statements and declarations are not visible here.
 */
2839 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2841 struct mps_command *cm;
2842 uint8_t *request, *response;
2843 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2844 struct mps_softc *sc;
2851 * XXX We don't yet support physical addresses here.
2853 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2854 case CAM_DATA_PADDR:
2855 case CAM_DATA_SG_PADDR:
2856 mps_dprint(sc, MPS_ERROR,
2857 "%s: physical addresses not supported\n", __func__);
2858 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2863 * The chip does not support more than one buffer for the
2864 * request or response.
2866 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2867 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2868 mps_dprint(sc, MPS_ERROR,
2869 "%s: multiple request or response "
2870 "buffer segments not supported for SMP\n",
2872 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2878 * The CAM_SCATTER_VALID flag was originally implemented
2879 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2880 * We have two. So, just take that flag to mean that we
2881 * might have S/G lists, and look at the S/G segment count
2882 * to figure out whether that is the case for each individual
2885 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2886 bus_dma_segment_t *req_sg;
2888 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2889 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2891 request = ccb->smpio.smp_request;
2893 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2894 bus_dma_segment_t *rsp_sg;
2896 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2897 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2899 response = ccb->smpio.smp_response;
2901 case CAM_DATA_VADDR:
2902 request = ccb->smpio.smp_request;
2903 response = ccb->smpio.smp_response;
2906 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2911 cm = mps_alloc_command(sc);
2913 mps_dprint(sc, MPS_ERROR,
2914 "%s: cannot allocate command\n", __func__);
2915 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2920 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2921 bzero(req, sizeof(*req));
2922 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2924 /* Allow the chip to use any route to this SAS address. */
2925 req->PhysicalPort = 0xff;
2927 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2929 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2931 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2932 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2934 mpi_init_sge(cm, req, &req->SGL);
2937 * Set up a uio to pass into mps_map_command(). This allows us to
2938 * do one map command, and one busdma call in there.
2940 cm->cm_uio.uio_iov = cm->cm_iovec;
2941 cm->cm_uio.uio_iovcnt = 2;
2942 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2945 * The read/write flag isn't used by busdma, but set it just in
2946 * case. This isn't exactly accurate, either, since we're going in
2949 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = outbound SMP request, iovec[1] = inbound SMP response. */
2951 cm->cm_iovec[0].iov_base = request;
2952 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2953 cm->cm_iovec[1].iov_base = response;
2954 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2956 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2957 cm->cm_iovec[1].iov_len;
2960 * Trigger a warning message in mps_data_cb() for the user if we
2961 * wind up exceeding two S/G segments. The chip expects one
2962 * segment for the request and another for the response.
2964 cm->cm_max_segs = 2;
2966 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2967 cm->cm_complete = mpssas_smpio_complete;
2968 cm->cm_complete_data = ccb;
2971 * Tell the mapping code that we're using a uio, and that this is
2972 * an SMP passthrough request. There is a little special-case
2973 * logic there (in mps_data_cb()) to handle the bidirectional
2976 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2977 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2979 /* The chip data format is little endian. */
2980 req->SASAddress.High = htole32(sasaddr >> 32);
2981 req->SASAddress.Low = htole32(sasaddr);
2984 * XXX Note that we don't have a timeout/abort mechanism here.
2985 * From the manual, it looks like task management requests only
2986 * work for SCSI IO and SATA passthrough requests. We may need to
2987 * have a mechanism to retry requests in the event of a chip reset
2988 * at least. Hopefully the chip will insure that any errors short
2989 * of that are relayed back to the driver.
2991 error = mps_map_command(sc, cm);
2992 if ((error != 0) && (error != EINPROGRESS)) {
2993 mps_dprint(sc, MPS_ERROR,
2994 "%s: error %d returned from mps_map_command()\n",
/* Error path: release the command and fail the CCB. */
3002 mps_free_command(sc, cm);
3003 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * mpssas_action_smpio --
 *	Handle an XPT_SMP_IO CCB: resolve the SAS address the SMP frame must
 *	be routed to and hand off to mpssas_send_smpcmd().  Resolution order,
 *	as visible here: (1) the target's own address if it embeds an SMP
 *	target; (2) [disabled] the transport-layer page 0x83 address; (3) the
 *	parent device (normally the expander) via parent_devinfo /
 *	parent_sasaddr (or, under OLD_MPS_PROBE, a parent-target lookup).
 *	Unresolvable targets complete the CCB with CAM_DEV_NOT_THERE.
 *
 * NOTE(review): this listing is missing interleaved lines (non-contiguous
 * embedded numbers); xpt_done()/return statements and the 'sc' assignment
 * are not visible here.
 */
3010 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
3012 struct mps_softc *sc;
3013 struct mpssas_target *targ;
3014 uint64_t sasaddr = 0;
3019 * Make sure the target exists.
3021 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3022 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3023 targ = &sassc->targets[ccb->ccb_h.target_id];
3024 if (targ->handle == 0x0) {
3025 mps_dprint(sc, MPS_ERROR,
3026 "%s: target %d does not exist!\n", __func__,
3027 ccb->ccb_h.target_id);
3028 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3034 * If this device has an embedded SMP target, we'll talk to it
3036 * figure out what the expander's address is.
3038 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3039 sasaddr = targ->sasaddr;
3042 * If we don't have a SAS address for the expander yet, try
3043 * grabbing it from the page 0x83 information cached in the
3044 * transport layer for this target. LSI expanders report the
3045 * expander SAS address as the port-associated SAS address in
3046 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3049 * XXX KDM disable this for now, but leave it commented out so that
3050 * it is obvious that this is another possible way to get the SAS
3053 * The parent handle method below is a little more reliable, and
3054 * the other benefit is that it works for devices other than SES
3055 * devices. So you can send a SMP request to a da(4) device and it
3056 * will get routed to the expander that device is attached to.
3057 * (Assuming the da(4) device doesn't contain an SMP target...)
3061 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3065 * If we still don't have a SAS address for the expander, look for
3066 * the parent device of this device, which is probably the expander.
3069 #ifdef OLD_MPS_PROBE
3070 struct mpssas_target *parent_target;
3073 if (targ->parent_handle == 0x0) {
3074 mps_dprint(sc, MPS_ERROR,
3075 "%s: handle %d does not have a valid "
3076 "parent handle!\n", __func__, targ->handle);
3077 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3080 #ifdef OLD_MPS_PROBE
3081 parent_target = mpssas_find_target_by_handle(sassc, 0,
3082 targ->parent_handle);
3084 if (parent_target == NULL) {
3085 mps_dprint(sc, MPS_ERROR,
3086 "%s: handle %d does not have a valid "
3087 "parent target!\n", __func__, targ->handle);
3088 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3092 if ((parent_target->devinfo &
3093 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3094 mps_dprint(sc, MPS_ERROR,
3095 "%s: handle %d parent %d does not "
3096 "have an SMP target!\n", __func__,
3097 targ->handle, parent_target->handle);
3098 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3103 sasaddr = parent_target->sasaddr;
3104 #else /* OLD_MPS_PROBE */
3105 if ((targ->parent_devinfo &
3106 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3107 mps_dprint(sc, MPS_ERROR,
3108 "%s: handle %d parent %d does not "
3109 "have an SMP target!\n", __func__,
3110 targ->handle, targ->parent_handle);
3111 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3115 if (targ->parent_sasaddr == 0x0) {
3116 mps_dprint(sc, MPS_ERROR,
3117 "%s: handle %d parent handle %d does "
3118 "not have a valid SAS address!\n",
3119 __func__, targ->handle, targ->parent_handle);
3120 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3124 sasaddr = targ->parent_sasaddr;
3125 #endif /* OLD_MPS_PROBE */
/* Still no address: give up and complete the CCB as not-there. */
3130 mps_dprint(sc, MPS_INFO,
3131 "%s: unable to find SAS address for handle %d\n",
3132 __func__, targ->handle);
3133 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3136 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3144 #endif //__FreeBSD_version >= 900026
/*
 * mpssas_action_resetdev --
 *	Handle an XPT_RESET_DEV CCB by issuing an MPI2 Target Reset task
 *	management request (with SAS hard-link / SATA link reset MsgFlags)
 *	for the addressed target.  Completion is handled by
 *	mpssas_resetdev_complete(); the target is marked INRESET meanwhile.
 *	Must be called with the mps mutex held.
 *
 * NOTE(review): this listing is missing interleaved lines; the visible code
 * uses 'sc' before any visible assignment — presumably 'sc = sassc->sc;'
 * is on one of the missing lines.  Confirm against upstream.
 */
3147 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3149 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3150 struct mps_softc *sc;
3151 struct mps_command *tm;
3152 struct mpssas_target *targ;
3154 MPS_FUNCTRACE(sassc->sc);
3155 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3157 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3158 ("Target %d out of bounds in XPT_RESET_DEV\n",
3159 ccb->ccb_h.target_id));
3161 tm = mps_alloc_command(sc);
3163 mps_dprint(sc, MPS_ERROR,
3164 "command alloc failure in mpssas_action_resetdev\n");
3165 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3170 targ = &sassc->targets[ccb->ccb_h.target_id];
3171 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3172 req->DevHandle = htole16(targ->handle);
3173 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3174 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3176 /* SAS Hard Link Reset / SATA Link Reset */
3177 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go on the high-priority queue. */
3180 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3181 tm->cm_complete = mpssas_resetdev_complete;
3182 tm->cm_complete_data = ccb;
3184 targ->flags |= MPSSAS_TARGET_INRESET;
3186 mps_map_command(sc, tm);
/*
 * mpssas_resetdev_complete --
 *	Completion handler for the Target Reset issued by
 *	mpssas_action_resetdev().  Maps the task-management reply's
 *	ResponseCode onto the CCB status (TM_COMPLETE -> CAM_REQ_CMP, and a
 *	bus-device-reset async is announced; otherwise CAM_REQ_CMP_ERR) and
 *	frees the task-management command.  Must be called with the mps
 *	mutex held.
 *
 * NOTE(review): this listing is missing interleaved lines; xpt_done() and
 * some declarations are not visible here.
 */
3190 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3192 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3196 mtx_assert(&sc->mps_mtx, MA_OWNED);
3198 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3199 ccb = tm->cm_complete_data;
3202 * Currently there should be no way we can hit this case. It only
3203 * happens when we have a failure to allocate chain frames, and
3204 * task management commands don't have S/G lists.
3206 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3207 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3209 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3211 mps_dprint(sc, MPS_ERROR,
3212 "%s: cm_flags = %#x for reset of handle %#04x! "
3213 "This should not happen!\n", __func__, tm->cm_flags,
3215 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3219 mps_dprint(sc, MPS_XINFO,
3220 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3221 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3223 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3224 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3225 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3229 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3233 mpssas_free_tm(sc, tm);
/*
 * mpssas_poll --
 *	CAM SIM poll entry point (used when interrupts are unavailable, e.g.
 *	during kernel dumps/panics).  Clears MPS_TRACE debugging first so
 *	that per-poll trace output does not drown the console, then services
 *	the hardware via mps_intr_locked().
 */
3238 mpssas_poll(struct cam_sim *sim)
3240 struct mpssas_softc *sassc;
3242 sassc = cam_sim_softc(sim);
3244 if (sassc->sc->mps_debug & MPS_TRACE) {
3245 /* frequent debug messages during a panic just slow
3246 * everything down too much.
3248 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3249 sassc->sc->mps_debug &= ~MPS_TRACE;
3252 mps_intr_locked(sassc->sc);
/*
 * mpssas_async --
 *	CAM async-event callback.  Visible cases:
 *	- AC_ADVINFO_CHANGED (on FreeBSD versions with XPT_DEV_ADVINFO):
 *	  when the long read-capacity advinfo changes, re-fetch it via a
 *	  CDAI_TYPE_RCAPLONG XPT_DEV_ADVINFO CCB and update (or allocate)
 *	  the per-LUN EEDP state (eedp_formatted / eedp_block_size).
 *	- AC_FOUND_DEVICE: defer to mpssas_check_eedp().
 *
 * NOTE(review): this listing is missing interleaved lines (switch header,
 * 'break's, #else/#endif arms of the version conditionals, and several
 * declarations such as 'found_lun'/'lunid' are not visible here).
 */
3256 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3259 struct mps_softc *sc;
3261 sc = (struct mps_softc *)callback_arg;
3264 #if (__FreeBSD_version >= 1000006) || \
3265 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3266 case AC_ADVINFO_CHANGED: {
3267 struct mpssas_target *target;
3268 struct mpssas_softc *sassc;
3269 struct scsi_read_capacity_data_long rcap_buf;
3270 struct ccb_dev_advinfo cdai;
3271 struct mpssas_lun *lun;
3276 buftype = (uintptr_t)arg;
3282 * We're only interested in read capacity data changes.
3284 if (buftype != CDAI_TYPE_RCAPLONG)
3288 * We should have a handle for this, but check to make sure.
3290 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3291 ("Target %d out of bounds in mpssas_async\n",
3292 xpt_path_target_id(path)));
3293 target = &sassc->targets[xpt_path_target_id(path)];
3294 if (target->handle == 0)
/* Find (or, below, allocate) the per-LUN record for this path's LUN. */
3297 lunid = xpt_path_lun_id(path);
3299 SLIST_FOREACH(lun, &target->luns, lun_link) {
3300 if (lun->lun_id == lunid) {
3306 if (found_lun == 0) {
3307 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3310 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3311 "LUN for EEDP support.\n");
3314 lun->lun_id = lunid;
3315 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data through XPT_DEV_ADVINFO. */
3318 bzero(&rcap_buf, sizeof(rcap_buf));
3319 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3320 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3321 cdai.ccb_h.flags = CAM_DIR_IN;
3322 cdai.buftype = CDAI_TYPE_RCAPLONG;
3323 #if (__FreeBSD_version >= 1100061) || \
3324 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3325 cdai.flags = CDAI_FLAG_NONE;
3329 cdai.bufsiz = sizeof(rcap_buf);
3330 cdai.buf = (uint8_t *)&rcap_buf;
3331 xpt_action((union ccb *)&cdai);
3332 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3333 cam_release_devq(cdai.ccb_h.path,
/* PROT_EN in the read-capacity data means the LUN is EEDP-formatted. */
3336 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3337 && (rcap_buf.prot & SRC16_PROT_EN)) {
3338 lun->eedp_formatted = TRUE;
3339 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3341 lun->eedp_formatted = FALSE;
3342 lun->eedp_block_size = 0;
3347 case AC_FOUND_DEVICE: {
3348 struct ccb_getdev *cgd;
3351 mpssas_check_eedp(sc, path, cgd);
3360 #if (__FreeBSD_version < 901503) || \
3361 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * mpssas_check_eedp --
 *	Pre-advinfo EEDP probe (compiled only on FreeBSD versions without
 *	XPT_DEV_ADVINFO).  If the newly found device advertises protection
 *	support (SPC3_SID_PROTECT in its inquiry data), allocate a per-LUN
 *	record if needed and issue a READ CAPACITY (16) (opcode 0x9E,
 *	service action SRC16) whose completion, mpssas_read_cap_done(),
 *	records whether the LUN is EEDP-formatted and its block size.
 *
 * NOTE(review): this listing is missing interleaved lines (non-contiguous
 * embedded numbers); several declarations (ccb, pathid, path_str) and the
 * early-return / free / xpt_action lines are not visible here.
 */
3363 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3364 struct ccb_getdev *cgd)
3366 struct mpssas_softc *sassc = sc->sassc;
3367 struct ccb_scsiio *csio;
3368 struct scsi_read_capacity_16 *scsi_cmd;
3369 struct scsi_read_capacity_eedp *rcap_buf;
3371 target_id_t targetid;
3374 struct cam_path *local_path;
3375 struct mpssas_target *target;
3376 struct mpssas_lun *lun;
3381 pathid = cam_sim_path(sassc->sim);
3382 targetid = xpt_path_target_id(path);
3383 lunid = xpt_path_lun_id(path);
3385 KASSERT(targetid < sassc->maxtargets,
3386 ("Target %d out of bounds in mpssas_check_eedp\n",
3388 target = &sassc->targets[targetid];
3389 if (target->handle == 0x0)
3393 * Determine if the device is EEDP capable.
3395 * If this flag is set in the inquiry data,
3396 * the device supports protection information,
3397 * and must support the 16 byte read
3398 * capacity command, otherwise continue without
3399 * sending read cap 16
3401 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3405 * Issue a READ CAPACITY 16 command. This info
3406 * is used to determine if the LUN is formatted
3409 ccb = xpt_alloc_ccb_nowait();
3411 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3412 "for EEDP support.\n");
3416 if (xpt_create_path(&local_path, xpt_periph,
3417 pathid, targetid, lunid) != CAM_REQ_CMP) {
3418 mps_dprint(sc, MPS_ERROR, "Unable to create "
3419 "path for EEDP support\n");
3425 * If LUN is already in list, don't create a new
3429 SLIST_FOREACH(lun, &target->luns, lun_link) {
3430 if (lun->lun_id == lunid) {
3436 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3439 mps_dprint(sc, MPS_ERROR,
3440 "Unable to alloc LUN for EEDP support.\n");
3441 xpt_free_path(local_path);
3445 lun->lun_id = lunid;
3446 SLIST_INSERT_HEAD(&target->luns, lun,
3450 xpt_path_string(local_path, path_str, sizeof(path_str));
3452 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3453 path_str, target->handle);
3456 * Issue a READ CAPACITY 16 command for the LUN.
3457 * The mpssas_read_cap_done function will load
3458 * the read cap info into the LUN struct.
3460 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3461 M_MPT2, M_NOWAIT | M_ZERO);
3462 if (rcap_buf == NULL) {
3463 mps_dprint(sc, MPS_FAULT,
3464 "Unable to alloc read capacity buffer for EEDP support.\n");
3465 xpt_free_path(ccb->ccb_h.path);
/* Build the internal XPT_SCSI_IO; 60 s timeout, 4 retries. */
3469 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3471 csio->ccb_h.func_code = XPT_SCSI_IO;
3472 csio->ccb_h.flags = CAM_DIR_IN;
3473 csio->ccb_h.retry_count = 4;
3474 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3475 csio->ccb_h.timeout = 60000;
3476 csio->data_ptr = (uint8_t *)rcap_buf;
3477 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3478 csio->sense_len = MPS_SENSE_LEN;
3479 csio->cdb_len = sizeof(*scsi_cmd);
3480 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* READ CAPACITY (16): opcode 0x9E, allocation length in CDB byte 13. */
3482 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3483 bzero(scsi_cmd, sizeof(*scsi_cmd));
3484 scsi_cmd->opcode = 0x9E;
3485 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3486 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the SAS softc for mpssas_read_cap_done() to retrieve. */
3488 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the READ CAPACITY 16 command the driver issues
 * internally for EEDP discovery.  Locates the LUN the CCB was issued to
 * in the target's LUN list and records whether the LUN is formatted with
 * protection information (EEDP) and, if so, its logical block size.
 * Frees the data buffer, the CAM path, and the CCB when done.
 */
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	/* Nothing to do if CAM handed us no CCB. */
	if (done_ccb == NULL)

	/*
	 * The driver must release the devq itself when the SCSI command was
	 * generated internally, since such commands do not complete back up
	 * through CAM's normal path.  This is currently the only place the
	 * driver issues a SCSI command internally; any future internal
	 * commands will need to release the devq the same way.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
		    /*count*/ 1, /*run_queue*/TRUE);

	/* The submitter allocated this buffer and stored it in the CCB. */
	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.  The softc was stashed in ppriv_ptr1 by the submitter.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in mpssas_read_cap_done\n",
	    done_ccb->ccb_h.target_id));
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)

		/*
		 * Got the LUN in the target's LUN list.  Fill it in with
		 * EEDP info.  If the READ CAP 16 command had some SCSI
		 * error (common if the command is not supported), mark the
		 * LUN as not supporting EEDP and record a block size of 0.
		 */
		if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
		    || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;

		/* PROT_EN bit: the LUN is formatted with protection info. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
			    "target ID %d is formatted for EEDP "
			    "support.\n", done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
3562 #endif /* (__FreeBSD_version < 901503) || \
3563 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Prepare a target for a task-management (reset) request: allocate a CCB
 * and CAM path that the TM completion path uses to release the devq,
 * link the target to the TM command, and mark the target INRESET so no
 * new I/O is started against it while the reset is outstanding.
 */
mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
    struct mpssas_target *target, lun_id_t lun_id)

	/*
	 * Set the INRESET flag for this target so that no I/O will be sent to
	 * the target until the reset has completed.  If an I/O request does
	 * happen, the devq will be frozen.  The CCB holds the path which is
	 * used to release the devq.  The devq is released and the CCB is freed
	 * when the TM completes.
	 */
	ccb = xpt_alloc_ccb_nowait();
	/*
	 * NOTE(review): the ccb == NULL handling is not visible in this view;
	 * confirm allocation failure is checked before ccb is dereferenced.
	 */
	path_id = cam_sim_path(sc->sassc->sim);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
	    target->tid, lun_id) != CAM_REQ_CMP) {
	tm->cm_targ = target;
	target->flags |= MPSSAS_TARGET_INRESET;
/*
 * Begin topology discovery for the SAS domain: raise the
 * wait_for_port_enable flag (keeps the CAM simq frozen until discovery
 * events are processed) and send the IOC a port-enable request.
 */
mpssas_startup(struct mps_softc *sc)

	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mpssas_send_portenable(sc);
/*
 * Build and queue an MPI2 PORT_ENABLE request to the IOC.  Completion is
 * handled asynchronously by mpssas_portenable_complete().
 * NOTE(review): the return value on allocation failure is elided from
 * this view — confirm the error path before relying on it.
 */
mpssas_send_portenable(struct mps_softc *sc)
	MPI2_PORT_ENABLE_REQUEST *request;
	struct mps_command *cm;

	/* Bail out (path elided here) if no command frame is available. */
	if ((cm = mps_alloc_command(sc)) == NULL)
	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
	request->Function = MPI2_FUNCTION_PORT_ENABLE;
	request->MsgFlags = 0;

	/* Port enable uses a default (non-SCSI-IO) request descriptor. */
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_portenable_complete;

	mps_map_command(sc, cm);
	mps_dprint(sc, MPS_XINFO,
	    "mps_send_portenable finished cm %p req %p complete %p\n",
	    cm, cm->cm_req, cm->cm_complete);
3634 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3636 MPI2_PORT_ENABLE_REPLY *reply;
3637 struct mpssas_softc *sassc;
3643 * Currently there should be no way we can hit this case. It only
3644 * happens when we have a failure to allocate chain frames, and
3645 * port enable commands don't have S/G lists.
3647 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3648 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3649 "This should not happen!\n", __func__, cm->cm_flags);
3652 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3654 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3655 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3656 MPI2_IOCSTATUS_SUCCESS)
3657 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3659 mps_free_command(sc, cm);
3660 if (sc->mps_ich.ich_arg != NULL) {
3661 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3662 config_intrhook_disestablish(&sc->mps_ich);
3663 sc->mps_ich.ich_arg = NULL;
3667 * Get WarpDrive info after discovery is complete but before the scan
3668 * starts. At this point, all devices are ready to be exposed to the
3669 * OS. If devices should be hidden instead, take them out of the
3670 * 'targets' array before the scan. The devinfo for a disk will have
3671 * some info and a volume's will be 0. Use that to remove disks.
3673 mps_wd_config_pages(sc);
3676 * Done waiting for port enable to complete. Decrement the refcount.
3677 * If refcount is 0, discovery is complete and a rescan of the bus can
3678 * take place. Since the simq was explicitly frozen before port
3679 * enable, it must be explicitly released here to keep the
3680 * freeze/release count in sync.
3682 sc->wait_for_port_enable = 0;
3683 sc->port_enable_complete = 1;
3684 wakeup(&sc->port_enable_complete);
3685 mpssas_startup_decrement(sassc);
/*
 * Check whether target "id" appears in the driver's comma-separated
 * exclude list; excluded IDs are not exposed to CAM.
 *
 * NOTE(review): strsep() writes NUL bytes into the buffer it walks and
 * advances the pointer, so this mutates sc->exclude_ids in place —
 * confirm behavior is correct when this function is called repeatedly.
 */
mpssas_check_id(struct mpssas_softc *sassc, int id)
	struct mps_softc *sc = sassc->sc;

	ids = &sc->exclude_ids[0];
	while((name = strsep(&ids, ",")) != NULL) {
		/* Skip empty tokens produced by consecutive commas. */
		if (name[0] == '\0')
		/* Base 0: accepts decimal, octal (0...), and hex (0x...). */
		if (strtol(name, NULL, 0) == (long)id)
3707 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3709 struct mpssas_softc *sassc;
3710 struct mpssas_lun *lun, *lun_tmp;
3711 struct mpssas_target *targ;
3716 * The number of targets is based on IOC Facts, so free all of
3717 * the allocated LUNs for each target and then the target buffer
3720 for (i=0; i< maxtargets; i++) {
3721 targ = &sassc->targets[i];
3722 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3726 free(sassc->targets, M_MPT2);
3728 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3729 M_MPT2, M_WAITOK|M_ZERO);
3730 if (!sassc->targets) {
3731 panic("%s failed to alloc targets with error %d\n",