2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * LSI MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for LSI MPT2 */
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
88 #define MPSSAS_DISCOVERY_TIMEOUT 20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
92 * static array to check SCSI OpCode for EEDP protection bits
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI CDB opcode (0x00-0xFF) giving the EEDP
 * (end-to-end data protection) flags to apply for that command:
 * PRO_R (check/remove) for read-class opcodes, PRO_W (insert) for
 * write-class opcodes, PRO_V for verify-class opcodes. A zero entry
 * means the opcode gets no EEDP handling.
 */
97 static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
119 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
120 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
121 static void mpssas_poll(struct cam_sim *sim);
122 static void mpssas_scsiio_timeout(void *data);
123 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
124 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
125 struct mps_command *cm, union ccb *ccb);
126 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
127 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
128 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
129 #if __FreeBSD_version >= 900026
130 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
131 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
134 #endif //FreeBSD_version >= 900026
135 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
136 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
137 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
138 static void mpssas_async(void *callback_arg, uint32_t code,
139 struct cam_path *path, void *arg);
140 #if (__FreeBSD_version < 901503) || \
141 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
142 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
143 struct ccb_getdev *cgd);
144 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 static int mpssas_send_portenable(struct mps_softc *sc);
147 static void mpssas_portenable_complete(struct mps_softc *sc,
148 struct mps_command *cm);
/*
 * Linear scan of the sassc->targets[] array beginning at index 'start';
 * returns the first target whose firmware device handle equals 'handle'.
 * 'start' lets callers resume a scan past a previous match.
 */
150 struct mpssas_target *
151 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 struct mpssas_target *target;
156 for (i = start; i < sassc->maxtargets; i++) {
157 target = &sassc->targets[i];
158 if (target->handle == handle)
165 /* we need to freeze the simq during attach and diag reset, to avoid failing
166 * commands before device handles have been found by discovery. Since
167 * discovery involves reading config pages and possibly sending commands,
168 * discovery actions may continue even after we receive the end of discovery
169 * event, so refcount discovery actions instead of assuming we can unfreeze
170 * the simq when we get the event.
/*
 * Take a reference on discovery activity. Only meaningful while
 * MPSSAS_IN_STARTUP is set; on the 0 -> 1 refcount transition the SIM
 * queue is frozen so CAM holds off commands until discovery settles
 * (see the comment block above for the refcounting rationale).
 */
173 mpssas_startup_increment(struct mpssas_softc *sassc)
175 MPS_FUNCTRACE(sassc->sc);
177 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
178 if (sassc->startup_refcount++ == 0) {
179 /* just starting, freeze the simq */
180 mps_dprint(sassc->sc, MPS_INIT,
181 "%s freezing simq\n", __func__);
/* Pre-1000039 kernels take a different freeze path under this #if. */
182 #if __FreeBSD_version >= 1000039
185 xpt_freeze_simq(sassc->sim, 1);
187 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
188 sassc->startup_refcount);
/*
 * If the SIM queue was frozen via MPSSAS_QUEUE_FROZEN (e.g. during a
 * reinit), clear the flag and release the queue so CAM resumes
 * dispatching commands.
 */
193 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
196 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
197 xpt_release_simq(sassc->sim, 1);
198 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
/*
 * Drop a discovery reference taken by mpssas_startup_increment(). When
 * the count reaches zero, startup is over: clear MPSSAS_IN_STARTUP,
 * release the SIM queue, and (on pre-1000039 kernels) rescan the whole
 * bus to pick up the discovered topology.
 */
203 mpssas_startup_decrement(struct mpssas_softc *sassc)
205 MPS_FUNCTRACE(sassc->sc);
207 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
208 if (--sassc->startup_refcount == 0) {
209 /* finished all discovery-related actions, release
210 * the simq and rescan for the latest topology.
212 mps_dprint(sassc->sc, MPS_INIT,
213 "%s releasing simq\n", __func__);
214 sassc->flags &= ~MPSSAS_IN_STARTUP;
215 xpt_release_simq(sassc->sim, 1);
/* NULL target => wildcard rescan of the entire bus. */
216 #if __FreeBSD_version >= 1000039
219 mpssas_rescan_target(sassc->sc, NULL);
222 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
223 sassc->startup_refcount);
227 /* LSI's firmware requires us to stop sending commands when we're doing task
228 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management. Per the
 * comment above, firmware requires normal I/O to stop during TM, so
 * the 0 -> 1 transition of tm_count freezes the SIM queue; the matching
 * release happens in mpssas_free_tm().
 */
232 mpssas_alloc_tm(struct mps_softc *sc)
234 struct mps_command *tm;
237 tm = mps_alloc_high_priority_command(sc);
239 if (sc->sassc->tm_count++ == 0) {
240 mps_dprint(sc, MPS_RECOVERY,
241 "%s freezing simq\n", __func__);
242 xpt_freeze_simq(sc->sassc->sim, 1);
244 mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
245 sc->sassc->tm_count);
/*
 * Return a task-management command to the high-priority pool and drop
 * the TM refcount; on the 1 -> 0 transition, release the SIM queue
 * frozen by mpssas_alloc_tm().
 */
251 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
253 mps_dprint(sc, MPS_TRACE, "%s", __func__);
257 /* if there are no TMs in use, we can release the simq. We use our
258 * own refcount so that it's easier for a diag reset to cleanup and
261 if (--sc->sassc->tm_count == 0) {
262 mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
263 xpt_release_simq(sc->sassc->sim, 1);
265 mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
266 sc->sassc->tm_count);
268 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of a single target, or of the whole bus when
 * 'targ' is NULL (wildcard). Allocates a CCB without sleeping, builds
 * a path for the target, and picks XPT_SCAN_BUS vs XPT_SCAN_TGT based
 * on whether the target id is the wildcard.
 */
272 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
274 struct mpssas_softc *sassc = sc->sassc;
276 target_id_t targetid;
280 pathid = cam_sim_path(sassc->sim);
282 targetid = CAM_TARGET_WILDCARD;
/* Target id is the index of 'targ' within the targets[] array. */
284 targetid = targ - sassc->targets;
287 * Allocate a CCB and schedule a rescan.
289 ccb = xpt_alloc_ccb_nowait();
291 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
295 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
296 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
302 if (targetid == CAM_TARGET_WILDCARD)
303 ccb->ccb_h.func_code = XPT_SCAN_BUS;
305 ccb->ccb_h.func_code = XPT_SCAN_TGT;
307 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logging for a command. Builds a single line into
 * an sbuf: the CAM path (and CDB/length for XPT_SCSI_IO) when a CCB is
 * attached, otherwise a synthetic "noperiph" sim:bus:target:lun prefix,
 * then the SMID and the caller's formatted message. Emitted only when
 * 'level' is enabled in mps_debug.
 */
312 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
322 /* No need to be in here if debugging isn't enabled */
323 if ((cm->cm_sc->mps_debug & level) == 0)
/* Fixed-size on-stack buffer backs the sbuf; output is truncated, not grown. */
326 sbuf_new(&sb, str, sizeof(str), 0);
330 if (cm->cm_ccb != NULL) {
331 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
333 sbuf_cat(&sb, path_str);
334 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
335 scsi_command_string(&cm->cm_ccb->csio, &sb);
336 sbuf_printf(&sb, "length %d ",
337 cm->cm_ccb->csio.dxfer_len);
/* No CCB attached: fall back to a synthetic path description. */
341 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
342 cam_sim_name(cm->cm_sc->sassc->sim),
343 cam_sim_unit(cm->cm_sc->sassc->sim),
344 cam_sim_bus(cm->cm_sc->sassc->sim),
345 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
349 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
350 sbuf_vprintf(&sb, fmt, ap);
352 mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove(). On success, clears the target's
 * handle/enclosure state so the slot can be reused; the devname and
 * sasaddr are deliberately left intact (see comment below). Unlike
 * bare-drive removal, no follow-up MPI2_SAS_OP_REMOVE_DEVICE is issued
 * for volumes.
 */
359 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
361 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
362 struct mpssas_target *targ;
367 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
368 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
/* A NULL reply most likely means a chip/diag reset completed this TM. */
372 /* XXX retry the remove after the diag reset completes? */
373 mps_dprint(sc, MPS_FAULT,
374 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
375 mpssas_free_tm(sc, tm);
/*
 * NOTE(review): reply->IOCStatus is compared here without le16toh(),
 * unlike mpssas_remove_device()/mpssas_remove_complete() which byte-swap
 * it. Harmless on little-endian hosts but wrong on big-endian — confirm
 * and add le16toh() for consistency.
 */
379 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
380 mps_dprint(sc, MPS_FAULT,
381 "IOCStatus = 0x%x while resetting device 0x%x\n",
382 reply->IOCStatus, handle);
383 mpssas_free_tm(sc, tm);
387 mps_dprint(sc, MPS_XINFO,
388 "Reset aborted %u commands\n", reply->TerminationCount);
389 mps_free_reply(sc, tm->cm_reply_data);
390 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
392 mps_dprint(sc, MPS_XINFO,
393 "clearing target %u handle 0x%04x\n", targ->tid, handle);
396 * Don't clear target if remove fails because things will get confusing.
397 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 * this target id if possible, and so we can assign the same target id
399 * to this device if it comes back in the future.
401 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
404 targ->encl_handle = 0x0;
405 targ->encl_slot = 0x0;
406 targ->exp_dev_handle = 0x0;
408 targ->linkrate = 0x0;
413 mpssas_free_tm(sc, tm);
418 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
419 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by 'handle': mark the
 * target MPSSAS_TARGET_INREMOVAL, trigger a rescan, and issue a
 * target-reset TM whose completion (mpssas_remove_volume) finishes the
 * teardown. On WD controllers configured to always expose disks, the
 * removal is skipped entirely.
 */
422 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
424 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
425 struct mps_softc *sc;
426 struct mps_command *cm;
427 struct mpssas_target *targ = NULL;
429 MPS_FUNCTRACE(sassc->sc);
434 * If this is a WD controller, determine if the disk should be exposed
435 * to the OS or not. If disk should be exposed, return from this
436 * function without doing anything.
438 if (sc->WD_available && (sc->WD_hide_expose ==
439 MPS_WD_EXPOSE_ALWAYS)) {
444 targ = mpssas_find_target_by_handle(sassc, 0, handle);
446 /* FIXME: what is the action? */
447 /* We don't know about this device? */
448 mps_dprint(sc, MPS_ERROR,
449 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
453 targ->flags |= MPSSAS_TARGET_INREMOVAL;
455 cm = mpssas_alloc_tm(sc);
457 mps_dprint(sc, MPS_ERROR,
458 "%s: command alloc failure\n", __func__);
462 mpssas_rescan_target(sc, targ);
/*
 * NOTE(review): unlike mpssas_prepare_remove(), this path neither
 * memset()s the request nor wraps DevHandle in htole16() — confirm
 * whether that divergence is intentional.
 */
464 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
465 req->DevHandle = targ->handle;
466 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
467 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
469 /* SAS Hard Link Reset / SATA Link Reset */
470 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
474 cm->cm_desc.HighPriority.RequestFlags =
475 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
476 cm->cm_complete = mpssas_remove_volume;
/* Stash the handle for the completion routine to recover. */
477 cm->cm_complete_data = (void *)(uintptr_t)handle;
478 mps_map_command(sc, cm);
482 * The MPT2 firmware performs debounce on the link to avoid transient link
483 * errors and false removals. When it does decide that link has been lost
484 * and a device need to go away, it expects that the host will perform a
485 * target reset and then an op remove. The reset has the side-effect of
486 * aborting any outstanding requests for the device, which is required for
487 * the op-remove to succeed. It's not clear if the host should check for
488 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device identified by 'handle'. Per the
 * firmware protocol described above: mark the target INREMOVAL,
 * rescan, then send a target-reset TM; the completion handler
 * (mpssas_remove_device) follows up with the SAS_OP_REMOVE_DEVICE.
 */
491 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
493 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
494 struct mps_softc *sc;
495 struct mps_command *cm;
496 struct mpssas_target *targ = NULL;
498 MPS_FUNCTRACE(sassc->sc);
502 targ = mpssas_find_target_by_handle(sassc, 0, handle);
504 /* FIXME: what is the action? */
505 /* We don't know about this device? */
506 mps_dprint(sc, MPS_ERROR,
507 "%s : invalid handle 0x%x \n", __func__, handle);
511 targ->flags |= MPSSAS_TARGET_INREMOVAL;
513 cm = mpssas_alloc_tm(sc);
515 mps_dprint(sc, MPS_ERROR,
516 "%s: command alloc failure\n", __func__);
520 mpssas_rescan_target(sc, targ);
522 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
523 memset(req, 0, sizeof(*req));
524 req->DevHandle = htole16(targ->handle);
525 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
526 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
528 /* SAS Hard Link Reset / SATA Link Reset */
529 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
533 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
534 cm->cm_complete = mpssas_remove_device;
/* Handle is smuggled through cm_complete_data for the completion. */
535 cm->cm_complete_data = (void *)(uintptr_t)handle;
536 mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM from
 * mpssas_prepare_remove(). After validating the reply, it reuses the
 * same command to send the MPI2_SAS_OP_REMOVE_DEVICE IO-unit-control
 * request (completion: mpssas_remove_complete), then fails any
 * commands still queued on the target with CAM_DEV_NOT_THERE.
 */
540 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
542 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
543 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
544 struct mpssas_target *targ;
545 struct mps_command *next_cm;
550 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
551 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
555 * Currently there should be no way we can hit this case. It only
556 * happens when we have a failure to allocate chain frames, and
557 * task management commands don't have S/G lists.
559 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
560 mps_dprint(sc, MPS_ERROR,
561 "%s: cm_flags = %#x for remove of handle %#04x! "
562 "This should not happen!\n", __func__, tm->cm_flags,
564 mpssas_free_tm(sc, tm);
/* NULL reply: TM was most likely completed by a diag reset. */
569 /* XXX retry the remove after the diag reset completes? */
570 mps_dprint(sc, MPS_FAULT,
571 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
572 mpssas_free_tm(sc, tm);
576 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
577 mps_dprint(sc, MPS_FAULT,
578 "IOCStatus = 0x%x while resetting device 0x%x\n",
579 le16toh(reply->IOCStatus), handle);
580 mpssas_free_tm(sc, tm);
584 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
585 le32toh(reply->TerminationCount));
586 mps_free_reply(sc, tm->cm_reply_data);
587 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
589 /* Reuse the existing command */
590 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
591 memset(req, 0, sizeof(*req));
592 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
593 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
594 req->DevHandle = htole16(handle);
596 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
597 tm->cm_complete = mpssas_remove_complete;
598 tm->cm_complete_data = (void *)(uintptr_t)handle;
600 mps_map_command(sc, tm);
602 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
/* Complete any leftover commands on the target: the device is gone. */
604 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
607 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
608 ccb = tm->cm_complete_data;
609 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
610 mpssas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the SAS_OP_REMOVE_DEVICE
 * request. On success, clears the target's handle/enclosure state
 * (keeping devname/sasaddr, see comment below) and frees any per-LUN
 * bookkeeping still hanging off the target.
 */
615 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
617 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
619 struct mpssas_target *targ;
620 struct mpssas_lun *lun;
624 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
625 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628 * Currently there should be no way we can hit this case. It only
629 * happens when we have a failure to allocate chain frames, and
630 * task management commands don't have S/G lists.
632 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
633 mps_dprint(sc, MPS_XINFO,
634 "%s: cm_flags = %#x for remove of handle %#04x! "
635 "This should not happen!\n", __func__, tm->cm_flags,
637 mpssas_free_tm(sc, tm);
642 /* most likely a chip reset */
643 mps_dprint(sc, MPS_FAULT,
644 "%s NULL reply removing device 0x%04x\n", __func__, handle);
645 mpssas_free_tm(sc, tm);
649 mps_dprint(sc, MPS_XINFO,
650 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
651 handle, le16toh(reply->IOCStatus));
654 * Don't clear target if remove fails because things will get confusing.
655 * Leave the devname and sasaddr intact so that we know to avoid reusing
656 * this target id if possible, and so we can assign the same target id
657 * to this device if it comes back in the future.
659 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
662 targ->encl_handle = 0x0;
663 targ->encl_slot = 0x0;
664 targ->exp_dev_handle = 0x0;
666 targ->linkrate = 0x0;
/* Drain and free the per-LUN list attached to this target. */
670 while(!SLIST_EMPTY(&targ->luns)) {
671 lun = SLIST_FIRST(&targ->luns);
672 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
678 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask the SAS layer cares about (device
 * status, discovery, topology, enclosure and IR/RAID events, log
 * entries) and register mpssas_evt_handler for them, recording the
 * event handle in sc->sassc->mpssas_eh.
 */
682 mpssas_register_events(struct mps_softc *sc)
684 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
687 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
688 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
689 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
690 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
691 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
692 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
693 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
694 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
695 setbit(events, MPI2_EVENT_IR_VOLUME);
696 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
697 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
698 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
700 mps_register_events(sc, events, mpssas_evt_handler, NULL,
701 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer to the controller: allocate the sassc and
 * targets[] array, create the SIMQ/SIM, spin up the event-handling
 * taskqueue, register the bus with CAM, enter startup (simq frozen
 * until discovery completes), register async EEDP-capability callbacks,
 * and finally register for firmware events.
 */
707 mps_attach_sas(struct mps_softc *sc)
709 struct mpssas_softc *sassc;
/*
 * NOTE(review): M_WAITOK malloc cannot return NULL in FreeBSD, so the
 * NULL checks after these two allocations are dead code — confirm.
 */
715 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
717 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
723 * XXX MaxTargets could change during a reinit. Since we don't
724 * resize the targets[] array during such an event, cache the value
725 * of MaxTargets here so that we don't get into trouble later. This
726 * should move into the reinit logic.
728 sassc->maxtargets = sc->facts->MaxTargets;
729 sassc->targets = malloc(sizeof(struct mpssas_target) *
730 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
731 if(!sassc->targets) {
732 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
740 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
741 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
/* SIM depth mirrors the controller's request pool (num_reqs). */
746 unit = device_get_unit(sc->mps_dev);
747 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
748 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
749 if (sassc->sim == NULL) {
750 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
755 TAILQ_INIT(&sassc->ev_queue);
757 /* Initialize taskqueue for Event Handling */
758 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
759 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
760 taskqueue_thread_enqueue, &sassc->ev_tq);
762 /* Run the task queue with lowest priority */
763 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
764 device_get_nameunit(sc->mps_dev));
769 * XXX There should be a bus for every port on the adapter, but since
770 * we're just going to fake the topology for now, we'll pretend that
771 * everything is just a target on a single bus.
773 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
774 mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
781 * Assume that discovery events will start right away.
783 * Hold off boot until discovery is complete.
785 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
786 sc->sassc->startup_refcount = 0;
787 mpssas_startup_increment(sassc);
789 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
790 sassc->discovery_timeouts = 0;
795 * Register for async events so we can determine the EEDP
796 * capabilities of devices.
798 status = xpt_create_path(&sassc->path, /*periph*/NULL,
799 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
801 if (status != CAM_REQ_CMP) {
802 mps_printf(sc, "Error %#x creating sim path\n", status);
/* Newer kernels deliver EEDP info via AC_ADVINFO_CHANGED; older via AC_FOUND_DEVICE. */
807 #if (__FreeBSD_version >= 1000006) || \
808 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
809 event = AC_ADVINFO_CHANGED;
811 event = AC_FOUND_DEVICE;
813 status = xpt_register_async(event, mpssas_async, sc,
815 if (status != CAM_REQ_CMP) {
816 mps_dprint(sc, MPS_ERROR,
817 "Error %#x registering async handler for "
818 "AC_ADVINFO_CHANGED events\n", status);
819 xpt_free_path(sassc->path);
823 if (status != CAM_REQ_CMP) {
825 * EEDP use is the exception, not the rule.
826 * Warn the user, but do not fail to attach.
828 mps_printf(sc, "EEDP capabilities disabled.\n");
833 mpssas_register_events(sc);
/*
 * Tear down the SAS/CAM layer: deregister events, drain/free the event
 * taskqueue (with the lock dropped to avoid deadlock), undo the async
 * registration, release a still-frozen simq if attach bailed early,
 * deregister and free the SIM and SIMQ, then free per-target LUN lists
 * and the targets[] array.
 */
841 mps_detach_sas(struct mps_softc *sc)
843 struct mpssas_softc *sassc;
844 struct mpssas_lun *lun, *lun_tmp;
845 struct mpssas_target *targ;
/* Nothing to do if the SAS layer never attached. */
850 if (sc->sassc == NULL)
854 mps_deregister_events(sc, sassc->mpssas_eh);
857 * Drain and free the event handling taskqueue with the lock
858 * unheld so that any parallel processing tasks drain properly
859 * without deadlocking.
861 if (sassc->ev_tq != NULL)
862 taskqueue_free(sassc->ev_tq);
864 /* Make sure CAM doesn't wedge if we had to bail out early. */
867 /* Deregister our async handler */
868 if (sassc->path != NULL) {
/* Re-registering with event code 0 removes the async callback. */
869 xpt_register_async(0, mpssas_async, sc, sassc->path);
870 xpt_free_path(sassc->path);
874 if (sassc->flags & MPSSAS_IN_STARTUP)
875 xpt_release_simq(sassc->sim, 1);
877 if (sassc->sim != NULL) {
878 xpt_bus_deregister(cam_sim_path(sassc->sim));
879 cam_sim_free(sassc->sim, FALSE);
882 sassc->flags |= MPSSAS_SHUTDOWN;
885 if (sassc->devq != NULL)
886 cam_simq_free(sassc->devq);
888 for(i=0; i< sassc->maxtargets ;i++) {
889 targ = &sassc->targets[i];
890 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
894 free(sassc->targets, M_MPT2);
/*
 * Called when firmware signals end of discovery: cancel the pending
 * discovery-timeout callout, if one was armed.
 */
902 mpssas_discovery_end(struct mpssas_softc *sassc)
904 struct mps_softc *sc = sassc->sc;
908 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
909 callout_stop(&sassc->discovery_callout);
/*
 * CAM action entry point for the SIM. Dispatches on the CCB function
 * code: answers XPT_PATH_INQ locally, reports per-target transfer
 * settings for XPT_GET_TRAN_SETTINGS, computes geometry, handles
 * device reset, fakes success for abort/bus-reset, and forwards
 * SCSI I/O and (on new enough kernels) SMP I/O to their handlers.
 * Must be called with the driver mutex held.
 */
914 mpssas_action(struct cam_sim *sim, union ccb *ccb)
916 struct mpssas_softc *sassc;
918 sassc = cam_sim_softc(sim);
920 MPS_FUNCTRACE(sassc->sc);
921 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
922 ccb->ccb_h.func_code);
923 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
925 switch (ccb->ccb_h.func_code) {
928 struct ccb_pathinq *cpi = &ccb->cpi;
930 cpi->version_num = 1;
931 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
932 cpi->target_sprt = 0;
/* PIM_NOSCAN only exists on __FreeBSD_version >= 1000039. */
933 #if __FreeBSD_version >= 1000039
934 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
936 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
938 cpi->hba_eng_cnt = 0;
939 cpi->max_target = sassc->maxtargets - 1;
/* The initiator claims the highest target id for itself. */
941 cpi->initiator_id = sassc->maxtargets - 1;
942 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
943 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
944 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
945 cpi->unit_number = cam_sim_unit(sim);
946 cpi->bus_id = cam_sim_bus(sim);
947 cpi->base_transfer_speed = 150000;
948 cpi->transport = XPORT_SAS;
949 cpi->transport_version = 0;
950 cpi->protocol = PROTO_SCSI;
951 cpi->protocol_version = SCSI_REV_SPC;
952 #if __FreeBSD_version >= 800001
954 * XXX KDM where does this number come from?
956 cpi->maxio = 256 * 1024;
958 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
961 case XPT_GET_TRAN_SETTINGS:
963 struct ccb_trans_settings *cts;
964 struct ccb_trans_settings_sas *sas;
965 struct ccb_trans_settings_scsi *scsi;
966 struct mpssas_target *targ;
969 sas = &cts->xport_specific.sas;
970 scsi = &cts->proto_specific.scsi;
972 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
973 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
974 cts->ccb_h.target_id));
975 targ = &sassc->targets[cts->ccb_h.target_id];
/* Handle 0 means no device currently occupies this target id. */
976 if (targ->handle == 0x0) {
977 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
981 cts->protocol_version = SCSI_REV_SPC2;
982 cts->transport = XPORT_SAS;
983 cts->transport_version = 0;
985 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the negotiated link rate to a bitrate in kbit/s. */
986 switch (targ->linkrate) {
988 sas->bitrate = 150000;
991 sas->bitrate = 300000;
994 sas->bitrate = 600000;
1000 cts->protocol = PROTO_SCSI;
1001 scsi->valid = CTS_SCSI_VALID_TQ;
1002 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1004 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1007 case XPT_CALC_GEOMETRY:
1008 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1009 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1012 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1013 mpssas_action_resetdev(sassc, ccb);
1018 mps_dprint(sassc->sc, MPS_XINFO,
1019 "mpssas_action faking success for abort or reset\n");
1020 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1023 mpssas_action_scsiio(sassc, ccb);
1025 #if __FreeBSD_version >= 900026
1027 mpssas_action_smpio(sassc, ccb);
/* Anything else is unsupported by this SIM. */
1031 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Notify CAM of a reset event (e.g. AC_BUS_RESET, AC_SENT_BDR) on the
 * given target/lun: build a temporary path, fire the async event, and
 * free the path.
 */
1039 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1040 target_id_t target_id, lun_id_t lun_id)
1042 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1043 struct cam_path *path;
1045 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
1046 ac_code, target_id, lun_id);
1048 if (xpt_create_path(&path, NULL,
1049 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1050 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1055 xpt_async(ac_code, path, NULL);
1056 xpt_free_path(path);
/*
 * During a diag reset, force-complete every outstanding command with a
 * NULL reply: run completion callbacks, wake sleepers, and resync the
 * io_cmds_active counter. Logs any command that was neither completed
 * nor free, since that should be impossible. Caller holds the driver
 * mutex.
 */
1060 mpssas_complete_all_commands(struct mps_softc *sc)
1062 struct mps_command *cm;
1067 mtx_assert(&sc->mps_mtx, MA_OWNED);
1069 /* complete all commands with a NULL reply */
/* Slot 0 is reserved by the firmware interface; start at 1. */
1070 for (i = 1; i < sc->num_reqs; i++) {
1071 cm = &sc->commands[i];
1072 cm->cm_reply = NULL;
1075 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1076 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1078 if (cm->cm_complete != NULL) {
1079 mpssas_log_command(cm, MPS_RECOVERY,
1080 "completing cm %p state %x ccb %p for diag reset\n",
1081 cm, cm->cm_state, cm->cm_ccb);
1083 cm->cm_complete(sc, cm);
1087 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1088 mpssas_log_command(cm, MPS_RECOVERY,
1089 "waking up cm %p state %x ccb %p for diag reset\n",
1090 cm, cm->cm_state, cm->cm_ccb);
/* Keep the active-I/O accounting from going negative. */
1095 if (cm->cm_sc->io_cmds_active != 0) {
1096 cm->cm_sc->io_cmds_active--;
1098 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
1099 "io_cmds_active is out of sync - resynching to "
1103 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1104 /* this should never happen, but if it does, log */
1105 mpssas_log_command(cm, MPS_RECOVERY,
1106 "cm %p state %x flags 0x%x ccb %p during diag "
1107 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset recovery: re-enter startup mode (freezing the simq
 * until rediscovery finishes), announce a bus reset to CAM, complete
 * all outstanding commands, and zero every target's device handles
 * since the firmware will assign new ones during rediscovery.
 */
1114 mpssas_handle_reinit(struct mps_softc *sc)
1118 /* Go back into startup mode and freeze the simq, so that CAM
1119 * doesn't send any commands until after we've rediscovered all
1120 * targets and found the proper device handles for them.
1122 * After the reset, portenable will trigger discovery, and after all
1123 * discovery-related activities have finished, the simq will be
1126 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1127 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1128 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1129 mpssas_startup_increment(sc->sassc);
1131 /* notify CAM of a bus reset */
1132 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1135 /* complete and cleanup after all outstanding commands */
1136 mpssas_complete_all_commands(sc);
1138 mps_dprint(sc, MPS_INIT,
1139 "%s startup %u tm %u after command completion\n",
1140 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1142 /* zero all the target handles, since they may change after the
1143 * reset, and we have to rediscover all the targets and use the new
1146 for (i = 0; i < sc->sassc->maxtargets; i++) {
1147 if (sc->sassc->targets[i].outstanding != 0)
1148 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1149 i, sc->sassc->targets[i].outstanding);
1150 sc->sassc->targets[i].handle = 0x0;
1151 sc->sassc->targets[i].exp_dev_handle = 0x0;
1152 sc->sassc->targets[i].outstanding = 0;
/* Note: flags are overwritten (not OR'ed) with INDIAGRESET here. */
1153 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management command does not
 * complete in time; logs the timed-out TM. Runs with the driver
 * mutex held (mpsafe callout).
 */
1158 mpssas_tm_timeout(void *data)
1160 struct mps_command *tm = data;
1161 struct mps_softc *sc = tm->cm_sc;
1163 mtx_assert(&sc->mps_mtx, MA_OWNED);
1165 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1166 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LUN-reset TM. Stops the TM timeout callout,
 * validates the reply, then counts commands still outstanding on the
 * same LUN: if none remain, recovery for this LU is done (announce
 * AC_SENT_BDR and move on to the next timed-out command, if any);
 * if some remain, the LUN reset effectively failed and recovery
 * escalates to a full target reset.
 */
1171 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1173 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1174 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1175 unsigned int cm_count = 0;
1176 struct mps_command *cm;
1177 struct mpssas_target *targ;
1179 callout_stop(&tm->cm_callout);
1181 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1182 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1186 * Currently there should be no way we can hit this case. It only
1187 * happens when we have a failure to allocate chain frames, and
1188 * task management commands don't have S/G lists.
1189 * XXXSL So should it be an assertion?
1191 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1192 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1193 "This should not happen!\n", __func__, tm->cm_flags);
1194 mpssas_free_tm(sc, tm);
1198 if (reply == NULL) {
1199 mpssas_log_command(tm, MPS_RECOVERY,
1200 "NULL reset reply for tm %p\n", tm);
1201 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1202 /* this completion was due to a reset, just cleanup */
1203 targ->flags &= ~MPSSAS_TARGET_INRESET;
1205 mpssas_free_tm(sc, tm);
1208 /* we should have gotten a reply. */
1214 mpssas_log_command(tm, MPS_RECOVERY,
1215 "logical unit reset status 0x%x code 0x%x count %u\n",
1216 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1217 le32toh(reply->TerminationCount));
1219 /* See if there are any outstanding commands for this LUN.
1220 * This could be made more efficient by using a per-LU data
1221 * structure of some sort.
1223 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1224 if (cm->cm_lun == tm->cm_lun)
1228 if (cm_count == 0) {
1229 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1230 "logical unit %u finished recovery after reset\n",
1233 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1236 /* we've finished recovery for this logical unit. check and
1237 * see if some other logical unit has a timedout command
1238 * that needs to be processed.
1240 cm = TAILQ_FIRST(&targ->timedout_commands);
1242 mpssas_send_abort(sc, tm, cm);
1246 mpssas_free_tm(sc, tm);
1250 /* if we still have commands for this LUN, the reset
1251 * effectively failed, regardless of the status reported.
1252 * Escalate to a target reset.
1254 mpssas_log_command(tm, MPS_RECOVERY,
1255 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1257 mpssas_send_reset(sc, tm,
1258 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * mpssas_target_reset_complete -- completion handler for a TARGET RESET
 * task management request.  If the target has no outstanding commands,
 * recovery for the target and all of its LUNs is finished; otherwise the
 * reset is considered failed and recovery escalates further (escalation
 * code is elided from this excerpt).
 * NOTE(review): 'targ' is read below but its assignment (presumably
 * targ = tm->cm_targ) is elided from this excerpt -- confirm.
 */
1263 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1265 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1266 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1267 struct mpssas_target *targ;
/* Stop the TM timeout watchdog before examining the result. */
1269 callout_stop(&tm->cm_callout);
1271 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1272 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1276 * Currently there should be no way we can hit this case. It only
1277 * happens when we have a failure to allocate chain frames, and
1278 * task management commands don't have S/G lists.
1280 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1281 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1282 "This should not happen!\n", __func__, tm->cm_flags);
1283 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1287 if (reply == NULL) {
1288 mpssas_log_command(tm, MPS_RECOVERY,
1289 "NULL reset reply for tm %p\n", tm);
1290 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1291 /* this completion was due to a reset, just cleanup */
1292 targ->flags &= ~MPSSAS_TARGET_INRESET;
1294 mpssas_free_tm(sc, tm);
1297 /* we should have gotten a reply. */
1303 mpssas_log_command(tm, MPS_RECOVERY,
1304 "target reset status 0x%x code 0x%x count %u\n",
1305 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1306 le32toh(reply->TerminationCount));
/* The reset attempt is over either way; clear the in-reset marker. */
1308 targ->flags &= ~MPSSAS_TARGET_INRESET;
1310 if (targ->outstanding == 0) {
1311 /* we've finished recovery for this target and all
1312 * of its logical units.
1314 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1315 "recovery finished after target reset\n");
1317 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1321 mpssas_free_tm(sc, tm);
1324 /* after a target reset, if this target still has
1325 * outstanding commands, the reset effectively failed,
1326 * regardless of the status reported. escalate.
1328 mpssas_log_command(tm, MPS_RECOVERY,
1329 "target reset complete for tm %p, but still have %u command(s)\n",
1330 tm, targ->outstanding);
/* Reset TM timeout in seconds (scaled by hz for callout_reset() below). */
1335 #define MPS_RESET_TIMEOUT 30
/*
 * mpssas_send_reset -- build and issue a SCSI task management reset of the
 * given type against tm->cm_targ.  Supports
 * MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET and
 * MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; any other type is rejected with
 * an error log.  Arms a MPS_RESET_TIMEOUT-second watchdog and maps the
 * command to the hardware via mps_map_command().
 * NOTE(review): the declaration of 'err' and the return statements are
 * elided from this excerpt -- confirm the error paths in the full source.
 */
1338 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1340 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1341 struct mpssas_target *target;
1344 target = tm->cm_targ;
/* A zero devhandle means the firmware no longer knows this device. */
1345 if (target->handle == 0) {
1346 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1347 __func__, target->tid);
1351 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1352 req->DevHandle = htole16(target->handle);
1353 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1354 req->TaskType = type;
1356 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1357 /* XXX Need to handle invalid LUNs */
1358 MPS_SET_LUN(req->LUN, tm->cm_lun);
1359 tm->cm_targ->logical_unit_resets++;
1360 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1361 "sending logical unit reset\n");
1362 tm->cm_complete = mpssas_logical_unit_reset_complete;
1364 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1365 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1366 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1367 tm->cm_targ->target_resets++;
1368 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1369 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1370 "sending target reset\n");
1371 tm->cm_complete = mpssas_target_reset_complete;
1374 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TM requests go out on the high-priority request queue. */
1379 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1380 tm->cm_complete_data = (void *)tm;
1382 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1383 mpssas_tm_timeout, tm);
1385 err = mps_map_command(sc, tm);
1387 mpssas_log_command(tm, MPS_RECOVERY,
1388 "error %d sending reset type %u\n",
/*
 * mpssas_abort_complete -- completion handler for an ABORT TASK task
 * management request.  Three outcomes: (1) no more timed-out commands for
 * the target -> recovery done, free the TM; (2) the aborted command is no
 * longer at the head of the timed-out queue -> abort succeeded, reuse the
 * TM to abort the next one; (3) the command is still queued -> the abort
 * failed, escalate to a logical unit reset.
 */
1396 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1398 struct mps_command *cm;
1399 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1400 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1401 struct mpssas_target *targ;
/* Stop the TM timeout watchdog before examining the result. */
1403 callout_stop(&tm->cm_callout);
1405 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1406 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1410 * Currently there should be no way we can hit this case. It only
1411 * happens when we have a failure to allocate chain frames, and
1412 * task management commands don't have S/G lists.
1414 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1415 mpssas_log_command(tm, MPS_RECOVERY,
1416 "cm_flags = %#x for abort %p TaskMID %u!\n",
1417 tm->cm_flags, tm, le16toh(req->TaskMID));
1418 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1422 if (reply == NULL) {
1423 mpssas_log_command(tm, MPS_RECOVERY,
1424 "NULL abort reply for tm %p TaskMID %u\n",
1425 tm, le16toh(req->TaskMID));
1426 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1427 /* this completion was due to a reset, just cleanup */
1429 mpssas_free_tm(sc, tm);
1432 /* we should have gotten a reply. */
1438 mpssas_log_command(tm, MPS_RECOVERY,
1439 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1440 le16toh(req->TaskMID),
1441 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1442 le32toh(reply->TerminationCount));
1444 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1446 /* if there are no more timedout commands, we're done with
1447 * error recovery for this target.
1449 mpssas_log_command(tm, MPS_RECOVERY,
1450 "finished recovery after aborting TaskMID %u\n",
1451 le16toh(req->TaskMID));
1454 mpssas_free_tm(sc, tm);
1456 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1457 /* abort success, but we have more timedout commands to abort */
1458 mpssas_log_command(tm, MPS_RECOVERY,
1459 "continuing recovery after aborting TaskMID %u\n",
1460 le16toh(req->TaskMID))
1462 mpssas_send_abort(sc, tm, cm);
1465 /* we didn't get a command completion, so the abort
1466 * failed as far as we're concerned. escalate.
1468 mpssas_log_command(tm, MPS_RECOVERY,
1469 "abort failed for TaskMID %u tm %p\n",
1470 le16toh(req->TaskMID), tm);
1472 mpssas_send_reset(sc, tm,
1473 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Abort TM timeout in seconds (scaled by hz for callout_reset() below). */
1477 #define MPS_ABORT_TIMEOUT 5
/*
 * mpssas_send_abort -- build and issue an ABORT TASK task management
 * request for the timed-out command 'cm', using the pre-allocated TM
 * command 'tm'.  Arms a MPS_ABORT_TIMEOUT-second watchdog and maps the
 * request to the hardware.
 * NOTE(review): 'targ' is read below but its assignment (presumably
 * targ = cm->cm_targ) and the declaration of 'err' are elided from this
 * excerpt -- confirm against the full source.
 */
1480 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1482 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1483 struct mpssas_target *targ;
/* A zero devhandle means the firmware no longer knows this device. */
1487 if (targ->handle == 0) {
1488 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1489 __func__, cm->cm_ccb->ccb_h.target_id);
1493 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1494 "Aborting command %p\n", cm);
1496 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1497 req->DevHandle = htole16(targ->handle);
1498 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1499 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1501 /* XXX Need to handle invalid LUNs */
1502 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the specific outstanding request to abort. */
1504 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests go out on the high-priority request queue. */
1507 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1508 tm->cm_complete = mpssas_abort_complete;
1509 tm->cm_complete_data = (void *)tm;
1510 tm->cm_targ = cm->cm_targ;
1511 tm->cm_lun = cm->cm_lun;
1513 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1514 mpssas_tm_timeout, tm);
1518 err = mps_map_command(sc, tm);
1520 mpssas_log_command(tm, MPS_RECOVERY,
1521 "error %d sending abort for cm %p SMID %u\n",
1522 err, cm, req->TaskMID);
/*
 * mpssas_scsiio_timeout -- callout handler fired when a SCSI I/O command
 * exceeds its CCB timeout.  Marks the command timed out, queues it on the
 * target's timed-out list, and starts recovery (an ABORT TASK TM) unless
 * the target is already in recovery or no TM frame is available.
 * NOTE(review): assignments of 'sc' and 'targ' are elided from this
 * excerpt (presumably sc = cm->cm_sc and targ = cm->cm_targ) -- confirm.
 */
1528 mpssas_scsiio_timeout(void *data)
1530 struct mps_softc *sc;
1531 struct mps_command *cm;
1532 struct mpssas_target *targ;
1534 cm = (struct mps_command *)data;
1538 mtx_assert(&sc->mps_mtx, MA_OWNED);
1540 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1543 * Run the interrupt handler to make sure it's not pending. This
1544 * isn't perfect because the command could have already completed
1545 * and been re-used, though this is unlikely.
1547 mps_intr_locked(sc);
/* If the interrupt handler just completed it, there is nothing to do. */
1548 if (cm->cm_state == MPS_CM_STATE_FREE) {
1549 mpssas_log_command(cm, MPS_XINFO,
1550 "SCSI command %p almost timed out\n", cm);
1554 if (cm->cm_ccb == NULL) {
1555 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1559 mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1565 /* XXX first, check the firmware state, to see if it's still
1566 * operational. if not, do a diag reset.
1568 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1569 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1570 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1572 if (targ->tm != NULL) {
1573 /* target already in recovery, just queue up another
1574 * timedout command to be processed later.
1576 mps_dprint(sc, MPS_RECOVERY,
1577 "queued timedout cm %p for processing by tm %p\n",
1580 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1581 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1584 /* start recovery by aborting the first timedout command */
1585 mpssas_send_abort(sc, targ->tm, cm);
1588 /* XXX queue this target up for recovery once a TM becomes
1589 * available. The firmware only has a limited number of
1590 * HighPriority credits for the high priority requests used
1591 * for task management, and we ran out.
1593 * Isilon: don't worry about this for now, since we have
1594 * more credits than disks in an enclosure, and limit
1595 * ourselves to one TM per target for recovery.
1597 mps_dprint(sc, MPS_RECOVERY,
1598 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * mpssas_action_scsiio -- handle an XPT_SCSI_IO CCB from CAM: validate the
 * target, allocate a driver command, translate the CCB into an MPI2
 * SCSI_IO request (direction, queue tag, LUN, CDB, optional EEDP/T10-PI
 * setup), arm the per-command timeout, and hand the request to the
 * hardware via mps_map_command().
 * NOTE(review): assignments of 'sc' and 'csio' (presumably from
 * sassc->sc and &ccb->csio) and several return/break statements are
 * elided from this excerpt -- confirm against the full source.
 */
1604 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1606 MPI2_SCSI_IO_REQUEST *req;
1607 struct ccb_scsiio *csio;
1608 struct mps_softc *sc;
1609 struct mpssas_target *targ;
1610 struct mpssas_lun *lun;
1611 struct mps_command *cm;
1612 uint8_t i, lba_byte, *ref_tag_addr;
1613 uint16_t eedp_flags;
1614 uint32_t mpi_control;
1618 mtx_assert(&sc->mps_mtx, MA_OWNED);
1621 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1622 ("Target %d out of bounds in XPT_SCSI_IO\n",
1623 csio->ccb_h.target_id));
1624 targ = &sassc->targets[csio->ccb_h.target_id];
1625 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0 means the device is gone as far as the firmware knows. */
1626 if (targ->handle == 0x0) {
1627 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1628 __func__, csio->ccb_h.target_id);
1629 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component members are hidden; only the volume takes SCSI I/O. */
1633 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1634 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1635 "supported %u\n", __func__, csio->ccb_h.target_id);
1636 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1641 * Sometimes, it is possible to get a command that is not "In
1642 * Progress" and was actually aborted by the upper layer. Check for
1643 * this here and complete the command without error.
1645 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1646 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1647 "target %u\n", __func__, csio->ccb_h.target_id);
1652 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1653 * that the volume has timed out. We want volumes to be enumerated
1654 * until they are deleted/removed, not just failed.
1656 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1657 if (targ->devinfo == 0)
1658 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
1660 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
/* Refuse new I/O once the controller is shutting down. */
1665 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1666 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1667 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1672 cm = mps_alloc_command(sc);
/* Out of commands (or mid diag reset): freeze the SIM queue and requeue. */
1673 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1675 mps_free_command(sc, cm);
1677 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1678 xpt_freeze_simq(sassc->sim, 1);
1679 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1681 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1682 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI_IO request; multi-byte fields are little-endian. */
1687 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1688 bzero(req, sizeof(*req));
1689 req->DevHandle = htole16(targ->handle);
1690 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1692 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1693 req->SenseBufferLength = MPS_SENSE_LEN;
1695 req->ChainOffset = 0;
1696 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1701 req->DataLength = htole32(csio->dxfer_len);
1702 req->BidirectionalDataLength = 0;
1703 req->IoFlags = htole16(csio->cdb_len);
1706 /* Note: BiDirectional transfers are not supported */
1707 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1709 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1710 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1713 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1714 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1718 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1722 if (csio->cdb_len == 32)
1723 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1725 * It looks like the hardware doesn't require an explicit tag
1726 * number for each transaction. SAM Task Management not supported
1729 switch (csio->tag_action) {
1730 case MSG_HEAD_OF_Q_TAG:
1731 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1733 case MSG_ORDERED_Q_TAG:
1734 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1737 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1739 case CAM_TAG_ACTION_NONE:
1740 case MSG_SIMPLE_Q_TAG:
1742 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Merge in the per-target Transport Layer Retries setting. */
1745 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1746 req->Control = htole32(mpi_control);
1747 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1748 mps_free_command(sc, cm);
1749 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from wherever CAM stored it (pointer or inline). */
1754 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1755 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1757 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1758 req->IoFlags = htole16(csio->cdb_len);
1761 * Check if EEDP is supported and enabled. If it is then check if the
1762 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1763 * is formatted for EEDP support. If all of this is true, set CDB up
1764 * for EEDP transfer.
1766 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1767 if (sc->eedp_enabled && eedp_flags) {
1768 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1769 if (lun->lun_id == csio->ccb_h.target_lun) {
1774 if ((lun != NULL) && (lun->eedp_formatted)) {
1775 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1776 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1777 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1778 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1779 req->EEDPFlags = htole16(eedp_flags);
1782 * If CDB less than 32, fill in Primary Ref Tag with
1783 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1784 * already there. Also, set protection bit. FreeBSD
1785 * currently does not support CDBs bigger than 16, but
1786 * the code doesn't hurt, and will be here for the
1789 if (csio->cdb_len != 32) {
1790 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1791 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1792 PrimaryReferenceTag;
1793 for (i = 0; i < 4; i++) {
1795 req->CDB.CDB32[lba_byte + i];
1798 req->CDB.EEDP32.PrimaryReferenceTag =
1799 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1800 req->CDB.EEDP32.PrimaryApplicationTagMask =
1802 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1806 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1807 req->EEDPFlags = htole16(eedp_flags);
1808 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Attach the data buffer (if any) and fill in the SGL bookkeeping. */
1814 cm->cm_length = csio->dxfer_len;
1815 if (cm->cm_length != 0) {
1817 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1821 cm->cm_sge = &req->SGL;
1822 cm->cm_sglsize = (32 - 24) * 4;
1823 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1824 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1825 cm->cm_complete = mpssas_scsiio_complete;
1826 cm->cm_complete_data = ccb;
1828 cm->cm_lun = csio->ccb_h.target_lun;
1832 * If HBA is a WD and the command is not for a retry, try to build a
1833 * direct I/O message. If failed, or the command is for a retry, send
1834 * the I/O to the IR volume itself.
1836 if (sc->WD_valid_config) {
1837 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1838 mpssas_direct_drive_io(sassc, cm, ccb);
1840 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
/* CCB timeout is in milliseconds; convert to sbintime for the callout. */
1844 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1845 mpssas_scsiio_timeout, cm, 0);
1848 targ->outstanding++;
1849 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1850 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1852 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1853 __func__, cm, ccb, targ->outstanding);
1855 mps_map_command(sc, cm);
/*
 * mps_response_code -- translate an MPI2 task management response code
 * into a human-readable string and emit it via mps_dprint() at MPS_XINFO
 * level.  Purely a debug-logging helper; has no effect on command state.
 * NOTE(review): the 'desc' declaration, 'break' statements, and default
 * case are elided from this excerpt -- confirm against the full source.
 */
1860 mps_response_code(struct mps_softc *sc, u8 response_code)
1864 switch (response_code) {
1865 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1866 desc = "task management request completed";
1868 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1869 desc = "invalid frame";
1871 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1872 desc = "task management request not supported";
1874 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1875 desc = "task management request failed";
1877 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1878 desc = "task management request succeeded";
1880 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1881 desc = "invalid lun";
1884 desc = "overlapped tag attempted";
1886 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1887 desc = "task queued, however not sent to target";
1893 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1894 response_code, desc);
1897 * mps_sc_failed_io_info - translate a non-successful SCSI_IO request's
 * IOC status, SCSI status, and SCSI state into human-readable strings and
 * log them (plus autosense data and response info, when present) at
 * MPS_XINFO debug level.  Logging-only helper; no command state changes.
1900 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1901 Mpi2SCSIIOReply_t *mpi_reply)
1905 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1906 MPI2_IOCSTATUS_MASK;
1907 u8 scsi_state = mpi_reply->SCSIState;
1908 u8 scsi_status = mpi_reply->SCSIStatus;
1909 char *desc_ioc_state = NULL;
1910 char *desc_scsi_status = NULL;
/* scsi_state description is built up by strcat into sc->tmp_string. */
1911 char *desc_scsi_state = sc->tmp_string;
1912 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000: firmware log-info code that is filtered (action elided). */
1914 if (log_info == 0x31170000)
1917 switch (ioc_status) {
1918 case MPI2_IOCSTATUS_SUCCESS:
1919 desc_ioc_state = "success";
1921 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1922 desc_ioc_state = "invalid function";
1924 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1925 desc_ioc_state = "scsi recovered error";
1927 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1928 desc_ioc_state = "scsi invalid dev handle";
1930 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1931 desc_ioc_state = "scsi device not there";
1933 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1934 desc_ioc_state = "scsi data overrun";
1936 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1937 desc_ioc_state = "scsi data underrun";
1939 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1940 desc_ioc_state = "scsi io data error";
1942 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1943 desc_ioc_state = "scsi protocol error";
1945 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1946 desc_ioc_state = "scsi task terminated";
1948 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1949 desc_ioc_state = "scsi residual mismatch";
1951 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1952 desc_ioc_state = "scsi task mgmt failed";
1954 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1955 desc_ioc_state = "scsi ioc terminated";
1957 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1958 desc_ioc_state = "scsi ext terminated";
1960 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1961 desc_ioc_state = "eedp guard error";
1963 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1964 desc_ioc_state = "eedp ref tag error";
1966 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1967 desc_ioc_state = "eedp app tag error";
1970 desc_ioc_state = "unknown";
1974 switch (scsi_status) {
1975 case MPI2_SCSI_STATUS_GOOD:
1976 desc_scsi_status = "good";
1978 case MPI2_SCSI_STATUS_CHECK_CONDITION:
1979 desc_scsi_status = "check condition";
1981 case MPI2_SCSI_STATUS_CONDITION_MET:
1982 desc_scsi_status = "condition met";
1984 case MPI2_SCSI_STATUS_BUSY:
1985 desc_scsi_status = "busy";
1987 case MPI2_SCSI_STATUS_INTERMEDIATE:
1988 desc_scsi_status = "intermediate";
1990 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
1991 desc_scsi_status = "intermediate condmet";
1993 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
1994 desc_scsi_status = "reservation conflict";
1996 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
1997 desc_scsi_status = "command terminated";
1999 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2000 desc_scsi_status = "task set full";
2002 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2003 desc_scsi_status = "aca active";
2005 case MPI2_SCSI_STATUS_TASK_ABORTED:
2006 desc_scsi_status = "task aborted";
2009 desc_scsi_status = "unknown";
2013 desc_scsi_state[0] = '\0';
/* NOTE(review): the guard between these two lines (presumably
 * "if (!scsi_state)") is elided here; without it the strcat calls below
 * would target a string literal -- confirm against the full source. */
2015 desc_scsi_state = " ";
2016 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2017 strcat(desc_scsi_state, "response info ");
2018 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2019 strcat(desc_scsi_state, "state terminated ");
2020 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2021 strcat(desc_scsi_state, "no status ");
2022 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2023 strcat(desc_scsi_state, "autosense failed ");
2024 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2025 strcat(desc_scsi_state, "autosense valid ");
2027 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2028 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2029 /* We can add more detail about underflow data here
2032 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2033 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2034 desc_scsi_state, scsi_state);
/* Dump the autosense buffer only when debug XINFO is enabled. */
2036 if (sc->mps_debug & MPS_XINFO &&
2037 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2038 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2039 scsi_sense_print(csio);
2040 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2043 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2044 response_info = le32toh(mpi_reply->ResponseInfo);
2045 response_bytes = (u8 *)&response_info;
2046 mps_response_code(sc,response_bytes[0]);
2051 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2053 MPI2_SCSI_IO_REPLY *rep;
2055 struct ccb_scsiio *csio;
2056 struct mpssas_softc *sassc;
2057 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2058 u8 *TLR_bits, TLR_on;
2063 mps_dprint(sc, MPS_TRACE,
2064 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2065 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2066 cm->cm_targ->outstanding);
2068 callout_stop(&cm->cm_callout);
2069 mtx_assert(&sc->mps_mtx, MA_OWNED);
2072 ccb = cm->cm_complete_data;
2074 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2076 * XXX KDM if the chain allocation fails, does it matter if we do
2077 * the sync and unload here? It is simpler to do it in every case,
2078 * assuming it doesn't cause problems.
2080 if (cm->cm_data != NULL) {
2081 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2082 dir = BUS_DMASYNC_POSTREAD;
2083 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2084 dir = BUS_DMASYNC_POSTWRITE;
2085 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2086 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2089 cm->cm_targ->completed++;
2090 cm->cm_targ->outstanding--;
2091 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2092 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2094 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2095 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2096 if (cm->cm_reply != NULL)
2097 mpssas_log_command(cm, MPS_RECOVERY,
2098 "completed timedout cm %p ccb %p during recovery "
2099 "ioc %x scsi %x state %x xfer %u\n",
2101 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2102 le32toh(rep->TransferCount));
2104 mpssas_log_command(cm, MPS_RECOVERY,
2105 "completed timedout cm %p ccb %p during recovery\n",
2107 } else if (cm->cm_targ->tm != NULL) {
2108 if (cm->cm_reply != NULL)
2109 mpssas_log_command(cm, MPS_RECOVERY,
2110 "completed cm %p ccb %p during recovery "
2111 "ioc %x scsi %x state %x xfer %u\n",
2113 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2114 le32toh(rep->TransferCount));
2116 mpssas_log_command(cm, MPS_RECOVERY,
2117 "completed cm %p ccb %p during recovery\n",
2119 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2120 mpssas_log_command(cm, MPS_RECOVERY,
2121 "reset completed cm %p ccb %p\n",
2125 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2127 * We ran into an error after we tried to map the command,
2128 * so we're getting a callback without queueing the command
2129 * to the hardware. So we set the status here, and it will
2130 * be retained below. We'll go through the "fast path",
2131 * because there can be no reply when we haven't actually
2132 * gone out to the hardware.
2134 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2137 * Currently the only error included in the mask is
2138 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2139 * chain frames. We need to freeze the queue until we get
2140 * a command that completed without this error, which will
2141 * hopefully have some chain frames attached that we can
2142 * use. If we wanted to get smarter about it, we would
2143 * only unfreeze the queue in this condition when we're
2144 * sure that we're getting some chain frames back. That's
2145 * probably unnecessary.
2147 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2148 xpt_freeze_simq(sassc->sim, 1);
2149 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2150 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2151 "freezing SIM queue\n");
2156 * If this is a Start Stop Unit command and it was issued by the driver
2157 * during shutdown, decrement the refcount to account for all of the
2158 * commands that were sent. All SSU commands should be completed before
2159 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2162 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2163 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2167 /* Take the fast path to completion */
2168 if (cm->cm_reply == NULL) {
2169 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2170 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2171 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2173 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2174 ccb->csio.scsi_status = SCSI_STATUS_OK;
2176 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2177 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2178 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2179 mps_dprint(sc, MPS_XINFO,
2180 "Unfreezing SIM queue\n");
2185 * There are two scenarios where the status won't be
2186 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2187 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2189 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2191 * Freeze the dev queue so that commands are
2192 * executed in the correct order after error
2195 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2196 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2198 mps_free_command(sc, cm);
2203 mpssas_log_command(cm, MPS_XINFO,
2204 "ioc %x scsi %x state %x xfer %u\n",
2205 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2206 le32toh(rep->TransferCount));
2209 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2210 * Volume if an error occurred (normal I/O retry). Use the original
2211 * CCB, but set a flag that this will be a retry so that it's sent to
2212 * the original volume. Free the command but reuse the CCB.
2214 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2215 mps_free_command(sc, cm);
2216 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2217 mpssas_action_scsiio(sassc, ccb);
2220 ccb->ccb_h.sim_priv.entries[0].field = 0;
2222 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2223 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2224 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2226 case MPI2_IOCSTATUS_SUCCESS:
2227 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2229 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2230 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2231 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2233 /* Completion failed at the transport level. */
2234 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2235 MPI2_SCSI_STATE_TERMINATED)) {
2236 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2240 /* In a modern packetized environment, an autosense failure
2241 * implies that there's not much else that can be done to
2242 * recover the command.
2244 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2245 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2250 * CAM doesn't care about SAS Response Info data, but if this is
2251 * the state check if TLR should be done. If not, clear the
2252 * TLR_bits for the target.
2254 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2255 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2256 MPS_SCSI_RI_INVALID_FRAME)) {
2257 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2258 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2262 * Intentionally override the normal SCSI status reporting
2263 * for these two cases. These are likely to happen in a
2264 * multi-initiator environment, and we want to make sure that
2265 * CAM retries these commands rather than fail them.
2267 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2268 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2269 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2273 /* Handle normal status and sense */
2274 csio->scsi_status = rep->SCSIStatus;
2275 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2276 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2278 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2280 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2281 int sense_len, returned_sense_len;
2283 returned_sense_len = min(le32toh(rep->SenseCount),
2284 sizeof(struct scsi_sense_data));
2285 if (returned_sense_len < ccb->csio.sense_len)
2286 ccb->csio.sense_resid = ccb->csio.sense_len -
2289 ccb->csio.sense_resid = 0;
2291 sense_len = min(returned_sense_len,
2292 ccb->csio.sense_len - ccb->csio.sense_resid);
2293 bzero(&ccb->csio.sense_data,
2294 sizeof(ccb->csio.sense_data));
2295 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2296 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2300 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2301 * and it's page code 0 (Supported Page List), and there is
2302 * inquiry data, and this is for a sequential access device, and
2303 * the device is an SSP target, and TLR is supported by the
2304 * controller, turn the TLR_bits value ON if page 0x90 is
2307 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2308 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2309 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2310 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2311 (csio->data_ptr != NULL) &&
2312 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2313 (sc->control_TLR) &&
2314 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2315 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2316 vpd_list = (struct scsi_vpd_supported_page_list *)
2318 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2320 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2321 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2322 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2323 csio->cdb_io.cdb_bytes[4];
2324 alloc_len -= csio->resid;
2325 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2326 if (vpd_list->list[i] == 0x90) {
2333 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2334 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2336 * If devinfo is 0 this will be a volume. In that case don't
2337 * tell CAM that the volume is not there. We want volumes to
2338 * be enumerated until they are deleted/removed, not just
2341 if (cm->cm_targ->devinfo == 0)
2342 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2344 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2346 case MPI2_IOCSTATUS_INVALID_SGL:
2347 mps_print_scsiio_cmd(sc, cm);
2348 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2350 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2352 * This is one of the responses that comes back when an I/O
2353 * has been aborted. If it is because of a timeout that we
2354 * initiated, just set the status to CAM_CMD_TIMEOUT.
2355 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2356 * command is the same (it gets retried, subject to the
2357 * retry counter), the only difference is what gets printed
2360 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2361 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2363 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2365 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2366 /* resid is ignored for this condition */
2368 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2370 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2371 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2373 * Since these are generally external (i.e. hopefully
2374 * transient transport-related) errors, retry these without
2375 * decrementing the retry count.
2377 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2378 mpssas_log_command(cm, MPS_INFO,
2379 "terminated ioc %x scsi %x state %x xfer %u\n",
2380 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2381 le32toh(rep->TransferCount));
2383 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2384 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2385 case MPI2_IOCSTATUS_INVALID_VPID:
2386 case MPI2_IOCSTATUS_INVALID_FIELD:
2387 case MPI2_IOCSTATUS_INVALID_STATE:
2388 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2389 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2390 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2391 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2392 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2394 mpssas_log_command(cm, MPS_XINFO,
2395 "completed ioc %x scsi %x state %x xfer %u\n",
2396 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2397 le32toh(rep->TransferCount));
2398 csio->resid = cm->cm_length;
2399 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2403 mps_sc_failed_io_info(sc,csio,rep);
2405 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2406 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2407 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2408 mps_dprint(sc, MPS_XINFO, "Command completed, "
2409 "unfreezing SIM queue\n");
2412 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2413 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2414 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2417 mps_free_command(sc, cm);
2421 /* All requests reaching this point are endian-safe */
2423 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2425 pMpi2SCSIIORequest_t pIO_req;
2426 struct mps_softc *sc = sassc->sc;
2428 uint32_t physLBA, stripe_offset, stripe_unit;
2429 uint32_t io_size, column;
2430 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2433 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2434 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2435 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2436 * bit different than the 10/16 CDBs, handle them separately.
2438 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2439 CDB = pIO_req->CDB.CDB32;
2442 * Handle 6 byte CDBs.
2444 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2445 (CDB[0] == WRITE_6))) {
2447 * Get the transfer size in blocks.
2449 io_size = (cm->cm_length >> sc->DD_block_exponent);
2452 * Get virtual LBA given in the CDB.
2454 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2455 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2458 * Check that LBA range for I/O does not exceed volume's
2461 if ((virtLBA + (uint64_t)io_size - 1) <=
2464 * Check if the I/O crosses a stripe boundary. If not,
2465 * translate the virtual LBA to a physical LBA and set
2466 * the DevHandle for the PhysDisk to be used. If it
2467 * does cross a boundry, do normal I/O. To get the
2468 * right DevHandle to use, get the map number for the
2469 * column, then use that map number to look up the
2470 * DevHandle of the PhysDisk.
2472 stripe_offset = (uint32_t)virtLBA &
2473 (sc->DD_stripe_size - 1);
2474 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2475 physLBA = (uint32_t)virtLBA >>
2476 sc->DD_stripe_exponent;
2477 stripe_unit = physLBA / sc->DD_num_phys_disks;
2478 column = physLBA % sc->DD_num_phys_disks;
2479 pIO_req->DevHandle =
2480 htole16(sc->DD_column_map[column].dev_handle);
2481 /* ???? Is this endian safe*/
2482 cm->cm_desc.SCSIIO.DevHandle =
2485 physLBA = (stripe_unit <<
2486 sc->DD_stripe_exponent) + stripe_offset;
2487 ptrLBA = &pIO_req->CDB.CDB32[1];
2488 physLBA_byte = (uint8_t)(physLBA >> 16);
2489 *ptrLBA = physLBA_byte;
2490 ptrLBA = &pIO_req->CDB.CDB32[2];
2491 physLBA_byte = (uint8_t)(physLBA >> 8);
2492 *ptrLBA = physLBA_byte;
2493 ptrLBA = &pIO_req->CDB.CDB32[3];
2494 physLBA_byte = (uint8_t)physLBA;
2495 *ptrLBA = physLBA_byte;
2498 * Set flag that Direct Drive I/O is
2501 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2508 * Handle 10, 12 or 16 byte CDBs.
2510 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2511 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2512 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2513 (CDB[0] == WRITE_12))) {
2515 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2516 * are 0. If not, this is accessing beyond 2TB so handle it in
2517 * the else section. 10-byte and 12-byte CDB's are OK.
2518 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2519 * ready to accept 12byte CDB for Direct IOs.
2521 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2522 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2523 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2525 * Get the transfer size in blocks.
2527 io_size = (cm->cm_length >> sc->DD_block_exponent);
2530 * Get virtual LBA. Point to correct lower 4 bytes of
2531 * LBA in the CDB depending on command.
2533 lba_idx = ((CDB[0] == READ_12) ||
2534 (CDB[0] == WRITE_12) ||
2535 (CDB[0] == READ_10) ||
2536 (CDB[0] == WRITE_10))? 2 : 6;
2537 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2538 ((uint64_t)CDB[lba_idx + 1] << 16) |
2539 ((uint64_t)CDB[lba_idx + 2] << 8) |
2540 (uint64_t)CDB[lba_idx + 3];
2543 * Check that LBA range for I/O does not exceed volume's
2546 if ((virtLBA + (uint64_t)io_size - 1) <=
2549 * Check if the I/O crosses a stripe boundary.
2550 * If not, translate the virtual LBA to a
2551 * physical LBA and set the DevHandle for the
2552 * PhysDisk to be used. If it does cross a
2553 * boundry, do normal I/O. To get the right
2554 * DevHandle to use, get the map number for the
2555 * column, then use that map number to look up
2556 * the DevHandle of the PhysDisk.
2558 stripe_offset = (uint32_t)virtLBA &
2559 (sc->DD_stripe_size - 1);
2560 if ((stripe_offset + io_size) <=
2561 sc->DD_stripe_size) {
2562 physLBA = (uint32_t)virtLBA >>
2563 sc->DD_stripe_exponent;
2564 stripe_unit = physLBA /
2565 sc->DD_num_phys_disks;
2567 sc->DD_num_phys_disks;
2568 pIO_req->DevHandle =
2569 htole16(sc->DD_column_map[column].
2571 cm->cm_desc.SCSIIO.DevHandle =
2574 physLBA = (stripe_unit <<
2575 sc->DD_stripe_exponent) +
2578 &pIO_req->CDB.CDB32[lba_idx];
2579 physLBA_byte = (uint8_t)(physLBA >> 24);
2580 *ptrLBA = physLBA_byte;
2582 &pIO_req->CDB.CDB32[lba_idx + 1];
2583 physLBA_byte = (uint8_t)(physLBA >> 16);
2584 *ptrLBA = physLBA_byte;
2586 &pIO_req->CDB.CDB32[lba_idx + 2];
2587 physLBA_byte = (uint8_t)(physLBA >> 8);
2588 *ptrLBA = physLBA_byte;
2590 &pIO_req->CDB.CDB32[lba_idx + 3];
2591 physLBA_byte = (uint8_t)physLBA;
2592 *ptrLBA = physLBA_byte;
2595 * Set flag that Direct Drive I/O is
2598 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2603 * 16-byte CDB and the upper 4 bytes of the CDB are not
2604 * 0. Get the transfer size in blocks.
2606 io_size = (cm->cm_length >> sc->DD_block_exponent);
2611 virtLBA = ((uint64_t)CDB[2] << 54) |
2612 ((uint64_t)CDB[3] << 48) |
2613 ((uint64_t)CDB[4] << 40) |
2614 ((uint64_t)CDB[5] << 32) |
2615 ((uint64_t)CDB[6] << 24) |
2616 ((uint64_t)CDB[7] << 16) |
2617 ((uint64_t)CDB[8] << 8) |
2621 * Check that LBA range for I/O does not exceed volume's
2624 if ((virtLBA + (uint64_t)io_size - 1) <=
2627 * Check if the I/O crosses a stripe boundary.
2628 * If not, translate the virtual LBA to a
2629 * physical LBA and set the DevHandle for the
2630 * PhysDisk to be used. If it does cross a
2631 * boundry, do normal I/O. To get the right
2632 * DevHandle to use, get the map number for the
2633 * column, then use that map number to look up
2634 * the DevHandle of the PhysDisk.
2636 stripe_offset = (uint32_t)virtLBA &
2637 (sc->DD_stripe_size - 1);
2638 if ((stripe_offset + io_size) <=
2639 sc->DD_stripe_size) {
2640 physLBA = (uint32_t)(virtLBA >>
2641 sc->DD_stripe_exponent);
2642 stripe_unit = physLBA /
2643 sc->DD_num_phys_disks;
2645 sc->DD_num_phys_disks;
2646 pIO_req->DevHandle =
2647 htole16(sc->DD_column_map[column].
2649 cm->cm_desc.SCSIIO.DevHandle =
2652 physLBA = (stripe_unit <<
2653 sc->DD_stripe_exponent) +
2657 * Set upper 4 bytes of LBA to 0. We
2658 * assume that the phys disks are less
2659 * than 2 TB's in size. Then, set the
2662 pIO_req->CDB.CDB32[2] = 0;
2663 pIO_req->CDB.CDB32[3] = 0;
2664 pIO_req->CDB.CDB32[4] = 0;
2665 pIO_req->CDB.CDB32[5] = 0;
2666 ptrLBA = &pIO_req->CDB.CDB32[6];
2667 physLBA_byte = (uint8_t)(physLBA >> 24);
2668 *ptrLBA = physLBA_byte;
2669 ptrLBA = &pIO_req->CDB.CDB32[7];
2670 physLBA_byte = (uint8_t)(physLBA >> 16);
2671 *ptrLBA = physLBA_byte;
2672 ptrLBA = &pIO_req->CDB.CDB32[8];
2673 physLBA_byte = (uint8_t)(physLBA >> 8);
2674 *ptrLBA = physLBA_byte;
2675 ptrLBA = &pIO_req->CDB.CDB32[9];
2676 physLBA_byte = (uint8_t)physLBA;
2677 *ptrLBA = physLBA_byte;
2680 * Set flag that Direct Drive I/O is
2683 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2690 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command queued by
 * mpssas_send_smpcmd().  Translates the firmware reply into a CAM CCB
 * status, then syncs/unloads the bidirectional DMA map and frees the
 * command.
 * NOTE(review): this extract is missing interleaved original lines
 * (local declarations, braces, goto/bailout lines); annotate only.
 */
2692 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2694 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2695 MPI2_SMP_PASSTHROUGH_REQUEST *req;
/* CCB stashed by mpssas_send_smpcmd() via cm_complete_data. */
2699 ccb = cm->cm_complete_data;
2702 * Currently there should be no way we can hit this case. It only
2703 * happens when we have a failure to allocate chain frames, and SMP
2704 * commands require two S/G elements only. That should be handled
2705 * in the standard request size.
2707 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2708 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2709 __func__, cm->cm_flags);
2710 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* A NULL reply means the firmware never produced one; fail the CCB. */
2714 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2716 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2717 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Rebuild the 64-bit SAS address from the little-endian request fields. */
2721 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2722 sasaddr = le32toh(req->SASAddress.Low);
2723 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
/* Both the IOC status and the SAS transport status must report success. */
2725 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2726 MPI2_IOCSTATUS_SUCCESS ||
2727 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2728 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2729 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2730 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2734 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2735 "%#jx completed successfully\n", __func__,
2736 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the SMP function result code. */
2738 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2739 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
2741 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2745 * We sync in both directions because we had DMAs in the S/G list
2746 * in both directions.
2748 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2749 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2750 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2751 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for an XPT_SMP_IO CCB to
 * the expander at 'sasaddr'.  The request and response buffers are mapped
 * together through one uio/busdma pass; completion is routed to
 * mpssas_smpio_complete().
 * NOTE(review): this extract is missing interleaved original lines
 * (braces, else branches, xpt_done calls); annotate only.
 */
2756 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2758 struct mps_command *cm;
2759 uint8_t *request, *response;
2760 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2761 struct mps_softc *sc;
2770 * XXX We don't yet support physical addresses here.
2772 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2773 case CAM_DATA_PADDR:
2774 case CAM_DATA_SG_PADDR:
2775 mps_dprint(sc, MPS_ERROR,
2776 "%s: physical addresses not supported\n", __func__);
2777 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2782 * The chip does not support more than one buffer for the
2783 * request or response.
2785 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2786 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2787 mps_dprint(sc, MPS_ERROR,
2788 "%s: multiple request or response "
2789 "buffer segments not supported for SMP\n",
2791 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2797 * The CAM_SCATTER_VALID flag was originally implemented
2798 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2799 * We have two. So, just take that flag to mean that we
2800 * might have S/G lists, and look at the S/G segment count
2801 * to figure out whether that is the case for each individual
2804 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2805 bus_dma_segment_t *req_sg;
2807 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2808 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2810 request = ccb->smpio.smp_request;
2812 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2813 bus_dma_segment_t *rsp_sg;
2815 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2816 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2818 response = ccb->smpio.smp_response;
/* Plain virtual addresses: use the CCB buffers directly. */
2820 case CAM_DATA_VADDR:
2821 request = ccb->smpio.smp_request;
2822 response = ccb->smpio.smp_response;
2825 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2830 cm = mps_alloc_command(sc);
2832 mps_dprint(sc, MPS_ERROR,
2833 "%s: cannot allocate command\n", __func__);
2834 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the MPI2 SMP passthrough request frame. */
2839 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2840 bzero(req, sizeof(*req));
2841 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2843 /* Allow the chip to use any route to this SAS address. */
2844 req->PhysicalPort = 0xff;
2846 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2848 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2850 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2851 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2853 mpi_init_sge(cm, req, &req->SGL);
2856 * Set up a uio to pass into mps_map_command(). This allows us to
2857 * do one map command, and one busdma call in there.
2859 cm->cm_uio.uio_iov = cm->cm_iovec;
2860 cm->cm_uio.uio_iovcnt = 2;
2861 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2864 * The read/write flag isn't used by busdma, but set it just in
2865 * case. This isn't exactly accurate, either, since we're going in
2868 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 = outbound SMP request, iovec 1 = inbound SMP response. */
2870 cm->cm_iovec[0].iov_base = request;
2871 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2872 cm->cm_iovec[1].iov_base = response;
2873 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2875 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2876 cm->cm_iovec[1].iov_len;
2879 * Trigger a warning message in mps_data_cb() for the user if we
2880 * wind up exceeding two S/G segments. The chip expects one
2881 * segment for the request and another for the response.
2883 cm->cm_max_segs = 2;
2885 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2886 cm->cm_complete = mpssas_smpio_complete;
2887 cm->cm_complete_data = ccb;
2890 * Tell the mapping code that we're using a uio, and that this is
2891 * an SMP passthrough request. There is a little special-case
2892 * logic there (in mps_data_cb()) to handle the bidirectional
2895 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2896 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2898 /* The chip data format is little endian. */
2899 req->SASAddress.High = htole32(sasaddr >> 32);
2900 req->SASAddress.Low = htole32(sasaddr);
2903 * XXX Note that we don't have a timeout/abort mechanism here.
2904 * From the manual, it looks like task management requests only
2905 * work for SCSI IO and SATA passthrough requests. We may need to
2906 * have a mechanism to retry requests in the event of a chip reset
2907 * at least. Hopefully the chip will insure that any errors short
2908 * of that are relayed back to the driver.
2910 error = mps_map_command(sc, cm);
2911 if ((error != 0) && (error != EINPROGRESS)) {
2912 mps_dprint(sc, MPS_ERROR,
2913 "%s: error %d returned from mps_map_command()\n",
/* Error path: release the command and fail the CCB. */
2921 mps_free_command(sc, cm);
2922 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to (the target itself if it embeds an SMP target,
 * otherwise its parent expander) and hand off to mpssas_send_smpcmd().
 * NOTE(review): this extract is missing interleaved original lines
 * (braces, else branches, xpt_done calls); annotate only.
 */
2929 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2931 struct mps_softc *sc;
2932 struct mpssas_target *targ;
/* 0 means "not resolved yet"; checked near the end of the function. */
2933 uint64_t sasaddr = 0;
2938 * Make sure the target exists.
2940 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2941 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2942 targ = &sassc->targets[ccb->ccb_h.target_id];
2943 if (targ->handle == 0x0) {
2944 mps_dprint(sc, MPS_ERROR,
2945 "%s: target %d does not exist!\n", __func__,
2946 ccb->ccb_h.target_id);
2947 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2953 * If this device has an embedded SMP target, we'll talk to it
2955 * figure out what the expander's address is.
2957 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2958 sasaddr = targ->sasaddr;
2961 * If we don't have a SAS address for the expander yet, try
2962 * grabbing it from the page 0x83 information cached in the
2963 * transport layer for this target. LSI expanders report the
2964 * expander SAS address as the port-associated SAS address in
2965 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2968 * XXX KDM disable this for now, but leave it commented out so that
2969 * it is obvious that this is another possible way to get the SAS
2972 * The parent handle method below is a little more reliable, and
2973 * the other benefit is that it works for devices other than SES
2974 * devices. So you can send a SMP request to a da(4) device and it
2975 * will get routed to the expander that device is attached to.
2976 * (Assuming the da(4) device doesn't contain an SMP target...)
2980 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2984 * If we still don't have a SAS address for the expander, look for
2985 * the parent device of this device, which is probably the expander.
2988 #ifdef OLD_MPS_PROBE
2989 struct mpssas_target *parent_target;
2992 if (targ->parent_handle == 0x0) {
2993 mps_dprint(sc, MPS_ERROR,
2994 "%s: handle %d does not have a valid "
2995 "parent handle!\n", __func__, targ->handle);
2996 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Legacy probe path: resolve the parent target by handle. */
2999 #ifdef OLD_MPS_PROBE
3000 parent_target = mpssas_find_target_by_handle(sassc, 0,
3001 targ->parent_handle);
3003 if (parent_target == NULL) {
3004 mps_dprint(sc, MPS_ERROR,
3005 "%s: handle %d does not have a valid "
3006 "parent target!\n", __func__, targ->handle);
3007 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3011 if ((parent_target->devinfo &
3012 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3013 mps_dprint(sc, MPS_ERROR,
3014 "%s: handle %d parent %d does not "
3015 "have an SMP target!\n", __func__,
3016 targ->handle, parent_target->handle);
3017 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3022 sasaddr = parent_target->sasaddr;
3023 #else /* OLD_MPS_PROBE */
/* Current path: the parent devinfo/sasaddr are cached on the target. */
3024 if ((targ->parent_devinfo &
3025 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3026 mps_dprint(sc, MPS_ERROR,
3027 "%s: handle %d parent %d does not "
3028 "have an SMP target!\n", __func__,
3029 targ->handle, targ->parent_handle);
3030 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3034 if (targ->parent_sasaddr == 0x0) {
3035 mps_dprint(sc, MPS_ERROR,
3036 "%s: handle %d parent handle %d does "
3037 "not have a valid SAS address!\n",
3038 __func__, targ->handle, targ->parent_handle);
3039 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3043 sasaddr = targ->parent_sasaddr;
3044 #endif /* OLD_MPS_PROBE */
/* Still unresolved: give up and report the device as absent. */
3049 mps_dprint(sc, MPS_INFO,
3050 "%s: unable to find SAS address for handle %d\n",
3051 __func__, targ->handle);
3052 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3055 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3063 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: queue a SCSI task management request
 * (target reset, SAS hard link reset / SATA link reset) for the target.
 * Completion is routed to mpssas_resetdev_complete().
 * NOTE(review): this extract is missing interleaved original lines
 * (braces, sc assignment, xpt_done calls); annotate only.
 */
3066 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3068 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3069 struct mps_softc *sc;
3070 struct mps_command *tm;
3071 struct mpssas_target *targ;
3073 MPS_FUNCTRACE(sassc->sc);
3074 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3076 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3077 ("Target %d out of bounds in XPT_RESET_DEV\n",
3078 ccb->ccb_h.target_id));
3080 tm = mps_alloc_command(sc);
3082 mps_dprint(sc, MPS_ERROR,
3083 "command alloc failure in mpssas_action_resetdev\n");
3084 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the task management request for this target's device handle. */
3089 targ = &sassc->targets[ccb->ccb_h.target_id];
3090 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3091 req->DevHandle = htole16(targ->handle);
3092 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3093 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3095 /* SAS Hard Link Reset / SATA Link Reset */
3096 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go through the high-priority queue. */
3099 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3100 tm->cm_complete = mpssas_resetdev_complete;
3101 tm->cm_complete_data = ccb;
3103 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task management command queued
 * by mpssas_action_resetdev().  Maps the TM response code onto the CCB
 * status, announces the reset to CAM on success, and frees the TM slot.
 * NOTE(review): this extract is missing interleaved original lines
 * (braces, else branches, xpt_done); annotate only.
 */
3107 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3109 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3113 mtx_assert(&sc->mps_mtx, MA_OWNED);
3115 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3116 ccb = tm->cm_complete_data;
3119 * Currently there should be no way we can hit this case. It only
3120 * happens when we have a failure to allocate chain frames, and
3121 * task management commands don't have S/G lists.
3123 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3124 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3126 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3128 mps_dprint(sc, MPS_ERROR,
3129 "%s: cm_flags = %#x for reset of handle %#04x! "
3130 "This should not happen!\n", __func__, tm->cm_flags,
3132 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3136 mps_dprint(sc, MPS_XINFO,
3137 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3138 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* TM_COMPLETE means the firmware performed the reset; tell CAM (BDR). */
3140 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3141 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
3142 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3146 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3150 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point — called in polled contexts (e.g. kernel
 * dumps/panics) instead of relying on interrupts.  Disables MPS_TRACE
 * first so per-frame debug output does not drown the polled path, then
 * runs the interrupt handler with the SIM lock already held.
 * NOTE(review): extract is missing braces/blank lines; annotate only.
 */
3155 mpssas_poll(struct cam_sim *sim)
3157 struct mpssas_softc *sassc;
3159 sassc = cam_sim_softc(sim);
3161 if (sassc->sc->mps_debug & MPS_TRACE) {
3162 /* frequent debug messages during a panic just slow
3163 * everything down too much.
3165 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3166 sassc->sc->mps_debug &= ~MPS_TRACE;
3169 mps_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  AC_ADVINFO_CHANGED: refresh the cached EEDP
 * (protection-information) state for a LUN when its long read-capacity
 * data changes.  AC_FOUND_DEVICE: probe newly found devices for EEDP via
 * mpssas_check_eedp().
 * NOTE(review): extract is missing interleaved original lines (switch
 * statement, braces, break/else lines); annotate only.
 */
3173 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3176 struct mps_softc *sc;
3178 sc = (struct mps_softc *)callback_arg;
3181 #if (__FreeBSD_version >= 1000006) || \
3182 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3183 case AC_ADVINFO_CHANGED: {
3184 struct mpssas_target *target;
3185 struct mpssas_softc *sassc;
3186 struct scsi_read_capacity_data_long rcap_buf;
3187 struct ccb_dev_advinfo cdai;
3188 struct mpssas_lun *lun;
/* The async 'arg' encodes which advinfo buffer type changed. */
3193 buftype = (uintptr_t)arg;
3199 * We're only interested in read capacity data changes.
3201 if (buftype != CDAI_TYPE_RCAPLONG)
3205 * We should have a handle for this, but check to make sure.
3207 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3208 ("Target %d out of bounds in mpssas_async\n",
3209 xpt_path_target_id(path)));
3210 target = &sassc->targets[xpt_path_target_id(path)];
3211 if (target->handle == 0)
/* Find (or create below) the per-LUN record for this path. */
3214 lunid = xpt_path_lun_id(path);
3216 SLIST_FOREACH(lun, &target->luns, lun_link) {
3217 if (lun->lun_id == lunid) {
3223 if (found_lun == 0) {
3224 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3227 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3228 "LUN for EEDP support.\n");
3231 lun->lun_id = lunid;
3232 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
3235 bzero(&rcap_buf, sizeof(rcap_buf));
3236 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3237 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3238 cdai.ccb_h.flags = CAM_DIR_IN;
3239 cdai.buftype = CDAI_TYPE_RCAPLONG;
3240 #if (__FreeBSD_version >= 1100061) || \
3241 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3242 cdai.flags = CDAI_FLAG_NONE;
3246 cdai.bufsiz = sizeof(rcap_buf);
3247 cdai.buf = (uint8_t *)&rcap_buf;
3248 xpt_action((union ccb *)&cdai);
3249 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3250 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN in the rcap data means the LUN is EEDP-formatted. */
3253 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3254 && (rcap_buf.prot & SRC16_PROT_EN)) {
3255 lun->eedp_formatted = TRUE;
3256 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3258 lun->eedp_formatted = FALSE;
3259 lun->eedp_block_size = 0;
3264 case AC_FOUND_DEVICE: {
3265 struct ccb_getdev *cgd;
3268 mpssas_check_eedp(sc, path, cgd);
3277 #if (__FreeBSD_version < 901503) || \
3278 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * For a newly discovered device that advertises SPC3_SID_PROTECT in its
 * inquiry data, allocate a per-LUN record and issue an internal
 * READ CAPACITY (16) command; mpssas_read_cap_done() records the EEDP
 * formatting state from the result.  Also marks SATA direct-access end
 * devices so a StartStopUnit is sent at driver shutdown.
 * NOTE(review): extract is missing interleaved original lines (braces,
 * locals such as ccb/pathid/path_str, else branches); annotate only.
 */
3280 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3281 struct ccb_getdev *cgd)
3283 struct mpssas_softc *sassc = sc->sassc;
3284 struct ccb_scsiio *csio;
3285 struct scsi_read_capacity_16 *scsi_cmd;
3286 struct scsi_read_capacity_eedp *rcap_buf;
3288 target_id_t targetid;
3291 struct cam_path *local_path;
3292 struct mpssas_target *target;
3293 struct mpssas_lun *lun;
3298 pathid = cam_sim_path(sassc->sim);
3299 targetid = xpt_path_target_id(path);
3300 lunid = xpt_path_lun_id(path);
3302 KASSERT(targetid < sassc->maxtargets,
3303 ("Target %d out of bounds in mpssas_check_eedp\n",
3305 target = &sassc->targets[targetid];
3306 if (target->handle == 0x0)
3310 * Determine if the device is EEDP capable.
3312 * If this flag is set in the inquiry data,
3313 * the device supports protection information,
3314 * and must support the 16 byte read
3315 * capacity command, otherwise continue without
3316 * sending read cap 16
3318 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3322 * Issue a READ CAPACITY 16 command. This info
3323 * is used to determine if the LUN is formatted
3326 ccb = xpt_alloc_ccb_nowait();
3328 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3329 "for EEDP support.\n");
3333 if (xpt_create_path(&local_path, xpt_periph,
3334 pathid, targetid, lunid) != CAM_REQ_CMP) {
3335 mps_dprint(sc, MPS_ERROR, "Unable to create "
3336 "path for EEDP support\n");
3342 * If LUN is already in list, don't create a new
3346 SLIST_FOREACH(lun, &target->luns, lun_link) {
3347 if (lun->lun_id == lunid) {
3353 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3356 mps_dprint(sc, MPS_ERROR,
3357 "Unable to alloc LUN for EEDP support.\n");
3358 xpt_free_path(local_path);
3362 lun->lun_id = lunid;
3363 SLIST_INSERT_HEAD(&target->luns, lun,
3367 xpt_path_string(local_path, path_str, sizeof(path_str));
3370 * If this is a SATA direct-access end device,
3371 * mark it so that a SCSI StartStopUnit command
3372 * will be sent to it when the driver is being
/* NOTE(review): 'cgd' is a pointer parameter here, so 'cgd.inq_data'
 * below looks like it should be 'cgd->inq_data' — verify against the
 * full original source (older versions used a local 'struct ccb_getdev
 * cgd'). */
3375 if ((cgd.inq_data.device == T_DIRECT) &&
3376 (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3377 ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3378 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3379 lun->stop_at_shutdown = TRUE;
3382 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3383 path_str, target->handle);
3386 * Issue a READ CAPACITY 16 command for the LUN.
3387 * The mpssas_read_cap_done function will load
3388 * the read cap info into the LUN struct.
3390 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3391 M_MPT2, M_NOWAIT | M_ZERO);
3392 if (rcap_buf == NULL) {
3393 mps_dprint(sc, MPS_FAULT,
3394 "Unable to alloc read capacity buffer for EEDP support.\n");
3395 xpt_free_path(ccb->ccb_h.path);
/* Build the internal READ CAPACITY (16) CCB; 60s timeout, 4 retries. */
3399 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3401 csio->ccb_h.func_code = XPT_SCSI_IO;
3402 csio->ccb_h.flags = CAM_DIR_IN;
3403 csio->ccb_h.retry_count = 4;
3404 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3405 csio->ccb_h.timeout = 60000;
3406 csio->data_ptr = (uint8_t *)rcap_buf;
3407 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3408 csio->sense_len = MPS_SENSE_LEN;
3409 csio->cdb_len = sizeof(*scsi_cmd);
3410 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E/0x10 = SERVICE ACTION IN / READ CAPACITY (16); byte 13 is the
 * allocation length. */
3412 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3413 bzero(scsi_cmd, sizeof(*scsi_cmd));
3414 scsi_cmd->opcode = 0x9E;
3415 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3416 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion callback can find its state. */
3418 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internal READ CAPACITY (16) issued by
 * mpssas_check_eedp().  Records whether the LUN is EEDP-formatted (and
 * its block size) in the matching mpssas_lun, then frees the data
 * buffer, the path, and the CCB.
 * NOTE(review): extract is missing interleaved original lines (braces,
 * else branches, break); annotate only.
 */
3423 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3425 struct mpssas_softc *sassc;
3426 struct mpssas_target *target;
3427 struct mpssas_lun *lun;
3428 struct scsi_read_capacity_eedp *rcap_buf;
3430 if (done_ccb == NULL)
3433 /* Driver need to release devq, it Scsi command is
3434 * generated by driver internally.
3435 * Currently there is a single place where driver
3436 * calls scsi command internally. In future if driver
3437 * calls more scsi command internally, it needs to release
3438 * devq internally, since those command will not go back to
3441 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3442 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3443 xpt_release_devq(done_ccb->ccb_h.path,
3444 /*count*/ 1, /*run_queue*/TRUE);
3447 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3450 * Get the LUN ID for the path and look it up in the LUN list for the
/* The softc was stashed in ppriv_ptr1 by mpssas_check_eedp(). */
3453 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3454 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3455 ("Target %d out of bounds in mpssas_read_cap_done\n",
3456 done_ccb->ccb_h.target_id));
3457 target = &sassc->targets[done_ccb->ccb_h.target_id];
3458 SLIST_FOREACH(lun, &target->luns, lun_link) {
3459 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3463 * Got the LUN in the target's LUN list. Fill it in
3464 * with EEDP info. If the READ CAP 16 command had some
3465 * SCSI error (common if command is not supported), mark
3466 * the lun as not supporting EEDP and set the block size
3469 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
3470 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3471 lun->eedp_formatted = FALSE;
3472 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte (PROT_EN) indicates EEDP formatting. */
3476 if (rcap_buf->protect & 0x01) {
3477 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3478 "target ID %d is formatted for EEDP "
3479 "support.\n", done_ccb->ccb_h.target_lun,
3480 done_ccb->ccb_h.target_id);
3481 lun->eedp_formatted = TRUE;
3482 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3487 // Finished with this CCB and path.
3488 free(rcap_buf, M_MPT2);
3489 xpt_free_path(done_ccb->ccb_h.path);
3490 xpt_free_ccb(done_ccb);
3492 #endif /* (__FreeBSD_version < 901503) || \
3493 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Kick off SAS discovery: set wait_for_port_enable (keeps the simq
 * frozen until discovery completes) and send the port enable request.
 * NOTE(review): extract is missing braces/blank lines; annotate only.
 */
3496 mpssas_startup(struct mps_softc *sc)
3500 * Send the port enable message and set the wait_for_port_enable flag.
3501 * This flag helps to keep the simq frozen until all discovery events
3504 sc->wait_for_port_enable = 1;
3505 mpssas_send_portenable(sc);
/*
 * Allocate a command slot and submit an MPI2 PORT ENABLE request to the
 * firmware; mpssas_portenable_complete() handles the reply.
 * NOTE(review): extract is missing interleaved original lines (braces,
 * error return, return statement); annotate only.
 */
3510 mpssas_send_portenable(struct mps_softc *sc)
3512 MPI2_PORT_ENABLE_REQUEST *request;
3513 struct mps_command *cm;
3517 if ((cm = mps_alloc_command(sc)) == NULL)
3519 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3520 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3521 request->MsgFlags = 0;
/* Default request descriptor; completion routed to portenable handler. */
3523 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3524 cm->cm_complete = mpssas_portenable_complete;
3528 mps_map_command(sc, cm);
3529 mps_dprint(sc, MPS_XINFO,
3530 "mps_send_portenable finished cm %p req %p complete %p\n",
3531 cm, cm->cm_req, cm->cm_complete);
3536 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3538 MPI2_PORT_ENABLE_REPLY *reply;
3539 struct mpssas_softc *sassc;
3545 * Currently there should be no way we can hit this case. It only
3546 * happens when we have a failure to allocate chain frames, and
3547 * port enable commands don't have S/G lists.
3549 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3550 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3551 "This should not happen!\n", __func__, cm->cm_flags);
3554 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3556 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3557 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3558 MPI2_IOCSTATUS_SUCCESS)
3559 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3561 mps_free_command(sc, cm);
3562 if (sc->mps_ich.ich_arg != NULL) {
3563 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3564 config_intrhook_disestablish(&sc->mps_ich);
3565 sc->mps_ich.ich_arg = NULL;
3569 * Get WarpDrive info after discovery is complete but before the scan
3570 * starts. At this point, all devices are ready to be exposed to the
3571 * OS. If devices should be hidden instead, take them out of the
3572 * 'targets' array before the scan. The devinfo for a disk will have
3573 * some info and a volume's will be 0. Use that to remove disks.
3575 mps_wd_config_pages(sc);
3578 * Done waiting for port enable to complete. Decrement the refcount.
3579 * If refcount is 0, discovery is complete and a rescan of the bus can
3580 * take place. Since the simq was explicitly frozen before port
3581 * enable, it must be explicitly released here to keep the
3582 * freeze/release count in sync.
3584 sc->wait_for_port_enable = 0;
3585 sc->port_enable_complete = 1;
3586 wakeup(&sc->port_enable_complete);
3587 mpssas_startup_decrement(sassc);
/*
 * mpssas_check_id(): report whether target 'id' appears in the driver's
 * comma-separated exclude list (sc->exclude_ids), so discovery can skip
 * it.
 * NOTE(review): excerpt is incomplete — the return type, the declarations
 * of 'ids' and 'name', the loop's 'continue', and both return statements
 * are elided; presumably a match returns nonzero — confirm against the
 * full source.  Leading numbers look like pasted original-file line
 * numbers.
 */
3591 mpssas_check_id(struct mpssas_softc *sassc, int id)
3593 struct mps_softc *sc = sassc->sc;
3597 ids = &sc->exclude_ids[0];
/* strsep() consumes 'ids' in place, yielding one comma-separated token
 * per iteration. */
3598 while((name = strsep(&ids, ",")) != NULL) {
/* Skip empty tokens (e.g. ",," or a trailing comma). */
3599 if (name[0] == '\0')
/* Base 0 lets strtol accept decimal, octal (0...) and hex (0x...) IDs. */
3601 if (strtol(name, NULL, 0) == (long)id)
3609 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3611 struct mpssas_softc *sassc;
3612 struct mpssas_lun *lun, *lun_tmp;
3613 struct mpssas_target *targ;
3618 * The number of targets is based on IOC Facts, so free all of
3619 * the allocated LUNs for each target and then the target buffer
3622 for (i=0; i< maxtargets; i++) {
3623 targ = &sassc->targets[i];
3624 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3628 free(sassc->targets, M_MPT2);
3630 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3631 M_MPT2, M_WAITOK|M_ZERO);
3632 if (!sassc->targets) {
3633 panic("%s failed to alloc targets with error %d\n",