2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * LSI MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for LSI MPT2 */
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
88 #define MPSSAS_DISCOVERY_TIMEOUT 20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
92 * static array to check SCSI OpCode for EEDP protection bits
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI CDB opcode (0x00-0xFF), 16 entries per row.
 * A non-zero entry gives the EEDP flag value to put in the SCSI IO request
 * for that opcode; zero means the opcode gets no EEDP treatment.
 * NOTE(review): PRO_W and PRO_V expand to the same MPI2 flag; presumably
 * PRO_V marks VERIFY-class opcodes — confirm against the MPI2 headers.
 */
static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Malloc type for all allocations made by this SAS layer. */
MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
119 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
120 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
121 static void mpssas_poll(struct cam_sim *sim);
122 static void mpssas_scsiio_timeout(void *data);
123 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
124 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
125 struct mps_command *cm, union ccb *ccb);
126 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
127 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
128 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
129 #if __FreeBSD_version >= 900026
130 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
131 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
133 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
134 #endif //FreeBSD_version >= 900026
135 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
136 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
137 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
138 static void mpssas_async(void *callback_arg, uint32_t code,
139 struct cam_path *path, void *arg);
140 #if (__FreeBSD_version < 901503) || \
141 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
142 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
143 struct ccb_getdev *cgd);
144 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
146 static int mpssas_send_portenable(struct mps_softc *sc);
147 static void mpssas_portenable_complete(struct mps_softc *sc,
148 struct mps_command *cm);
/*
 * Linear scan of sassc->targets[start..maxtargets) for the entry whose
 * firmware device handle matches 'handle'.  'start' lets callers resume a
 * search past a previous hit.  (Return of the match / NULL is on lines not
 * shown in this listing.)
 */
struct mpssas_target *
mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
153 struct mpssas_target *target;
156 for (i = start; i < sassc->maxtargets; i++) {
157 target = &sassc->targets[i];
158 if (target->handle == handle)
165 /* we need to freeze the simq during attach and diag reset, to avoid failing
166 * commands before device handles have been found by discovery. Since
167 * discovery involves reading config pages and possibly sending commands,
168 * discovery actions may continue even after we receive the end of discovery
169 * event, so refcount discovery actions instead of assuming we can unfreeze
170 * the simq when we get the event.
/*
 * Bump the discovery refcount; on the 0 -> 1 transition freeze the SIM
 * queue so CAM sends no commands until discovery completes (see the
 * comment block above).  Only acts while MPSSAS_IN_STARTUP is set.
 */
mpssas_startup_increment(struct mpssas_softc *sassc)
175 MPS_FUNCTRACE(sassc->sc);
177 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
178 if (sassc->startup_refcount++ == 0) {
179 /* just starting, freeze the simq */
180 mps_dprint(sassc->sc, MPS_INIT,
181 "%s freezing simq\n", __func__);
182 #if __FreeBSD_version >= 1000039
185 xpt_freeze_simq(sassc->sim, 1);
187 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
188 sassc->startup_refcount);
/*
 * If the SIM queue was frozen with MPSSAS_QUEUE_FROZEN set, clear the
 * flag and release one freeze count.  Used on the reinit path.
 */
mpssas_release_simq_reinit(struct mpssas_softc *sassc)
195 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
196 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
197 xpt_release_simq(sassc->sim, 1);
198 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
/*
 * Drop the discovery refcount; on the 1 -> 0 transition leave startup
 * mode, release the SIM queue, and (pre-PIM_NOSCAN kernels) rescan the
 * whole bus for the post-discovery topology.
 */
mpssas_startup_decrement(struct mpssas_softc *sassc)
205 MPS_FUNCTRACE(sassc->sc);
207 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
208 if (--sassc->startup_refcount == 0) {
209 /* finished all discovery-related actions, release
210 * the simq and rescan for the latest topology.
212 mps_dprint(sassc->sc, MPS_INIT,
213 "%s releasing simq\n", __func__);
214 sassc->flags &= ~MPSSAS_IN_STARTUP;
215 xpt_release_simq(sassc->sim, 1);
216 #if __FreeBSD_version >= 1000039
/* NULL target => wildcard rescan of the entire bus. */
219 mpssas_rescan_target(sassc->sc, NULL);
222 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
223 sassc->startup_refcount);
227 /* LSI's firmware requires us to stop sending commands when we're doing task
228 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management.  The first
 * outstanding TM (tm_count 0 -> 1) freezes the SIM queue, per the
 * firmware requirement described above.  Pairs with mpssas_free_tm().
 */
mpssas_alloc_tm(struct mps_softc *sc)
234 struct mps_command *tm;
237 tm = mps_alloc_high_priority_command(sc);
/* NOTE(review): the NULL check on 'tm' is on a line elided from this
 * listing; tm_count must only be bumped for a successful allocation. */
239 if (sc->sassc->tm_count++ == 0) {
240 mps_dprint(sc, MPS_RECOVERY,
241 "%s freezing simq\n", __func__);
242 xpt_freeze_simq(sc->sassc->sim, 1);
244 mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
245 sc->sassc->tm_count);
/*
 * Release a task-management command.  The last outstanding TM
 * (tm_count 1 -> 0) releases the SIM queue frozen by mpssas_alloc_tm().
 */
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
253 mps_dprint(sc, MPS_TRACE, "%s", __func__);
257 /* if there are no TMs in use, we can release the simq. We use our
258 * own refcount so that it's easier for a diag reset to cleanup and
261 if (--sc->sassc->tm_count == 0) {
262 mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
263 xpt_release_simq(sc->sassc->sim, 1);
265 mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
266 sc->sassc->tm_count);
268 mps_free_high_priority_command(sc, tm);
/*
 * Queue a CAM rescan of one target, or of the whole bus when 'targ' is
 * NULL (wildcard).  Allocates a CCB non-blocking, builds a path, and
 * picks XPT_SCAN_BUS vs XPT_SCAN_TGT accordingly; the xpt_rescan() call
 * itself is on a line not shown in this listing.
 */
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
274 struct mpssas_softc *sassc = sc->sassc;
276 target_id_t targetid;
280 pathid = cam_sim_path(sassc->sim);
282 targetid = CAM_TARGET_WILDCARD;
/* Target id is the index of 'targ' within the targets[] array. */
284 targetid = targ - sassc->targets;
287 * Allocate a CCB and schedule a rescan.
289 ccb = xpt_alloc_ccb_nowait();
291 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
295 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
296 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
302 if (targetid == CAM_TARGET_WILDCARD)
303 ccb->ccb_h.func_code = XPT_SCAN_BUS;
305 ccb->ccb_h.func_code = XPT_SCAN_TGT;
307 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logging for a command: prefixes the message with the
 * CAM path (or a synthesized "noperiph" tuple when there is no CCB), the
 * SCSI CDB for XPT_SCSI_IO requests, and the SMID, assembled in an sbuf
 * on the stack and emitted at the given debug 'level'.
 */
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
322 /* No need to be in here if debugging isn't enabled */
323 if ((cm->cm_sc->mps_debug & level) == 0)
326 sbuf_new(&sb, str, sizeof(str), 0);
330 if (cm->cm_ccb != NULL) {
331 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
333 sbuf_cat(&sb, path_str);
334 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
335 scsi_command_string(&cm->cm_ccb->csio, &sb);
336 sbuf_printf(&sb, "length %d ",
337 cm->cm_ccb->csio.dxfer_len);
/* No CCB attached: identify the command by sim/bus/target/lun. */
341 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
342 cam_sim_name(cm->cm_sc->sassc->sim),
343 cam_sim_unit(cm->cm_sc->sassc->sim),
344 cam_sim_bus(cm->cm_sc->sassc->sim),
345 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
349 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
350 sbuf_vprintf(&sb, fmt, ap);
352 mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  Volume removal needs no follow-up
 * MPI2_SAS_OP_REMOVE_DEVICE (see comment below), so on success this just
 * clears the target bookkeeping and frees the TM.
 */
mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
361 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
362 struct mpssas_target *targ;
367 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
368 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
/* NULL reply: most likely a diag reset completed this command. */
372 /* XXX retry the remove after the diag reset completes? */
373 mps_dprint(sc, MPS_FAULT,
374 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
375 mpssas_free_tm(sc, tm);
/*
 * NOTE(review): IOCStatus is read raw here and again below, while the
 * sibling mpssas_remove_device()/mpssas_remove_complete() wrap it in
 * le16toh().  On big-endian hosts this comparison looks wrong — confirm
 * and add the byte-swap for consistency.
 */
379 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
380 mps_dprint(sc, MPS_FAULT,
381 "IOCStatus = 0x%x while resetting device 0x%x\n",
382 reply->IOCStatus, handle);
383 mpssas_free_tm(sc, tm);
387 mps_dprint(sc, MPS_XINFO,
388 "Reset aborted %u commands\n", reply->TerminationCount);
389 mps_free_reply(sc, tm->cm_reply_data);
390 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
392 mps_dprint(sc, MPS_XINFO,
393 "clearing target %u handle 0x%04x\n", targ->tid, handle);
396 * Don't clear target if remove fails because things will get confusing.
397 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 * this target id if possible, and so we can assign the same target id
399 * to this device if it comes back in the future.
401 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
404 targ->encl_handle = 0x0;
405 targ->encl_slot = 0x0;
406 targ->exp_dev_handle = 0x0;
408 targ->linkrate = 0x0;
413 mpssas_free_tm(sc, tm);
418 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
419 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Kick off removal of a RAID volume identified by its firmware handle:
 * mark the target INREMOVAL, rescan it so CAM learns it is going away,
 * and send a target-reset TM whose completion (mpssas_remove_volume)
 * finishes the teardown.  On WD controllers that always expose the disk,
 * removal is skipped entirely.
 */
mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
424 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
425 struct mps_softc *sc;
426 struct mps_command *cm;
427 struct mpssas_target *targ = NULL;
429 MPS_FUNCTRACE(sassc->sc);
434 * If this is a WD controller, determine if the disk should be exposed
435 * to the OS or not. If disk should be exposed, return from this
436 * function without doing anything.
438 if (sc->WD_available && (sc->WD_hide_expose ==
439 MPS_WD_EXPOSE_ALWAYS)) {
444 targ = mpssas_find_target_by_handle(sassc, 0, handle);
446 /* FIXME: what is the action? */
447 /* We don't know about this device? */
448 mps_dprint(sc, MPS_ERROR,
449 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
453 targ->flags |= MPSSAS_TARGET_INREMOVAL;
455 cm = mpssas_alloc_tm(sc);
457 mps_dprint(sc, MPS_ERROR,
458 "%s: command alloc failure\n", __func__);
462 mpssas_rescan_target(sc, targ);
464 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
/*
 * NOTE(review): unlike mpssas_prepare_remove(), the request is not
 * memset() to zero and DevHandle is stored without htole16().  The
 * high-priority request buffer may carry stale bytes and the handle is
 * host-endian — confirm and align with the sibling function.
 */
465 req->DevHandle = targ->handle;
466 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
467 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
469 /* SAS Hard Link Reset / SATA Link Reset */
470 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
474 cm->cm_desc.HighPriority.RequestFlags =
475 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
476 cm->cm_complete = mpssas_remove_volume;
477 cm->cm_complete_data = (void *)(uintptr_t)handle;
478 mps_map_command(sc, cm);
482 * The MPT2 firmware performs debounce on the link to avoid transient link
483 * errors and false removals. When it does decide that link has been lost
484 * and a device need to go away, it expects that the host will perform a
485 * target reset and then an op remove. The reset has the side-effect of
486 * aborting any outstanding requests for the device, which is required for
487 * the op-remove to succeed. It's not clear if the host should check for
488 * the device coming back alive after the reset.
/*
 * Kick off removal of a bare drive: mark the target INREMOVAL, rescan it,
 * and send a target reset TM.  Its completion (mpssas_remove_device)
 * issues the MPI2_SAS_OP_REMOVE_DEVICE that actually retires the handle —
 * the two-step reset/op-remove protocol described in the comment above.
 */
mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
493 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
494 struct mps_softc *sc;
495 struct mps_command *cm;
496 struct mpssas_target *targ = NULL;
498 MPS_FUNCTRACE(sassc->sc);
502 targ = mpssas_find_target_by_handle(sassc, 0, handle);
504 /* FIXME: what is the action? */
505 /* We don't know about this device? */
506 mps_dprint(sc, MPS_ERROR,
507 "%s : invalid handle 0x%x \n", __func__, handle);
511 targ->flags |= MPSSAS_TARGET_INREMOVAL;
513 cm = mpssas_alloc_tm(sc);
515 mps_dprint(sc, MPS_ERROR,
516 "%s: command alloc failure\n", __func__);
520 mpssas_rescan_target(sc, targ);
522 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
523 memset(req, 0, sizeof(*req));
524 req->DevHandle = htole16(targ->handle);
525 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
526 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
528 /* SAS Hard Link Reset / SATA Link Reset */
529 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
533 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
534 cm->cm_complete = mpssas_remove_device;
535 cm->cm_complete_data = (void *)(uintptr_t)handle;
536 mps_map_command(sc, cm);
/*
 * Completion of the target-reset TM from mpssas_prepare_remove().  On
 * success, reuses the same command to issue MPI2_SAS_OP_REMOVE_DEVICE
 * (completing in mpssas_remove_complete), then fails any commands still
 * queued on the target with CAM_DEV_NOT_THERE.
 */
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
542 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
543 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
544 struct mpssas_target *targ;
545 struct mps_command *next_cm;
550 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
551 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
555 * Currently there should be no way we can hit this case. It only
556 * happens when we have a failure to allocate chain frames, and
557 * task management commands don't have S/G lists.
559 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
560 mps_dprint(sc, MPS_ERROR,
561 "%s: cm_flags = %#x for remove of handle %#04x! "
562 "This should not happen!\n", __func__, tm->cm_flags,
564 mpssas_free_tm(sc, tm);
/* NULL reply: most likely completed by a diag reset. */
569 /* XXX retry the remove after the diag reset completes? */
570 mps_dprint(sc, MPS_FAULT,
571 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
572 mpssas_free_tm(sc, tm);
576 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
577 mps_dprint(sc, MPS_FAULT,
578 "IOCStatus = 0x%x while resetting device 0x%x\n",
579 le16toh(reply->IOCStatus), handle);
580 mpssas_free_tm(sc, tm);
584 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
585 le32toh(reply->TerminationCount));
586 mps_free_reply(sc, tm->cm_reply_data);
587 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
589 /* Reuse the existing command */
590 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
591 memset(req, 0, sizeof(*req));
592 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
593 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
594 req->DevHandle = htole16(handle);
596 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
597 tm->cm_complete = mpssas_remove_complete;
598 tm->cm_complete_data = (void *)(uintptr_t)handle;
600 mps_map_command(sc, tm);
602 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
/* Fail any I/O still queued to the departing target. */
604 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
607 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
608 ccb = tm->cm_complete_data;
609 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
610 mpssas_scsiio_complete(sc, tm);
/*
 * Completion of the MPI2_SAS_OP_REMOVE_DEVICE request.  On success,
 * clears the target's volatile identity fields (keeping devname/sasaddr
 * so the target id can be re-used if the device returns) and frees the
 * per-LUN bookkeeping.
 */
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
617 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
619 struct mpssas_target *targ;
620 struct mpssas_lun *lun;
624 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
625 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628 * Currently there should be no way we can hit this case. It only
629 * happens when we have a failure to allocate chain frames, and
630 * task management commands don't have S/G lists.
632 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
633 mps_dprint(sc, MPS_XINFO,
634 "%s: cm_flags = %#x for remove of handle %#04x! "
635 "This should not happen!\n", __func__, tm->cm_flags,
637 mpssas_free_tm(sc, tm);
642 /* most likely a chip reset */
643 mps_dprint(sc, MPS_FAULT,
644 "%s NULL reply removing device 0x%04x\n", __func__, handle);
645 mpssas_free_tm(sc, tm);
649 mps_dprint(sc, MPS_XINFO,
650 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
651 handle, le16toh(reply->IOCStatus));
654 * Don't clear target if remove fails because things will get confusing.
655 * Leave the devname and sasaddr intact so that we know to avoid reusing
656 * this target id if possible, and so we can assign the same target id
657 * to this device if it comes back in the future.
659 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
662 targ->encl_handle = 0x0;
663 targ->encl_slot = 0x0;
664 targ->exp_dev_handle = 0x0;
666 targ->linkrate = 0x0;
/* Drain and free the target's LUN list. */
670 while(!SLIST_EMPTY(&targ->luns)) {
671 lun = SLIST_FIRST(&targ->luns);
672 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
678 mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask this SAS layer cares about (device
 * status, discovery, topology, IR/volume events, log entries) and
 * register mpssas_evt_handler for them.
 */
mpssas_register_events(struct mps_softc *sc)
684 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
687 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
688 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
689 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
690 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
691 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
692 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
693 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
694 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
695 setbit(events, MPI2_EVENT_IR_VOLUME);
696 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
697 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
698 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
700 mps_register_events(sc, events, mpssas_evt_handler, NULL,
701 &sc->sassc->mpssas_eh);
/*
 * Attach the CAM/SAS layer: allocate the softc and targets array, create
 * the SIM queue/SIM, start the event-handling taskqueue, register the
 * SCSI bus with CAM, enter startup (simq frozen until discovery ends),
 * register async callbacks for EEDP detection, and hook firmware events.
 * Error unwinding paths are on lines elided from this listing.
 */
mps_attach_sas(struct mps_softc *sc)
709 struct mpssas_softc *sassc;
715 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
/* NOTE(review): M_WAITOK cannot return NULL, so this check (and the one
 * below for targets) is dead code — harmless but misleading. */
717 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
723 * XXX MaxTargets could change during a reinit. Since we don't
724 * resize the targets[] array during such an event, cache the value
725 * of MaxTargets here so that we don't get into trouble later. This
726 * should move into the reinit logic.
728 sassc->maxtargets = sc->facts->MaxTargets;
729 sassc->targets = malloc(sizeof(struct mpssas_target) *
730 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
731 if(!sassc->targets) {
732 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
740 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
741 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
746 unit = device_get_unit(sc->mps_dev);
747 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
748 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
749 if (sassc->sim == NULL) {
750 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
755 TAILQ_INIT(&sassc->ev_queue);
757 /* Initialize taskqueue for Event Handling */
758 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
759 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
760 taskqueue_thread_enqueue, &sassc->ev_tq);
761 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
762 device_get_nameunit(sc->mps_dev));
767 * XXX There should be a bus for every port on the adapter, but since
768 * we're just going to fake the topology for now, we'll pretend that
769 * everything is just a target on a single bus.
771 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
772 mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
779 * Assume that discovery events will start right away.
781 * Hold off boot until discovery is complete.
783 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
784 sc->sassc->startup_refcount = 0;
785 mpssas_startup_increment(sassc);
787 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
788 sassc->discovery_timeouts = 0;
793 * Register for async events so we can determine the EEDP
794 * capabilities of devices.
796 status = xpt_create_path(&sassc->path, /*periph*/NULL,
797 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
799 if (status != CAM_REQ_CMP) {
800 mps_printf(sc, "Error %#x creating sim path\n", status);
/* Newer kernels report EEDP via AC_ADVINFO_CHANGED; older ones only
 * provide AC_FOUND_DEVICE, handled by the mpssas_check_eedp path. */
805 #if (__FreeBSD_version >= 1000006) || \
806 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
807 event = AC_ADVINFO_CHANGED;
809 event = AC_FOUND_DEVICE;
811 status = xpt_register_async(event, mpssas_async, sc,
813 if (status != CAM_REQ_CMP) {
814 mps_dprint(sc, MPS_ERROR,
815 "Error %#x registering async handler for "
816 "AC_ADVINFO_CHANGED events\n", status);
817 xpt_free_path(sassc->path);
821 if (status != CAM_REQ_CMP) {
823 * EEDP use is the exception, not the rule.
824 * Warn the user, but do not fail to attach.
826 mps_printf(sc, "EEDP capabilities disabled.\n");
831 mpssas_register_events(sc);
/*
 * Tear down the CAM/SAS layer in reverse of mps_attach_sas(): deregister
 * firmware events, drain/free the event taskqueue (lock dropped to avoid
 * deadlock), remove the async handler, unfreeze the simq if we bailed out
 * during startup, deregister the bus, and free SIM, SIMQ, per-target LUN
 * lists and the targets array.
 */
mps_detach_sas(struct mps_softc *sc)
841 struct mpssas_softc *sassc;
842 struct mpssas_lun *lun, *lun_tmp;
843 struct mpssas_target *targ;
848 if (sc->sassc == NULL)
852 mps_deregister_events(sc, sassc->mpssas_eh);
855 * Drain and free the event handling taskqueue with the lock
856 * unheld so that any parallel processing tasks drain properly
857 * without deadlocking.
859 if (sassc->ev_tq != NULL)
860 taskqueue_free(sassc->ev_tq);
862 /* Make sure CAM doesn't wedge if we had to bail out early. */
865 /* Deregister our async handler */
866 if (sassc->path != NULL) {
867 xpt_register_async(0, mpssas_async, sc, sassc->path);
868 xpt_free_path(sassc->path);
872 if (sassc->flags & MPSSAS_IN_STARTUP)
873 xpt_release_simq(sassc->sim, 1);
875 if (sassc->sim != NULL) {
876 xpt_bus_deregister(cam_sim_path(sassc->sim));
877 cam_sim_free(sassc->sim, FALSE);
880 sassc->flags |= MPSSAS_SHUTDOWN;
883 if (sassc->devq != NULL)
884 cam_simq_free(sassc->devq);
886 for(i=0; i< sassc->maxtargets ;i++) {
887 targ = &sassc->targets[i];
888 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
892 free(sassc->targets, M_MPT2);
/*
 * Called when discovery finishes: cancel the pending discovery-timeout
 * callout, if one was armed.
 */
mpssas_discovery_end(struct mpssas_softc *sassc)
902 struct mps_softc *sc = sassc->sc;
906 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
907 callout_stop(&sassc->discovery_callout);
/*
 * CAM action entry point for the SIM.  Dispatches on ccb_h.func_code:
 * XPT_PATH_INQ fills adapter capabilities, XPT_GET_TRAN_SETTINGS reports
 * per-target SAS link rate, geometry/reset/scsiio/smpio are delegated to
 * their handlers, and unknown codes get CAM_FUNC_NOTAVAIL.  Runs with the
 * softc mutex held (asserted below).
 */
mpssas_action(struct cam_sim *sim, union ccb *ccb)
914 struct mpssas_softc *sassc;
916 sassc = cam_sim_softc(sim);
918 MPS_FUNCTRACE(sassc->sc);
919 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
920 ccb->ccb_h.func_code);
921 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
923 switch (ccb->ccb_h.func_code) {
926 struct ccb_pathinq *cpi = &ccb->cpi;
928 cpi->version_num = 1;
929 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
930 cpi->target_sprt = 0;
931 #if __FreeBSD_version >= 1000039
932 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
934 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
936 cpi->hba_eng_cnt = 0;
937 cpi->max_target = sassc->maxtargets - 1;
/* The initiator occupies the highest target id on the faked bus. */
939 cpi->initiator_id = sassc->maxtargets - 1;
940 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
941 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
942 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
943 cpi->unit_number = cam_sim_unit(sim);
944 cpi->bus_id = cam_sim_bus(sim);
945 cpi->base_transfer_speed = 150000;
946 cpi->transport = XPORT_SAS;
947 cpi->transport_version = 0;
948 cpi->protocol = PROTO_SCSI;
949 cpi->protocol_version = SCSI_REV_SPC;
950 #if __FreeBSD_version >= 800001
952 * XXX KDM where does this number come from?
954 cpi->maxio = 256 * 1024;
956 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
959 case XPT_GET_TRAN_SETTINGS:
961 struct ccb_trans_settings *cts;
962 struct ccb_trans_settings_sas *sas;
963 struct ccb_trans_settings_scsi *scsi;
964 struct mpssas_target *targ;
967 sas = &cts->xport_specific.sas;
968 scsi = &cts->proto_specific.scsi;
970 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
971 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
972 cts->ccb_h.target_id));
973 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle == 0 means no device is currently at this target id. */
974 if (targ->handle == 0x0) {
975 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
979 cts->protocol_version = SCSI_REV_SPC2;
980 cts->transport = XPORT_SAS;
981 cts->transport_version = 0;
983 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the MPI2 negotiated link-rate code to kbit/s for CAM. */
984 switch (targ->linkrate) {
986 sas->bitrate = 150000;
989 sas->bitrate = 300000;
992 sas->bitrate = 600000;
998 cts->protocol = PROTO_SCSI;
999 scsi->valid = CTS_SCSI_VALID_TQ;
1000 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1002 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1005 case XPT_CALC_GEOMETRY:
1006 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1007 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1010 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1011 mpssas_action_resetdev(sassc, ccb);
1016 mps_dprint(sassc->sc, MPS_XINFO,
1017 "mpssas_action faking success for abort or reset\n");
1018 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1021 mpssas_action_scsiio(sassc, ccb);
1023 #if __FreeBSD_version >= 900026
1025 mpssas_action_smpio(sassc, ccb);
1029 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Notify CAM of a reset event (e.g. AC_BUS_RESET, AC_SENT_BDR) for the
 * given target/lun: build a temporary path, post the async event, and
 * free the path.
 */
mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1038 target_id_t target_id, lun_id_t lun_id)
1040 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1041 struct cam_path *path;
1043 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1044 ac_code, target_id, (uintmax_t)lun_id);
1046 if (xpt_create_path(&path, NULL,
1047 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1048 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1053 xpt_async(ac_code, path, NULL);
1054 xpt_free_path(path);
/*
 * Diag-reset cleanup: walk every command slot (SMID 0 is reserved, so
 * start at 1), clear its reply pointer, and complete it — via its
 * completion callback, a wakeup for sleeping waiters, or the POLLED flag
 * — while resynchronizing the io_cmds_active counter.
 */
mpssas_complete_all_commands(struct mps_softc *sc)
1060 struct mps_command *cm;
1065 mtx_assert(&sc->mps_mtx, MA_OWNED);
1067 /* complete all commands with a NULL reply */
1068 for (i = 1; i < sc->num_reqs; i++) {
1069 cm = &sc->commands[i];
1070 cm->cm_reply = NULL;
1073 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1074 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1076 if (cm->cm_complete != NULL) {
1077 mpssas_log_command(cm, MPS_RECOVERY,
1078 "completing cm %p state %x ccb %p for diag reset\n",
1079 cm, cm->cm_state, cm->cm_ccb);
1081 cm->cm_complete(sc, cm);
1085 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1086 mpssas_log_command(cm, MPS_RECOVERY,
1087 "waking up cm %p state %x ccb %p for diag reset\n",
1088 cm, cm->cm_state, cm->cm_ccb);
1093 if (cm->cm_sc->io_cmds_active != 0) {
1094 cm->cm_sc->io_cmds_active--;
1096 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: "
1097 "io_cmds_active is out of sync - resynching to "
/* A busy command that had no completion path is logged — it would
 * otherwise be leaked silently. */
1101 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1102 /* this should never happen, but if it does, log */
1103 mpssas_log_command(cm, MPS_RECOVERY,
1104 "cm %p state %x flags 0x%x ccb %p during diag "
1105 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: re-enter startup mode (freezing the
 * simq), announce a bus reset to CAM, complete all outstanding commands,
 * and invalidate every cached target handle so discovery can repopulate
 * them with the firmware's new assignments.
 */
mpssas_handle_reinit(struct mps_softc *sc)
1116 /* Go back into startup mode and freeze the simq, so that CAM
1117 * doesn't send any commands until after we've rediscovered all
1118 * targets and found the proper device handles for them.
1120 * After the reset, portenable will trigger discovery, and after all
1121 * discovery-related activities have finished, the simq will be
1124 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1125 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1126 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1127 mpssas_startup_increment(sc->sassc);
1129 /* notify CAM of a bus reset */
1130 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1133 /* complete and cleanup after all outstanding commands */
1134 mpssas_complete_all_commands(sc);
1136 mps_dprint(sc, MPS_INIT,
1137 "%s startup %u tm %u after command completion\n",
1138 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1140 /* zero all the target handles, since they may change after the
1141 * reset, and we have to rediscover all the targets and use the new
1144 for (i = 0; i < sc->sassc->maxtargets; i++) {
1145 if (sc->sassc->targets[i].outstanding != 0)
1146 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1147 i, sc->sassc->targets[i].outstanding);
1148 sc->sassc->targets[i].handle = 0x0;
1149 sc->sassc->targets[i].exp_dev_handle = 0x0;
1150 sc->sassc->targets[i].outstanding = 0;
1151 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command itself times out; logs the
 * event (the recovery escalation is on lines elided from this listing).
 */
mpssas_tm_timeout(void *data)
1158 struct mps_command *tm = data;
1159 struct mps_softc *sc = tm->cm_sc;
1161 mtx_assert(&sc->mps_mtx, MA_OWNED);
1163 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1164 "task mgmt %p timed out\n", tm);
/*
 * Completion of a LOGICAL UNIT RESET task-management command during error
 * recovery.  If no commands remain for the LUN, recovery of this LU is
 * done: announce AC_SENT_BDR and move on to the next timed-out command
 * (or free the TM).  If commands remain, the LU reset effectively failed
 * and recovery escalates to a full target reset.
 */
mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1171 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1172 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1173 unsigned int cm_count = 0;
1174 struct mps_command *cm;
1175 struct mpssas_target *targ;
/* Cancel the TM timeout armed when this reset was sent. */
1177 callout_stop(&tm->cm_callout);
1179 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1180 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1184 * Currently there should be no way we can hit this case. It only
1185 * happens when we have a failure to allocate chain frames, and
1186 * task management commands don't have S/G lists.
1187 * XXXSL So should it be an assertion?
1189 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1190 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1191 "This should not happen!\n", __func__, tm->cm_flags);
1192 mpssas_free_tm(sc, tm);
1196 if (reply == NULL) {
1197 mpssas_log_command(tm, MPS_RECOVERY,
1198 "NULL reset reply for tm %p\n", tm);
1199 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1200 /* this completion was due to a reset, just cleanup */
1201 targ->flags &= ~MPSSAS_TARGET_INRESET;
1203 mpssas_free_tm(sc, tm);
1206 /* we should have gotten a reply. */
1212 mpssas_log_command(tm, MPS_RECOVERY,
1213 "logical unit reset status 0x%x code 0x%x count %u\n",
1214 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1215 le32toh(reply->TerminationCount))
1217 /* See if there are any outstanding commands for this LUN.
1218 * This could be made more efficient by using a per-LU data
1219 * structure of some sort.
1221 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1222 if (cm->cm_lun == tm->cm_lun)
1226 if (cm_count == 0) {
1227 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1228 "logical unit %u finished recovery after reset\n",
1231 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1234 /* we've finished recovery for this logical unit. check and
1235 * see if some other logical unit has a timedout command
1236 * that needs to be processed.
1238 cm = TAILQ_FIRST(&targ->timedout_commands);
1240 mpssas_send_abort(sc, tm, cm);
1244 mpssas_free_tm(sc, tm);
1248 /* if we still have commands for this LUN, the reset
1249 * effectively failed, regardless of the status reported.
1250 * Escalate to a target reset.
1252 mpssas_log_command(tm, MPS_RECOVERY,
1253 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1255 mpssas_send_reset(sc, tm,
1256 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management request
 * (installed as tm->cm_complete by mpssas_send_reset()).  A target reset
 * is the escalation step after a failed LUN reset; if the target still
 * has outstanding commands afterwards, the reset failed and recovery is
 * escalated further (in lines elided from this excerpt).
 */
1261 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1263 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1264 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1265 struct mpssas_target *targ;
/* Disarm the TM timeout armed in mpssas_send_reset(). */
1267 callout_stop(&tm->cm_callout);
1269 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1270 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1274 * Currently there should be no way we can hit this case. It only
1275 * happens when we have a failure to allocate chain frames, and
1276 * task management commands don't have S/G lists.
1278 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1279 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1280 "This should not happen!\n", __func__, tm->cm_flags);
1281 mpssas_free_tm(sc, tm);
/* A NULL reply frame means the TM never completed normally. */
1285 if (reply == NULL) {
1286 mpssas_log_command(tm, MPS_RECOVERY,
1287 "NULL reset reply for tm %p\n", tm);
1288 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1289 /* this completion was due to a reset, just cleanup */
/*
 * NOTE(review): targ is read here but its assignment (presumably
 * targ = tm->cm_targ in elided lines) is not visible in this
 * excerpt -- confirm against the full source.
 */
1290 targ->flags &= ~MPSSAS_TARGET_INRESET;
1292 mpssas_free_tm(sc, tm);
1295 /* we should have gotten a reply. */
1301 mpssas_log_command(tm, MPS_RECOVERY,
1302 "target reset status 0x%x code 0x%x count %u\n",
1303 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1304 le32toh(reply->TerminationCount));
/* The reset has been delivered; clear the in-reset marker. */
1306 targ->flags &= ~MPSSAS_TARGET_INRESET;
1308 if (targ->outstanding == 0) {
1309 /* we've finished recovery for this target and all
1310 * of its logical units.
1312 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1313 "recovery finished after target reset\n");
/* Tell CAM a Bus Device Reset was delivered to this target. */
1315 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1319 mpssas_free_tm(sc, tm);
1322 /* after a target reset, if this target still has
1323 * outstanding commands, the reset effectively failed,
1324 * regardless of the status reported. escalate.
1326 mpssas_log_command(tm, MPS_RECOVERY,
1327 "target reset complete for tm %p, but still have %u command(s)\n",
1328 tm, targ->outstanding);
/* Seconds to wait for a reset-type TM before mpssas_tm_timeout() fires. */
1333 #define MPS_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) using the pre-allocated TM
 * command 'tm'.  Installs the matching completion handler, arms the
 * MPS_RESET_TIMEOUT callout, and maps the command to the hardware.
 * Returns an error for an unknown type or a zero device handle
 * (return statements are in lines elided from this excerpt).
 */
1336 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1338 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1339 struct mpssas_target *target;
1342 target = tm->cm_targ;
/* No device handle means the target is gone; nothing to reset. */
1343 if (target->handle == 0) {
1344 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1345 __func__, target->tid);
/* Fill in the MPI2 task-management request frame. */
1349 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1350 req->DevHandle = htole16(target->handle);
1351 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1352 req->TaskType = type;
1354 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1355 /* XXX Need to handle invalid LUNs */
1356 MPS_SET_LUN(req->LUN, tm->cm_lun);
1357 tm->cm_targ->logical_unit_resets++;
1358 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1359 "sending logical unit reset\n");
1360 tm->cm_complete = mpssas_logical_unit_reset_complete;
1362 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1363 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1364 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1365 tm->cm_targ->target_resets++;
1366 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1367 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1368 "sending target reset\n");
1369 tm->cm_complete = mpssas_target_reset_complete;
1372 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TM requests go out on the IOC's high-priority queue. */
1377 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1378 tm->cm_complete_data = (void *)tm;
/* Arm the recovery timeout; mpssas_tm_timeout fires if no completion. */
1380 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1381 mpssas_tm_timeout, tm);
1383 err = mps_map_command(sc, tm);
1385 mpssas_log_command(tm, MPS_RECOVERY,
1386 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task-management request
 * (installed by mpssas_send_abort()).  Decides, based on the target's
 * remaining timed-out commands, whether recovery is finished, whether
 * the next timed-out command should be aborted, or whether the abort
 * failed and we must escalate to a logical unit reset.
 */
1394 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1396 struct mps_command *cm;
1397 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1398 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1399 struct mpssas_target *targ;
/* Disarm the abort timeout armed in mpssas_send_abort(). */
1401 callout_stop(&tm->cm_callout);
1403 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1404 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1408 * Currently there should be no way we can hit this case. It only
1409 * happens when we have a failure to allocate chain frames, and
1410 * task management commands don't have S/G lists.
1412 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1413 mpssas_log_command(tm, MPS_RECOVERY,
1414 "cm_flags = %#x for abort %p TaskMID %u!\n",
1415 tm->cm_flags, tm, le16toh(req->TaskMID));
1416 mpssas_free_tm(sc, tm);
/* A NULL reply frame means the TM never completed normally. */
1420 if (reply == NULL) {
1421 mpssas_log_command(tm, MPS_RECOVERY,
1422 "NULL abort reply for tm %p TaskMID %u\n",
1423 tm, le16toh(req->TaskMID));
1424 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1425 /* this completion was due to a reset, just cleanup */
1427 mpssas_free_tm(sc, tm);
1430 /* we should have gotten a reply. */
1436 mpssas_log_command(tm, MPS_RECOVERY,
1437 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1438 le16toh(req->TaskMID),
1439 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1440 le32toh(reply->TerminationCount));
/* Peek at the next timed-out command, if any, on this target. */
1442 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1444 /* if there are no more timedout commands, we're done with
1445 * error recovery for this target.
1447 mpssas_log_command(tm, MPS_RECOVERY,
1448 "finished recovery after aborting TaskMID %u\n",
1449 le16toh(req->TaskMID));
1452 mpssas_free_tm(sc, tm);
/*
 * A different SMID at the queue head means the previously-aborted
 * command has completed and been removed, i.e. the abort worked.
 */
1454 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1455 /* abort success, but we have more timedout commands to abort */
1456 mpssas_log_command(tm, MPS_RECOVERY,
1457 "continuing recovery after aborting TaskMID %u\n",
1458 le16toh(req->TaskMID));
1460 mpssas_send_abort(sc, tm, cm);
1463 /* we didn't get a command completion, so the abort
1464 * failed as far as we're concerned. escalate.
1466 mpssas_log_command(tm, MPS_RECOVERY,
1467 "abort failed for TaskMID %u tm %p\n",
1468 le16toh(req->TaskMID), tm);
/* Escalate: reuse the same TM frame for a logical unit reset. */
1470 mpssas_send_reset(sc, tm,
1471 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to wait for an ABORT TASK TM before mpssas_tm_timeout() fires. */
1475 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK task-management request for the
 * timed-out command 'cm', using the pre-allocated TM command 'tm'.
 * Installs mpssas_abort_complete() as the completion handler, arms the
 * MPS_ABORT_TIMEOUT callout, and maps the command to the hardware.
 */
1478 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1480 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1481 struct mpssas_target *targ;
/*
 * NOTE(review): targ is read below but its assignment (presumably
 * targ = cm->cm_targ in elided lines) is not visible in this
 * excerpt -- confirm against the full source.
 */
1485 if (targ->handle == 0) {
1486 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1487 __func__, cm->cm_ccb->ccb_h.target_id);
1491 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1492 "Aborting command %p\n", cm);
/* Fill in the MPI2 task-management request frame. */
1494 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1495 req->DevHandle = htole16(targ->handle);
1496 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1497 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1499 /* XXX Need to handle invalid LUNs */
1500 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the command to abort by its SMID. */
1502 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1505 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1506 tm->cm_complete = mpssas_abort_complete;
1507 tm->cm_complete_data = (void *)tm;
1508 tm->cm_targ = cm->cm_targ;
1509 tm->cm_lun = cm->cm_lun;
1511 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1512 mpssas_tm_timeout, tm);
1516 err = mps_map_command(sc, tm);
1518 mpssas_log_command(tm, MPS_RECOVERY,
1519 "error %d sending abort for cm %p SMID %u\n",
/* NOTE(review): req->TaskMID is little-endian here (htole16 above); on big-endian hosts this %u would print byte-swapped -- consider le16toh. */
1520 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O command (armed by
 * mpssas_action_scsiio()).  Marks the command TIMEDOUT, queues it on the
 * target's timedout_commands list, and starts recovery by allocating a
 * TM command and aborting the first timed-out command -- unless recovery
 * is already in progress for the target, in which case the command just
 * waits its turn.
 */
1526 mpssas_scsiio_timeout(void *data)
1528 struct mps_softc *sc;
1529 struct mps_command *cm;
1530 struct mpssas_target *targ;
1532 cm = (struct mps_command *)data;
1536 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* BUG(review): the format says "cm %p" but the argument passed is sc, not cm -- the logged pointer is the softc. Should pass cm. */
1538 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1541 * Run the interrupt handler to make sure it's not pending. This
1542 * isn't perfect because the command could have already completed
1543 * and been re-used, though this is unlikely.
1545 mps_intr_locked(sc);
/* If polling the interrupt handler completed it, it was a near miss. */
1546 if (cm->cm_state == MPS_CM_STATE_FREE) {
1547 mpssas_log_command(cm, MPS_XINFO,
1548 "SCSI command %p almost timed out\n", cm);
1552 if (cm->cm_ccb == NULL) {
1553 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1557 mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1563 /* XXX first, check the firmware state, to see if it's still
1564 * operational. if not, do a diag reset.
1566 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1567 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1568 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
/* targ->tm non-NULL means recovery is already running on this target. */
1570 if (targ->tm != NULL) {
1571 /* target already in recovery, just queue up another
1572 * timedout command to be processed later.
1574 mps_dprint(sc, MPS_RECOVERY,
1575 "queued timedout cm %p for processing by tm %p\n",
1578 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1579 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1582 /* start recovery by aborting the first timedout command */
1583 mpssas_send_abort(sc, targ->tm, cm);
1586 /* XXX queue this target up for recovery once a TM becomes
1587 * available. The firmware only has a limited number of
1588 * HighPriority credits for the high priority requests used
1589 * for task management, and we ran out.
1591 * Isilon: don't worry about this for now, since we have
1592 * more credits than disks in an enclosure, and limit
1593 * ourselves to one TM per target for recovery.
1595 mps_dprint(sc, MPS_RECOVERY,
1596 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI IO request (direction,
 * tagging, LUN, CDB, optional EEDP/T10-PI fields), arm the per-command
 * timeout, and hand the command to the hardware via mps_map_command().
 */
1602 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1604 MPI2_SCSI_IO_REQUEST *req;
1605 struct ccb_scsiio *csio;
1606 struct mps_softc *sc;
1607 struct mpssas_target *targ;
1608 struct mpssas_lun *lun;
1609 struct mps_command *cm;
1610 uint8_t i, lba_byte, *ref_tag_addr;
1611 uint16_t eedp_flags;
1612 uint32_t mpi_control;
1616 mtx_assert(&sc->mps_mtx, MA_OWNED);
1619 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1620 ("Target %d out of bounds in XPT_SCSI_IO\n",
1621 csio->ccb_h.target_id));
1622 targ = &sassc->targets[csio->ccb_h.target_id];
1623 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0 means the device has been removed or never attached. */
1624 if (targ->handle == 0x0) {
1625 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1626 __func__, csio->ccb_h.target_id);
1627 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component members are addressed through the volume, not directly. */
1631 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1632 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1633 "supported %u\n", __func__, csio->ccb_h.target_id);
1634 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1639 * Sometimes, it is possible to get a command that is not "In
1640 * Progress" and was actually aborted by the upper layer. Check for
1641 * this here and complete the command without error.
1643 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1644 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1645 "target %u\n", __func__, csio->ccb_h.target_id);
1650 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1651 * that the volume has timed out. We want volumes to be enumerated
1652 * until they are deleted/removed, not just failed.
1654 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1655 if (targ->devinfo == 0)
1656 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
1658 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
/* Refuse new I/O once the controller is shutting down. */
1663 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1664 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1665 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1670 cm = mps_alloc_command(sc);
/* Out of commands (or mid diag-reset): freeze the SIM queue and requeue. */
1671 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1673 mps_free_command(sc, cm);
1675 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1676 xpt_freeze_simq(sassc->sim, 1);
1677 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1679 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1680 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI IO request frame. */
1685 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1686 bzero(req, sizeof(*req));
1687 req->DevHandle = htole16(targ->handle);
1688 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1690 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1691 req->SenseBufferLength = MPS_SENSE_LEN;
1693 req->ChainOffset = 0;
1694 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1699 req->DataLength = htole32(csio->dxfer_len);
1700 req->BidirectionalDataLength = 0;
1701 req->IoFlags = htole16(csio->cdb_len);
1704 /* Note: BiDirectional transfers are not supported */
1705 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1707 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1708 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1711 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1712 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1716 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field (units of 4 bytes). */
1720 if (csio->cdb_len == 32)
1721 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1723 * It looks like the hardware doesn't require an explicit tag
1724 * number for each transaction. SAM Task Management not supported
1727 switch (csio->tag_action) {
1728 case MSG_HEAD_OF_Q_TAG:
1729 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1731 case MSG_ORDERED_Q_TAG:
1732 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1735 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1737 case CAM_TAG_ACTION_NONE:
1738 case MSG_SIMPLE_Q_TAG:
1740 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Per-target Transport Layer Retries setting (managed in scsiio_complete). */
1743 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1744 req->Control = htole32(mpi_control);
1745 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1746 mps_free_command(sc, cm);
1747 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from wherever CAM put it (pointer vs. inline bytes). */
1752 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1753 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1755 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
/* NOTE(review): IoFlags was already set to the same value at line 1701 above -- redundant but harmless. */
1756 req->IoFlags = htole16(csio->cdb_len);
1759 * Check if EEDP is supported and enabled. If it is then check if the
1760 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1761 * is formatted for EEDP support. If all of this is true, set CDB up
1762 * for EEDP transfer.
1764 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1765 if (sc->eedp_enabled && eedp_flags) {
1766 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1767 if (lun->lun_id == csio->ccb_h.target_lun) {
1772 if ((lun != NULL) && (lun->eedp_formatted)) {
1773 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1774 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1775 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1776 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1777 req->EEDPFlags = htole16(eedp_flags);
1780 * If CDB less than 32, fill in Primary Ref Tag with
1781 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1782 * already there. Also, set protection bit. FreeBSD
1783 * currently does not support CDBs bigger than 16, but
1784 * the code doesn't hurt, and will be here for the
1787 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 of a 16-byte CDB, byte 2 otherwise. */
1788 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1789 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1790 PrimaryReferenceTag;
1791 for (i = 0; i < 4; i++) {
1793 req->CDB.CDB32[lba_byte + i];
1796 req->CDB.EEDP32.PrimaryReferenceTag =
1797 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1798 req->CDB.EEDP32.PrimaryApplicationTagMask =
1800 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1804 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1805 req->EEDPFlags = htole16(eedp_flags);
1806 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Set up the data phase: S/G list is built from the CCB at map time. */
1812 cm->cm_length = csio->dxfer_len;
1813 if (cm->cm_length != 0) {
1815 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1819 cm->cm_sge = &req->SGL;
/* SGL space left in the 32-word request frame after the 24-word header. */
1820 cm->cm_sglsize = (32 - 24) * 4;
1821 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1822 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1823 cm->cm_complete = mpssas_scsiio_complete;
1824 cm->cm_complete_data = ccb;
1826 cm->cm_lun = csio->ccb_h.target_lun;
1830 * If HBA is a WD and the command is not for a retry, try to build a
1831 * direct I/O message. If failed, or the command is for a retry, send
1832 * the I/O to the IR volume itself.
1834 if (sc->WD_valid_config) {
1835 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1836 mpssas_direct_drive_io(sassc, cm, ccb);
1838 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
/* Arm the per-command timeout; CCB timeout is in milliseconds. */
1842 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1843 mpssas_scsiio_timeout, cm);
1846 targ->outstanding++;
1847 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1848 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1850 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1851 __func__, cm, ccb, targ->outstanding);
1853 mps_map_command(sc, cm);
/*
 * Translate an MPI2 task-management response code into a human-readable
 * description and emit it at MPS_XINFO debug level.  Purely diagnostic;
 * has no effect on command processing.
 */
1858 mps_response_code(struct mps_softc *sc, u8 response_code)
1862 switch (response_code) {
1863 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1864 desc = "task management request completed";
1866 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1867 desc = "invalid frame";
1869 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1870 desc = "task management request not supported";
1872 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1873 desc = "task management request failed";
1875 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1876 desc = "task management request succeeded";
1878 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1879 desc = "invalid lun";
1882 desc = "overlapped tag attempted";
1884 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1885 desc = "task queued, however not sent to target";
1891 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1892 response_code, desc);
1895 * mps_sc_failed_io_info - translate a non-successful SCSI_IO request's
/*
 * Decode and log (at MPS_XINFO) the IOC status, SCSI status, and SCSI
 * state of a failed SCSI I/O reply, print the sense buffer when
 * autosense data is valid, and decode the response-info byte when
 * present.  Diagnostic only; does not alter the command or reply.
 */
1898 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1899 Mpi2SCSIIOReply_t *mpi_reply)
1903 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1904 MPI2_IOCSTATUS_MASK;
1905 u8 scsi_state = mpi_reply->SCSIState;
1906 u8 scsi_status = mpi_reply->SCSIStatus;
1907 char *desc_ioc_state = NULL;
1908 char *desc_scsi_status = NULL;
/* The scsi_state description is built up with strcat in sc->tmp_string. */
1909 char *desc_scsi_state = sc->tmp_string;
1910 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is special-cased; the action taken is in elided lines. */
1912 if (log_info == 0x31170000)
/* Map the IOC status field to a short description. */
1915 switch (ioc_status) {
1916 case MPI2_IOCSTATUS_SUCCESS:
1917 desc_ioc_state = "success";
1919 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1920 desc_ioc_state = "invalid function";
1922 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1923 desc_ioc_state = "scsi recovered error";
1925 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1926 desc_ioc_state = "scsi invalid dev handle";
1928 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1929 desc_ioc_state = "scsi device not there";
1931 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1932 desc_ioc_state = "scsi data overrun";
1934 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1935 desc_ioc_state = "scsi data underrun";
1937 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1938 desc_ioc_state = "scsi io data error";
1940 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1941 desc_ioc_state = "scsi protocol error";
1943 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1944 desc_ioc_state = "scsi task terminated";
1946 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1947 desc_ioc_state = "scsi residual mismatch";
1949 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1950 desc_ioc_state = "scsi task mgmt failed";
1952 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1953 desc_ioc_state = "scsi ioc terminated";
1955 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1956 desc_ioc_state = "scsi ext terminated";
1958 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1959 desc_ioc_state = "eedp guard error";
1961 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1962 desc_ioc_state = "eedp ref tag error";
1964 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1965 desc_ioc_state = "eedp app tag error";
1968 desc_ioc_state = "unknown";
/* Map the SCSI status byte to a short description. */
1972 switch (scsi_status) {
1973 case MPI2_SCSI_STATUS_GOOD:
1974 desc_scsi_status = "good";
1976 case MPI2_SCSI_STATUS_CHECK_CONDITION:
1977 desc_scsi_status = "check condition";
1979 case MPI2_SCSI_STATUS_CONDITION_MET:
1980 desc_scsi_status = "condition met";
1982 case MPI2_SCSI_STATUS_BUSY:
1983 desc_scsi_status = "busy";
1985 case MPI2_SCSI_STATUS_INTERMEDIATE:
1986 desc_scsi_status = "intermediate";
1988 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
1989 desc_scsi_status = "intermediate condmet";
1991 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
1992 desc_scsi_status = "reservation conflict";
1994 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
1995 desc_scsi_status = "command terminated";
1997 case MPI2_SCSI_STATUS_TASK_SET_FULL:
1998 desc_scsi_status = "task set full";
2000 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2001 desc_scsi_status = "aca active";
2003 case MPI2_SCSI_STATUS_TASK_ABORTED:
2004 desc_scsi_status = "task aborted";
2007 desc_scsi_status = "unknown";
2011 desc_scsi_state[0] = '\0';
/*
 * NOTE(review): this reassigns desc_scsi_state to a string literal;
 * the guard (presumably a scsi_state == 0 check on the elided line
 * 2012) must prevent the strcat calls below from writing into the
 * literal -- confirm against the full source.
 */
2013 desc_scsi_state = " ";
/* Append one word per SCSI-state flag set in the reply. */
2014 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2015 strcat(desc_scsi_state, "response info ");
2016 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2017 strcat(desc_scsi_state, "state terminated ");
2018 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2019 strcat(desc_scsi_state, "no status ");
2020 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2021 strcat(desc_scsi_state, "autosense failed ");
2022 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2023 strcat(desc_scsi_state, "autosense valid ");
2025 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2026 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2027 /* We can add more detail about underflow data here
2030 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2031 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2032 desc_scsi_state, scsi_state);
/* Dump the sense buffer only when XINFO debugging is on and sense is valid. */
2034 if (sc->mps_debug & MPS_XINFO &&
2035 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2036 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2037 scsi_sense_print(csio);
2038 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
/* Byte 0 of the response info dword carries the TM response code. */
2041 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2042 response_info = le32toh(mpi_reply->ResponseInfo);
2043 response_bytes = (u8 *)&response_info;
2044 mps_response_code(sc,response_bytes[0]);
2049 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2051 MPI2_SCSI_IO_REPLY *rep;
2053 struct ccb_scsiio *csio;
2054 struct mpssas_softc *sassc;
2055 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2056 u8 *TLR_bits, TLR_on;
2061 mps_dprint(sc, MPS_TRACE,
2062 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2063 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2064 cm->cm_targ->outstanding);
2066 callout_stop(&cm->cm_callout);
2067 mtx_assert(&sc->mps_mtx, MA_OWNED);
2070 ccb = cm->cm_complete_data;
2072 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2074 * XXX KDM if the chain allocation fails, does it matter if we do
2075 * the sync and unload here? It is simpler to do it in every case,
2076 * assuming it doesn't cause problems.
2078 if (cm->cm_data != NULL) {
2079 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2080 dir = BUS_DMASYNC_POSTREAD;
2081 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2082 dir = BUS_DMASYNC_POSTWRITE;
2083 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2084 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2087 cm->cm_targ->completed++;
2088 cm->cm_targ->outstanding--;
2089 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2090 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2092 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2093 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2094 if (cm->cm_reply != NULL)
2095 mpssas_log_command(cm, MPS_RECOVERY,
2096 "completed timedout cm %p ccb %p during recovery "
2097 "ioc %x scsi %x state %x xfer %u\n",
2099 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2100 le32toh(rep->TransferCount));
2102 mpssas_log_command(cm, MPS_RECOVERY,
2103 "completed timedout cm %p ccb %p during recovery\n",
2105 } else if (cm->cm_targ->tm != NULL) {
2106 if (cm->cm_reply != NULL)
2107 mpssas_log_command(cm, MPS_RECOVERY,
2108 "completed cm %p ccb %p during recovery "
2109 "ioc %x scsi %x state %x xfer %u\n",
2111 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2112 le32toh(rep->TransferCount));
2114 mpssas_log_command(cm, MPS_RECOVERY,
2115 "completed cm %p ccb %p during recovery\n",
2117 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2118 mpssas_log_command(cm, MPS_RECOVERY,
2119 "reset completed cm %p ccb %p\n",
2123 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2125 * We ran into an error after we tried to map the command,
2126 * so we're getting a callback without queueing the command
2127 * to the hardware. So we set the status here, and it will
2128 * be retained below. We'll go through the "fast path",
2129 * because there can be no reply when we haven't actually
2130 * gone out to the hardware.
2132 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2135 * Currently the only error included in the mask is
2136 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2137 * chain frames. We need to freeze the queue until we get
2138 * a command that completed without this error, which will
2139 * hopefully have some chain frames attached that we can
2140 * use. If we wanted to get smarter about it, we would
2141 * only unfreeze the queue in this condition when we're
2142 * sure that we're getting some chain frames back. That's
2143 * probably unnecessary.
2145 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2146 xpt_freeze_simq(sassc->sim, 1);
2147 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2148 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2149 "freezing SIM queue\n");
2154 * If this is a Start Stop Unit command and it was issued by the driver
2155 * during shutdown, decrement the refcount to account for all of the
2156 * commands that were sent. All SSU commands should be completed before
2157 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2160 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2161 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2165 /* Take the fast path to completion */
2166 if (cm->cm_reply == NULL) {
2167 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2168 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2169 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2171 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2172 ccb->csio.scsi_status = SCSI_STATUS_OK;
2174 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2175 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2176 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2177 mps_dprint(sc, MPS_XINFO,
2178 "Unfreezing SIM queue\n");
2183 * There are two scenarios where the status won't be
2184 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2185 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2187 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2189 * Freeze the dev queue so that commands are
2190 * executed in the correct order after error
2193 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2194 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2196 mps_free_command(sc, cm);
2201 mpssas_log_command(cm, MPS_XINFO,
2202 "ioc %x scsi %x state %x xfer %u\n",
2203 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2204 le32toh(rep->TransferCount));
2207 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2208 * Volume if an error occurred (normal I/O retry). Use the original
2209 * CCB, but set a flag that this will be a retry so that it's sent to
2210 * the original volume. Free the command but reuse the CCB.
2212 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2213 mps_free_command(sc, cm);
2214 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2215 mpssas_action_scsiio(sassc, ccb);
2218 ccb->ccb_h.sim_priv.entries[0].field = 0;
2220 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2221 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2222 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2224 case MPI2_IOCSTATUS_SUCCESS:
2225 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2227 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2228 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2229 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2231 /* Completion failed at the transport level. */
2232 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2233 MPI2_SCSI_STATE_TERMINATED)) {
2234 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2238 /* In a modern packetized environment, an autosense failure
2239 * implies that there's not much else that can be done to
2240 * recover the command.
2242 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2243 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2248 * CAM doesn't care about SAS Response Info data, but if this is
2249 * the state check if TLR should be done. If not, clear the
2250 * TLR_bits for the target.
2252 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2253 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2254 MPS_SCSI_RI_INVALID_FRAME)) {
2255 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2256 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2260 * Intentionally override the normal SCSI status reporting
2261 * for these two cases. These are likely to happen in a
2262 * multi-initiator environment, and we want to make sure that
2263 * CAM retries these commands rather than fail them.
2265 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2266 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2267 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2271 /* Handle normal status and sense */
2272 csio->scsi_status = rep->SCSIStatus;
2273 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2274 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2276 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2278 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2279 int sense_len, returned_sense_len;
2281 returned_sense_len = min(le32toh(rep->SenseCount),
2282 sizeof(struct scsi_sense_data));
2283 if (returned_sense_len < ccb->csio.sense_len)
2284 ccb->csio.sense_resid = ccb->csio.sense_len -
2287 ccb->csio.sense_resid = 0;
2289 sense_len = min(returned_sense_len,
2290 ccb->csio.sense_len - ccb->csio.sense_resid);
2291 bzero(&ccb->csio.sense_data,
2292 sizeof(ccb->csio.sense_data));
2293 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2294 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2298 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2299 * and it's page code 0 (Supported Page List), and there is
2300 * inquiry data, and this is for a sequential access device, and
2301 * the device is an SSP target, and TLR is supported by the
2302 * controller, turn the TLR_bits value ON if page 0x90 is
2305 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2306 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2307 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2308 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2309 (csio->data_ptr != NULL) &&
2310 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2311 (sc->control_TLR) &&
2312 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2313 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2314 vpd_list = (struct scsi_vpd_supported_page_list *)
2316 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2318 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2319 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2320 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2321 csio->cdb_io.cdb_bytes[4];
2322 alloc_len -= csio->resid;
2323 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2324 if (vpd_list->list[i] == 0x90) {
2331 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2332 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2334 * If devinfo is 0 this will be a volume. In that case don't
2335 * tell CAM that the volume is not there. We want volumes to
2336 * be enumerated until they are deleted/removed, not just
2339 if (cm->cm_targ->devinfo == 0)
2340 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2342 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2344 case MPI2_IOCSTATUS_INVALID_SGL:
2345 mps_print_scsiio_cmd(sc, cm);
2346 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2348 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2350 * This is one of the responses that comes back when an I/O
2351 * has been aborted. If it is because of a timeout that we
2352 * initiated, just set the status to CAM_CMD_TIMEOUT.
2353 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2354 * command is the same (it gets retried, subject to the
2355 * retry counter), the only difference is what gets printed
2358 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2359 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2361 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2363 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2364 /* resid is ignored for this condition */
2366 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2368 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2369 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2371 * Since these are generally external (i.e. hopefully
2372 * transient transport-related) errors, retry these without
2373 * decrementing the retry count.
2375 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2376 mpssas_log_command(cm, MPS_INFO,
2377 "terminated ioc %x scsi %x state %x xfer %u\n",
2378 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2379 le32toh(rep->TransferCount));
2381 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2382 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2383 case MPI2_IOCSTATUS_INVALID_VPID:
2384 case MPI2_IOCSTATUS_INVALID_FIELD:
2385 case MPI2_IOCSTATUS_INVALID_STATE:
2386 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2387 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2388 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2389 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2390 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2392 mpssas_log_command(cm, MPS_XINFO,
2393 "completed ioc %x scsi %x state %x xfer %u\n",
2394 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2395 le32toh(rep->TransferCount));
2396 csio->resid = cm->cm_length;
2397 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2401 mps_sc_failed_io_info(sc,csio,rep);
2403 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2404 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2405 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2406 mps_dprint(sc, MPS_XINFO, "Command completed, "
2407 "unfreezing SIM queue\n");
2410 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2411 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2412 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2415 mps_free_command(sc, cm);
2419 /* All Request reached here are Endian safe */
/*
 * mpssas_direct_drive_io -- WarpDrive/IR direct-drive I/O translation.
 *
 * If the request is a READ/WRITE (6-, 10-, 12- or 16-byte CDB) addressed
 * to the direct-drive volume handle (sc->DD_dev_handle), stays inside the
 * volume's LBA range, and does not cross a stripe boundary, rewrite the
 * request in place so it goes straight to the member physical disk: the
 * DevHandle is replaced with the PhysDisk handle for the stripe's column
 * and the CDB's virtual LBA is rewritten as a physical LBA.  Requests
 * that do not qualify are left untouched and serviced by the IR volume.
 */
2421 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2423 pMpi2SCSIIORequest_t pIO_req;
2424 struct mps_softc *sc = sassc->sc;
2426 uint32_t physLBA, stripe_offset, stripe_unit;
2427 uint32_t io_size, column;
2428 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2431 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2432 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2433 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2434 * bit different than the 10/16 CDBs, handle them separately.
2436 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2437 CDB = pIO_req->CDB.CDB32;
2440 * Handle 6 byte CDBs.
2442 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2443 (CDB[0] == WRITE_6))) {
2445 * Get the transfer size in blocks.
2447 io_size = (cm->cm_length >> sc->DD_block_exponent);
2450 * Get virtual LBA given in the CDB.
2452 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2453 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2456 * Check that LBA range for I/O does not exceed volume's
2459 if ((virtLBA + (uint64_t)io_size - 1) <=
2462 * Check if the I/O crosses a stripe boundary. If not,
2463 * translate the virtual LBA to a physical LBA and set
2464 * the DevHandle for the PhysDisk to be used. If it
2465 * does cross a boundry, do normal I/O. To get the
2466 * right DevHandle to use, get the map number for the
2467 * column, then use that map number to look up the
2468 * DevHandle of the PhysDisk.
2470 stripe_offset = (uint32_t)virtLBA &
2471 (sc->DD_stripe_size - 1);
2472 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2473 physLBA = (uint32_t)virtLBA >>
2474 sc->DD_stripe_exponent;
2475 stripe_unit = physLBA / sc->DD_num_phys_disks;
2476 column = physLBA % sc->DD_num_phys_disks;
2477 pIO_req->DevHandle =
2478 htole16(sc->DD_column_map[column].dev_handle);
2479 /* ???? Is this endian safe*/
2480 cm->cm_desc.SCSIIO.DevHandle =
2483 physLBA = (stripe_unit <<
2484 sc->DD_stripe_exponent) + stripe_offset;
/* Rewrite the 21-bit LBA field of the 6-byte CDB (bytes 1-3). */
2485 ptrLBA = &pIO_req->CDB.CDB32[1];
2486 physLBA_byte = (uint8_t)(physLBA >> 16);
2487 *ptrLBA = physLBA_byte;
2488 ptrLBA = &pIO_req->CDB.CDB32[2];
2489 physLBA_byte = (uint8_t)(physLBA >> 8);
2490 *ptrLBA = physLBA_byte;
2491 ptrLBA = &pIO_req->CDB.CDB32[3];
2492 physLBA_byte = (uint8_t)physLBA;
2493 *ptrLBA = physLBA_byte;
2496 * Set flag that Direct Drive I/O is
2499 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2506 * Handle 10, 12 or 16 byte CDBs.
2508 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2509 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2510 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2511 (CDB[0] == WRITE_12))) {
2513 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2514 * are 0. If not, this is accessing beyond 2TB so handle it in
2515 * the else section. 10-byte and 12-byte CDB's are OK.
2516 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2517 * ready to accept 12byte CDB for Direct IOs.
2519 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2520 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2521 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2523 * Get the transfer size in blocks.
2525 io_size = (cm->cm_length >> sc->DD_block_exponent);
2528 * Get virtual LBA. Point to correct lower 4 bytes of
2529 * LBA in the CDB depending on command.
2531 lba_idx = ((CDB[0] == READ_12) ||
2532 (CDB[0] == WRITE_12) ||
2533 (CDB[0] == READ_10) ||
2534 (CDB[0] == WRITE_10))? 2 : 6;
2535 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2536 ((uint64_t)CDB[lba_idx + 1] << 16) |
2537 ((uint64_t)CDB[lba_idx + 2] << 8) |
2538 (uint64_t)CDB[lba_idx + 3];
2541 * Check that LBA range for I/O does not exceed volume's
2544 if ((virtLBA + (uint64_t)io_size - 1) <=
2547 * Check if the I/O crosses a stripe boundary.
2548 * If not, translate the virtual LBA to a
2549 * physical LBA and set the DevHandle for the
2550 * PhysDisk to be used. If it does cross a
2551 * boundry, do normal I/O. To get the right
2552 * DevHandle to use, get the map number for the
2553 * column, then use that map number to look up
2554 * the DevHandle of the PhysDisk.
2556 stripe_offset = (uint32_t)virtLBA &
2557 (sc->DD_stripe_size - 1);
2558 if ((stripe_offset + io_size) <=
2559 sc->DD_stripe_size) {
2560 physLBA = (uint32_t)virtLBA >>
2561 sc->DD_stripe_exponent;
2562 stripe_unit = physLBA /
2563 sc->DD_num_phys_disks;
2565 sc->DD_num_phys_disks;
2566 pIO_req->DevHandle =
2567 htole16(sc->DD_column_map[column].
2569 cm->cm_desc.SCSIIO.DevHandle =
2572 physLBA = (stripe_unit <<
2573 sc->DD_stripe_exponent) +
2576 &pIO_req->CDB.CDB32[lba_idx];
2577 physLBA_byte = (uint8_t)(physLBA >> 24);
2578 *ptrLBA = physLBA_byte;
2580 &pIO_req->CDB.CDB32[lba_idx + 1];
2581 physLBA_byte = (uint8_t)(physLBA >> 16);
2582 *ptrLBA = physLBA_byte;
2584 &pIO_req->CDB.CDB32[lba_idx + 2];
2585 physLBA_byte = (uint8_t)(physLBA >> 8);
2586 *ptrLBA = physLBA_byte;
2588 &pIO_req->CDB.CDB32[lba_idx + 3];
2589 physLBA_byte = (uint8_t)physLBA;
2590 *ptrLBA = physLBA_byte;
2593 * Set flag that Direct Drive I/O is
2596 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2601 * 16-byte CDB and the upper 4 bytes of the CDB are not
2602 * 0. Get the transfer size in blocks.
2604 io_size = (cm->cm_length >> sc->DD_block_exponent);
/*
 * BUG FIX: CDB[2] is the most-significant byte of the 8-byte
 * LBA in a 16-byte CDB (bytes 2-9), so it must be shifted by
 * 56 bits, not 54.  The remaining bytes step down by 8, which
 * confirms the intended progression (56, 48, 40, ...).
 */
2609 virtLBA = ((uint64_t)CDB[2] << 56) |
2610 ((uint64_t)CDB[3] << 48) |
2611 ((uint64_t)CDB[4] << 40) |
2612 ((uint64_t)CDB[5] << 32) |
2613 ((uint64_t)CDB[6] << 24) |
2614 ((uint64_t)CDB[7] << 16) |
2615 ((uint64_t)CDB[8] << 8) |
2619 * Check that LBA range for I/O does not exceed volume's
2622 if ((virtLBA + (uint64_t)io_size - 1) <=
2625 * Check if the I/O crosses a stripe boundary.
2626 * If not, translate the virtual LBA to a
2627 * physical LBA and set the DevHandle for the
2628 * PhysDisk to be used. If it does cross a
2629 * boundry, do normal I/O. To get the right
2630 * DevHandle to use, get the map number for the
2631 * column, then use that map number to look up
2632 * the DevHandle of the PhysDisk.
2634 stripe_offset = (uint32_t)virtLBA &
2635 (sc->DD_stripe_size - 1);
2636 if ((stripe_offset + io_size) <=
2637 sc->DD_stripe_size) {
2638 physLBA = (uint32_t)(virtLBA >>
2639 sc->DD_stripe_exponent);
2640 stripe_unit = physLBA /
2641 sc->DD_num_phys_disks;
2643 sc->DD_num_phys_disks;
2644 pIO_req->DevHandle =
2645 htole16(sc->DD_column_map[column].
2647 cm->cm_desc.SCSIIO.DevHandle =
2650 physLBA = (stripe_unit <<
2651 sc->DD_stripe_exponent) +
2655 * Set upper 4 bytes of LBA to 0. We
2656 * assume that the phys disks are less
2657 * than 2 TB's in size. Then, set the
2660 pIO_req->CDB.CDB32[2] = 0;
2661 pIO_req->CDB.CDB32[3] = 0;
2662 pIO_req->CDB.CDB32[4] = 0;
2663 pIO_req->CDB.CDB32[5] = 0;
2664 ptrLBA = &pIO_req->CDB.CDB32[6];
2665 physLBA_byte = (uint8_t)(physLBA >> 24);
2666 *ptrLBA = physLBA_byte;
2667 ptrLBA = &pIO_req->CDB.CDB32[7];
2668 physLBA_byte = (uint8_t)(physLBA >> 16);
2669 *ptrLBA = physLBA_byte;
2670 ptrLBA = &pIO_req->CDB.CDB32[8];
2671 physLBA_byte = (uint8_t)(physLBA >> 8);
2672 *ptrLBA = physLBA_byte;
2673 ptrLBA = &pIO_req->CDB.CDB32[9];
2674 physLBA_byte = (uint8_t)physLBA;
2675 *ptrLBA = physLBA_byte;
2678 * Set flag that Direct Drive I/O is
2681 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2688 #if __FreeBSD_version >= 900026
/*
 * mpssas_smpio_complete -- completion handler for an SMP passthrough
 * command issued by mpssas_send_smpcmd().
 *
 * Validates the command flags and the reply frame, logs the target SAS
 * address, translates the chip's IOCStatus/SASStatus and the SMP frame
 * result into a CAM CCB status, then syncs/unloads the DMA map and frees
 * the command.
 */
2690 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2692 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2693 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2697 ccb = cm->cm_complete_data;
2700 * Currently there should be no way we can hit this case. It only
2701 * happens when we have a failure to allocate chain frames, and SMP
2702 * commands require two S/G elements only. That should be handled
2703 * in the standard request size.
2705 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2706 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2707 __func__, cm->cm_flags);
2708 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2712 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2714 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2715 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request. */
2719 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2720 sasaddr = le32toh(req->SASAddress.Low);
2721 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2723 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2724 MPI2_IOCSTATUS_SUCCESS ||
2725 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2726 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2727 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2728 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2732 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2733 "%#jx completed successfully\n", __func__,
2734 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame carries the function result. */
2736 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2737 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2739 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2743 * We sync in both directions because we had DMAs in the S/G list
2744 * in both directions.
2746 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2747 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2748 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2749 mps_free_command(sc, cm);
/*
 * mpssas_send_smpcmd -- build and map an SMP passthrough request for the
 * expander at 'sasaddr' from an XPT_SMP_IO CCB.
 *
 * Resolves the request/response buffers from the CCB (virtual addresses or
 * single-segment S/G lists only; physical addresses are rejected), fills in
 * an MPI2 SMP_PASSTHROUGH request, and hands the command to
 * mps_map_command() with a two-element uio covering request and response.
 * Completion is handled by mpssas_smpio_complete().
 */
2754 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2756 struct mps_command *cm;
2757 uint8_t *request, *response;
2758 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2759 struct mps_softc *sc;
2768 * XXX We don't yet support physical addresses here.
2770 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2771 case CAM_DATA_PADDR:
2772 case CAM_DATA_SG_PADDR:
2773 mps_dprint(sc, MPS_ERROR,
2774 "%s: physical addresses not supported\n", __func__);
2775 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2780 * The chip does not support more than one buffer for the
2781 * request or response.
2783 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2784 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2785 mps_dprint(sc, MPS_ERROR,
2786 "%s: multiple request or response "
2787 "buffer segments not supported for SMP\n",
2789 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2795 * The CAM_SCATTER_VALID flag was originally implemented
2796 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2797 * We have two. So, just take that flag to mean that we
2798 * might have S/G lists, and look at the S/G segment count
2799 * to figure out whether that is the case for each individual
2802 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2803 bus_dma_segment_t *req_sg;
2805 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2806 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2808 request = ccb->smpio.smp_request;
2810 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2811 bus_dma_segment_t *rsp_sg;
2813 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2814 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2816 response = ccb->smpio.smp_response;
2818 case CAM_DATA_VADDR:
2819 request = ccb->smpio.smp_request;
2820 response = ccb->smpio.smp_response;
2823 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2828 cm = mps_alloc_command(sc);
2830 mps_dprint(sc, MPS_ERROR,
2831 "%s: cannot allocate command\n", __func__);
2832 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2837 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2838 bzero(req, sizeof(*req));
2839 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2841 /* Allow the chip to use any route to this SAS address. */
2842 req->PhysicalPort = 0xff;
2844 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2846 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2848 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2849 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2851 mpi_init_sge(cm, req, &req->SGL);
2854 * Set up a uio to pass into mps_map_command(). This allows us to
2855 * do one map command, and one busdma call in there.
2857 cm->cm_uio.uio_iov = cm->cm_iovec;
2858 cm->cm_uio.uio_iovcnt = 2;
2859 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2862 * The read/write flag isn't used by busdma, but set it just in
2863 * case. This isn't exactly accurate, either, since we're going in
2866 cm->cm_uio.uio_rw = UIO_WRITE;
2868 cm->cm_iovec[0].iov_base = request;
2869 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2870 cm->cm_iovec[1].iov_base = response;
2871 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2873 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2874 cm->cm_iovec[1].iov_len;
2877 * Trigger a warning message in mps_data_cb() for the user if we
2878 * wind up exceeding two S/G segments. The chip expects one
2879 * segment for the request and another for the response.
2881 cm->cm_max_segs = 2;
2883 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2884 cm->cm_complete = mpssas_smpio_complete;
2885 cm->cm_complete_data = ccb;
2888 * Tell the mapping code that we're using a uio, and that this is
2889 * an SMP passthrough request. There is a little special-case
2890 * logic there (in mps_data_cb()) to handle the bidirectional
2893 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2894 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2896 /* The chip data format is little endian. */
2897 req->SASAddress.High = htole32(sasaddr >> 32);
2898 req->SASAddress.Low = htole32(sasaddr);
2901 * XXX Note that we don't have a timeout/abort mechanism here.
2902 * From the manual, it looks like task management requests only
2903 * work for SCSI IO and SATA passthrough requests. We may need to
2904 * have a mechanism to retry requests in the event of a chip reset
2905 * at least. Hopefully the chip will insure that any errors short
2906 * of that are relayed back to the driver.
2908 error = mps_map_command(sc, cm);
2909 if ((error != 0) && (error != EINPROGRESS)) {
2910 mps_dprint(sc, MPS_ERROR,
2911 "%s: error %d returned from mps_map_command()\n",
2919 mps_free_command(sc, cm);
2920 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * mpssas_action_smpio -- entry point for XPT_SMP_IO CCBs.
 *
 * Determines the SAS address to which the SMP request should be sent:
 * the target's own address if it embeds an SMP target, otherwise the
 * address of the target's parent (normally the attached expander).  On
 * success the CCB is forwarded to mpssas_send_smpcmd(); otherwise the CCB
 * is completed with an appropriate error status.
 */
2927 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2929 struct mps_softc *sc;
2930 struct mpssas_target *targ;
2931 uint64_t sasaddr = 0;
2936 * Make sure the target exists.
2938 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2939 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2940 targ = &sassc->targets[ccb->ccb_h.target_id];
2941 if (targ->handle == 0x0) {
2942 mps_dprint(sc, MPS_ERROR,
2943 "%s: target %d does not exist!\n", __func__,
2944 ccb->ccb_h.target_id);
2945 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2951 * If this device has an embedded SMP target, we'll talk to it
2953 * figure out what the expander's address is.
2955 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2956 sasaddr = targ->sasaddr;
2959 * If we don't have a SAS address for the expander yet, try
2960 * grabbing it from the page 0x83 information cached in the
2961 * transport layer for this target. LSI expanders report the
2962 * expander SAS address as the port-associated SAS address in
2963 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2966 * XXX KDM disable this for now, but leave it commented out so that
2967 * it is obvious that this is another possible way to get the SAS
2970 * The parent handle method below is a little more reliable, and
2971 * the other benefit is that it works for devices other than SES
2972 * devices. So you can send a SMP request to a da(4) device and it
2973 * will get routed to the expander that device is attached to.
2974 * (Assuming the da(4) device doesn't contain an SMP target...)
2978 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2982 * If we still don't have a SAS address for the expander, look for
2983 * the parent device of this device, which is probably the expander.
2986 #ifdef OLD_MPS_PROBE
2987 struct mpssas_target *parent_target;
2990 if (targ->parent_handle == 0x0) {
2991 mps_dprint(sc, MPS_ERROR,
2992 "%s: handle %d does not have a valid "
2993 "parent handle!\n", __func__, targ->handle);
2994 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Legacy path: resolve the parent target by handle lookup. */
2997 #ifdef OLD_MPS_PROBE
2998 parent_target = mpssas_find_target_by_handle(sassc, 0,
2999 targ->parent_handle);
3001 if (parent_target == NULL) {
3002 mps_dprint(sc, MPS_ERROR,
3003 "%s: handle %d does not have a valid "
3004 "parent target!\n", __func__, targ->handle);
3005 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3009 if ((parent_target->devinfo &
3010 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3011 mps_dprint(sc, MPS_ERROR,
3012 "%s: handle %d parent %d does not "
3013 "have an SMP target!\n", __func__,
3014 targ->handle, parent_target->handle);
3015 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3020 sasaddr = parent_target->sasaddr;
3021 #else /* OLD_MPS_PROBE */
/* Current path: use the parent info cached on the target itself. */
3022 if ((targ->parent_devinfo &
3023 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3024 mps_dprint(sc, MPS_ERROR,
3025 "%s: handle %d parent %d does not "
3026 "have an SMP target!\n", __func__,
3027 targ->handle, targ->parent_handle);
3028 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3032 if (targ->parent_sasaddr == 0x0) {
3033 mps_dprint(sc, MPS_ERROR,
3034 "%s: handle %d parent handle %d does "
3035 "not have a valid SAS address!\n",
3036 __func__, targ->handle, targ->parent_handle);
3037 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3041 sasaddr = targ->parent_sasaddr;
3042 #endif /* OLD_MPS_PROBE */
3047 mps_dprint(sc, MPS_INFO,
3048 "%s: unable to find SAS address for handle %d\n",
3049 __func__, targ->handle);
3050 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3053 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3061 #endif //__FreeBSD_version >= 900026
/*
 * mpssas_action_resetdev -- entry point for XPT_RESET_DEV CCBs.
 *
 * Allocates a command, builds an MPI2 SCSI task management request of
 * type TARGET_RESET (with the link-reset message flag) for the target
 * named in the CCB, and maps it to the hardware.  Completion is handled
 * by mpssas_resetdev_complete().  Must be called with the softc mutex
 * held.
 */
3064 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3066 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3067 struct mps_softc *sc;
3068 struct mps_command *tm;
3069 struct mpssas_target *targ;
3071 MPS_FUNCTRACE(sassc->sc);
3072 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3074 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3075 ("Target %d out of bounds in XPT_RESET_DEV\n",
3076 ccb->ccb_h.target_id));
3078 tm = mps_alloc_command(sc)
3080 mps_dprint(sc, MPS_ERROR,
3081 "command alloc failure in mpssas_action_resetdev\n");
3082 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3087 targ = &sassc->targets[ccb->ccb_h.target_id];
3088 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3089 req->DevHandle = htole16(targ->handle);
3090 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3091 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3093 /* SAS Hard Link Reset / SATA Link Reset */
3094 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go through the high-priority queue. */
3097 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3098 tm->cm_complete = mpssas_resetdev_complete;
3099 tm->cm_complete_data = ccb;
3101 mps_map_command(sc, tm);
/*
 * mpssas_resetdev_complete -- completion handler for the target reset
 * issued by mpssas_action_resetdev().
 *
 * Translates the task-management reply's ResponseCode into a CAM status
 * on the original CCB, announces the bus device reset to CAM on success,
 * and frees the task-management command.  Must be called with the softc
 * mutex held.
 */
3105 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3107 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3111 mtx_assert(&sc->mps_mtx, MA_OWNED);
3113 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3114 ccb = tm->cm_complete_data;
3117 * Currently there should be no way we can hit this case. It only
3118 * happens when we have a failure to allocate chain frames, and
3119 * task management commands don't have S/G lists.
3121 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3122 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3124 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3126 mps_dprint(sc, MPS_ERROR,
3127 "%s: cm_flags = %#x for reset of handle %#04x! "
3128 "This should not happen!\n", __func__, tm->cm_flags,
3130 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3134 mps_dprint(sc, MPS_XINFO,
3135 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3136 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3138 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3139 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
/* Tell CAM a bus device reset was delivered to this target. */
3140 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3144 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3148 mpssas_free_tm(sc, tm);
/*
 * mpssas_poll -- CAM SIM poll routine, used when interrupts are
 * unavailable (e.g. during kernel dumps or at panic time).
 *
 * Disables MPS_TRACE debugging first, since per-command trace output
 * would make polled operation unusably slow, then services the hardware
 * via mps_intr_locked().
 */
3153 mpssas_poll(struct cam_sim *sim)
3155 struct mpssas_softc *sassc;
3157 sassc = cam_sim_softc(sim);
3159 if (sassc->sc->mps_debug & MPS_TRACE) {
3160 /* frequent debug messages during a panic just slow
3161 * everything down too much.
3163 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3164 sassc->sc->mps_debug &= ~MPS_TRACE;
3167 mps_intr_locked(sassc->sc);
/*
 * mpssas_async -- CAM asynchronous event callback.
 *
 * Handles two events: AC_ADVINFO_CHANGED (on newer kernels), where a
 * change in cached READ CAPACITY(16) data is used to refresh the per-LUN
 * EEDP (protection information) state; and AC_FOUND_DEVICE, which
 * triggers an EEDP capability probe via mpssas_check_eedp().
 */
3171 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3174 struct mps_softc *sc;
3176 sc = (struct mps_softc *)callback_arg;
3179 #if (__FreeBSD_version >= 1000006) || \
3180 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3181 case AC_ADVINFO_CHANGED: {
3182 struct mpssas_target *target;
3183 struct mpssas_softc *sassc;
3184 struct scsi_read_capacity_data_long rcap_buf;
3185 struct ccb_dev_advinfo cdai;
3186 struct mpssas_lun *lun;
3191 buftype = (uintptr_t)arg;
3197 * We're only interested in read capacity data changes.
3199 if (buftype != CDAI_TYPE_RCAPLONG)
3203 * We should have a handle for this, but check to make sure.
3205 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3206 ("Target %d out of bounds in mpssas_async\n",
3207 xpt_path_target_id(path)));
3208 target = &sassc->targets[xpt_path_target_id(path)];
3209 if (target->handle == 0)
3212 lunid = xpt_path_lun_id(path);
/* Look for an existing LUN record; allocate one if missing. */
3214 SLIST_FOREACH(lun, &target->luns, lun_link) {
3215 if (lun->lun_id == lunid) {
3221 if (found_lun == 0) {
3222 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3225 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3226 "LUN for EEDP support.\n");
3229 lun->lun_id = lunid;
3230 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
3233 bzero(&rcap_buf, sizeof(rcap_buf));
3234 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3235 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3236 cdai.ccb_h.flags = CAM_DIR_IN;
3237 cdai.buftype = CDAI_TYPE_RCAPLONG;
3239 cdai.bufsiz = sizeof(rcap_buf);
3240 cdai.buf = (uint8_t *)&rcap_buf;
3241 xpt_action((union ccb *)&cdai);
3242 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3243 cam_release_devq(cdai.ccb_h.path,
/* PROT_EN bit set => LUN is formatted with protection info. */
3246 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3247 && (rcap_buf.prot & SRC16_PROT_EN)) {
3248 lun->eedp_formatted = TRUE;
3249 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3251 lun->eedp_formatted = FALSE;
3252 lun->eedp_block_size = 0;
3257 case AC_FOUND_DEVICE: {
3258 struct ccb_getdev *cgd;
3261 mpssas_check_eedp(sc, path, cgd);
3270 #if (__FreeBSD_version < 901503) || \
3271 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * mpssas_check_eedp -- probe a newly found device for EEDP (end-to-end
 * data protection) support.  Compiled only for kernels without the
 * CDAI_TYPE_RCAPLONG advinfo mechanism.
 *
 * If the device's inquiry data advertises protection support
 * (SPC3_SID_PROTECT), issue a READ CAPACITY(16) command whose completion
 * (mpssas_read_cap_done) records whether the LUN is actually formatted
 * with protection information.  Also marks SATA direct-access end devices
 * so they get a StartStopUnit at driver shutdown.
 */
3273 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3274 struct ccb_getdev *cgd)
3276 struct mpssas_softc *sassc = sc->sassc;
3277 struct ccb_scsiio *csio;
3278 struct scsi_read_capacity_16 *scsi_cmd;
3279 struct scsi_read_capacity_eedp *rcap_buf;
3281 target_id_t targetid;
3284 struct cam_path *local_path;
3285 struct mpssas_target *target;
3286 struct mpssas_lun *lun;
3291 pathid = cam_sim_path(sassc->sim);
3292 targetid = xpt_path_target_id(path);
3293 lunid = xpt_path_lun_id(path);
3295 KASSERT(targetid < sassc->maxtargets,
3296 ("Target %d out of bounds in mpssas_check_eedp\n",
3298 target = &sassc->targets[targetid];
3299 if (target->handle == 0x0)
3303 * Determine if the device is EEDP capable.
3305 * If this flag is set in the inquiry data,
3306 * the device supports protection information,
3307 * and must support the 16 byte read
3308 * capacity command, otherwise continue without
3309 * sending read cap 16
3311 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3315 * Issue a READ CAPACITY 16 command. This info
3316 * is used to determine if the LUN is formatted
3319 ccb = xpt_alloc_ccb_nowait();
3321 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3322 "for EEDP support.\n");
3326 if (xpt_create_path(&local_path, xpt_periph,
3327 pathid, targetid, lunid) != CAM_REQ_CMP) {
3328 mps_dprint(sc, MPS_ERROR, "Unable to create "
3329 "path for EEDP support\n");
3335 * If LUN is already in list, don't create a new
3339 SLIST_FOREACH(lun, &target->luns, lun_link) {
3340 if (lun->lun_id == lunid) {
3346 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3349 mps_dprint(sc, MPS_ERROR,
3350 "Unable to alloc LUN for EEDP support.\n");
3351 xpt_free_path(local_path);
3355 lun->lun_id = lunid;
3356 SLIST_INSERT_HEAD(&target->luns, lun,
3360 xpt_path_string(local_path, path_str, sizeof(path_str));
3363 * If this is a SATA direct-access end device,
3364 * mark it so that a SCSI StartStopUnit command
3365 * will be sent to it when the driver is being
/*
 * BUG FIX: 'cgd' is a pointer parameter (struct ccb_getdev *), as the
 * dereference at original line 3311 already shows, so member access must
 * use '->', not '.'.
 */
3368 if ((cgd->inq_data.device == T_DIRECT) &&
3369 (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3370 ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3371 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3372 lun->stop_at_shutdown = TRUE;
3375 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3376 path_str, target->handle);
3379 * Issue a READ CAPACITY 16 command for the LUN.
3380 * The mpssas_read_cap_done function will load
3381 * the read cap info into the LUN struct.
3383 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3384 M_MPT2, M_NOWAIT | M_ZERO);
3385 if (rcap_buf == NULL) {
3386 mps_dprint(sc, MPS_FAULT,
3387 "Unable to alloc read capacity buffer for EEDP support.\n");
3388 xpt_free_path(ccb->ccb_h.path);
3392 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3394 csio->ccb_h.func_code = XPT_SCSI_IO;
3395 csio->ccb_h.flags = CAM_DIR_IN;
3396 csio->ccb_h.retry_count = 4;
3397 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3398 csio->ccb_h.timeout = 60000;
3399 csio->data_ptr = (uint8_t *)rcap_buf;
3400 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3401 csio->sense_len = MPS_SENSE_LEN;
3402 csio->cdb_len = sizeof(*scsi_cmd);
3403 csio->tag_action = MSG_SIMPLE_Q_TAG;
3405 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3406 bzero(scsi_cmd, sizeof(*scsi_cmd));
3407 scsi_cmd->opcode = 0x9E;
3408 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3409 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3411 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * mpssas_read_cap_done -- completion callback for the READ CAPACITY(16)
 * issued by mpssas_check_eedp().
 *
 * Releases the device queue if it was frozen by this internal command,
 * then finds the matching LUN record on the target and records whether
 * the LUN is formatted with protection information (protect bit in the
 * EEDP read-capacity data) along with its block size.  Finally frees the
 * data buffer, path, and CCB.
 */
3416 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3418 struct mpssas_softc *sassc;
3419 struct mpssas_target *target;
3420 struct mpssas_lun *lun;
3421 struct scsi_read_capacity_eedp *rcap_buf;
3423 if (done_ccb == NULL)
3426 /* Driver need to release devq, it Scsi command is
3427 * generated by driver internally.
3428 * Currently there is a single place where driver
3429 * calls scsi command internally. In future if driver
3430 * calls more scsi command internally, it needs to release
3431 * devq internally, since those command will not go back to
3434 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3435 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3436 xpt_release_devq(done_ccb->ccb_h.path,
3437 /*count*/ 1, /*run_queue*/TRUE);
3440 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3443 * Get the LUN ID for the path and look it up in the LUN list for the
3446 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3447 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3448 ("Target %d out of bounds in mpssas_read_cap_done\n",
3449 done_ccb->ccb_h.target_id));
3450 target = &sassc->targets[done_ccb->ccb_h.target_id];
3451 SLIST_FOREACH(lun, &target->luns, lun_link) {
3452 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3456 * Got the LUN in the target's LUN list. Fill it in
3457 * with EEDP info. If the READ CAP 16 command had some
3458 * SCSI error (common if command is not supported), mark
3459 * the lun as not supporting EEDP and set the block size
3462 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
3463 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3464 lun->eedp_formatted = FALSE;
3465 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte is PROT_EN: protection enabled. */
3469 if (rcap_buf->protect & 0x01) {
3470 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3471 "target ID %d is formatted for EEDP "
3472 "support.\n", done_ccb->ccb_h.target_lun,
3473 done_ccb->ccb_h.target_id);
3474 lun->eedp_formatted = TRUE;
3475 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3480 // Finished with this CCB and path.
3481 free(rcap_buf, M_MPT2);
3482 xpt_free_path(done_ccb->ccb_h.path);
3483 xpt_free_ccb(done_ccb);
3485 #endif /* (__FreeBSD_version < 901503) || \
3486 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * mpssas_startup -- kick off controller discovery by sending the port
 * enable request.  wait_for_port_enable keeps the SIM queue frozen until
 * all discovery events have completed.
 */
3489 mpssas_startup(struct mps_softc *sc)
3493 * Send the port enable message and set the wait_for_port_enable flag.
3494 * This flag helps to keep the simq frozen until all discovery events
3497 sc->wait_for_port_enable = 1;
3498 mpssas_send_portenable(sc);
/*
 * mpssas_send_portenable -- allocate a command and issue an MPI2
 * PORT_ENABLE request to start device discovery.  Completion is handled
 * by mpssas_portenable_complete().
 */
3503 mpssas_send_portenable(struct mps_softc *sc)
3505 MPI2_PORT_ENABLE_REQUEST *request;
3506 struct mps_command *cm;
3510 if ((cm = mps_alloc_command(sc)) == NULL)
3512 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3513 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3514 request->MsgFlags = 0;
3516 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3517 cm->cm_complete = mpssas_portenable_complete;
3521 mps_map_command(sc, cm);
3522 mps_dprint(sc, MPS_XINFO,
3523 "mps_send_portenable finished cm %p req %p complete %p\n",
3524 cm, cm->cm_req, cm->cm_complete);
/*
 * mpssas_portenable_complete -- completion handler for the PORT_ENABLE
 * request sent by mpssas_send_portenable().
 *
 * Logs any failure, frees the command, disestablishes the boot-time
 * config intrhook, pulls WarpDrive configuration pages before the bus
 * scan, and then releases the startup reference taken before port enable
 * so the SIM queue can unfreeze once discovery is done.
 */
3529 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3531 MPI2_PORT_ENABLE_REPLY *reply;
3532 struct mpssas_softc *sassc;
3538 * Currently there should be no way we can hit this case. It only
3539 * happens when we have a failure to allocate chain frames, and
3540 * port enable commands don't have S/G lists.
3542 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3543 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3544 "This should not happen!\n", __func__, cm->cm_flags);
3547 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3549 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
/*
 * BUG FIX: convert IOCStatus from little-endian first, then apply the
 * host-order mask -- masking the raw LE value is wrong on big-endian
 * machines.  Matches the IOCStatus check in mpssas_smpio_complete().
 */
3550 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3551 MPI2_IOCSTATUS_SUCCESS)
3552 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3554 mps_free_command(sc, cm);
3555 if (sc->mps_ich.ich_arg != NULL) {
3556 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3557 config_intrhook_disestablish(&sc->mps_ich);
3558 sc->mps_ich.ich_arg = NULL;
3562 * Get WarpDrive info after discovery is complete but before the scan
3563 * starts. At this point, all devices are ready to be exposed to the
3564 * OS. If devices should be hidden instead, take them out of the
3565 * 'targets' array before the scan. The devinfo for a disk will have
3566 * some info and a volume's will be 0. Use that to remove disks.
3568 mps_wd_config_pages(sc);
3571 * Done waiting for port enable to complete. Decrement the refcount.
3572 * If refcount is 0, discovery is complete and a rescan of the bus can
3573 * take place. Since the simq was explicitly frozen before port
3574 * enable, it must be explicitly released here to keep the
3575 * freeze/release count in sync.
3577 sc->wait_for_port_enable = 0;
3578 sc->port_enable_complete = 1;
3579 wakeup(&sc->port_enable_complete);
3580 mpssas_startup_decrement(sassc);
/*
 * Check whether target 'id' is listed in the driver's comma-separated
 * exclude-ID string (sc->exclude_ids).  Presumably returns nonzero when
 * the ID is excluded — the return statements fall outside this excerpt,
 * TODO confirm.
 */
3584 mpssas_check_id(struct mpssas_softc *sassc, int id)
3586 struct mps_softc *sc = sassc->sc;
3590 ids = &sc->exclude_ids[0];
/* strsep() consumes 'ids' in place, yielding one token per iteration. */
3591 while((name = strsep(&ids, ",")) != NULL) {
/* Skip empty tokens produced by consecutive commas. */
3592 if (name[0] == '\0')
/* Base 0 lets the excluded ID be written in decimal, octal, or hex. */
3594 if (strtol(name, NULL, 0) == (long)id)
/*
 * Resize the per-controller target array to 'maxtargets' entries (the
 * count comes from IOC Facts).  All per-target LUN lists are freed first,
 * then the old array itself, before a zeroed replacement is allocated.
 * (Definition continues beyond this excerpt.)
 */
3602 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3604 struct mpssas_softc *sassc;
3605 struct mpssas_lun *lun, *lun_tmp;
3606 struct mpssas_target *targ;
3611 * The number of targets is based on IOC Facts, so free all of
3612 * the allocated LUNs for each target and then the target buffer
/* SAFE variant is required: the loop frees entries while iterating. */
3615 for (i=0; i< maxtargets; i++) {
3616 targ = &sassc->targets[i];
3617 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3621 free(sassc->targets, M_MPT2);
3623 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3624 M_MPT2, M_WAITOK|M_ZERO);
/*
 * NOTE(review): kernel malloc(9) with M_WAITOK never returns NULL, so
 * this failure check (and the panic below) is effectively dead code —
 * confirm before removing.
 */
3625 if (!sassc->targets) {
3626 panic("%s failed to alloc targets with error %d\n",