2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * LSI MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for LSI MPT2 */
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
/* Discovery timeout tuning: per-timeout wait (seconds) and retry cap. */
88 #define MPSSAS_DISCOVERY_TIMEOUT 20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
/*
92 * static array to check SCSI OpCode for EEDP protection bits
 */
/*
 * PRO_R: reads check and strip protection info; PRO_W/PRO_V: writes insert
 * it.  Note PRO_V maps to the same MPI2 flag as PRO_W in this table.
 */
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Table indexed by SCSI CDB opcode (0x00-0xFF); a non-zero entry is the
 * EEDP flag to apply for that opcode.  Non-zero rows correspond to the
 * 10/12/16-byte READ/WRITE/VERIFY opcode ranges (e.g. 0x28/0x2A, 0x88/0x8A,
 * 0xA8/0xAA) — assumes standard SPC opcode assignments; TODO confirm
 * against the full table in the unmangled source.
 */
97 static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Private malloc(9) type for all mps SAS-layer allocations. */
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
/*
 * Forward declarations for the CAM SIM callbacks and internal completion
 * handlers defined below.  SMP passthrough support is conditional on
 * __FreeBSD_version >= 900026; the EEDP probe path is only needed on
 * branches that lack AC_ADVINFO_CHANGED support.
 */
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126 struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149 struct mps_command *cm);
/*
 * Linear-search sassc->targets[] beginning at index 'start' for the entry
 * whose firmware device handle equals 'handle'.  Presumably returns the
 * matching target, or NULL when no entry matches — the return statements
 * are not visible in this extracted text; TODO confirm against the
 * unmangled source.
 */
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
154 struct mpssas_target *target;
157 for (i = start; i < sassc->maxtargets; i++) {
158 target = &sassc->targets[i];
159 if (target->handle == handle)
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery. Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
 */
/*
 * Take a discovery/startup reference.  The first reference taken while
 * MPSSAS_IN_STARTUP is set freezes the CAM simq so no commands are sent
 * before device handles exist.  Counterpart of mpssas_startup_decrement().
 */
174 mpssas_startup_increment(struct mpssas_softc *sassc)
176 MPS_FUNCTRACE(sassc->sc);
178 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 if (sassc->startup_refcount++ == 0) {
180 /* just starting, freeze the simq */
181 mps_dprint(sassc->sc, MPS_INIT,
182 "%s freezing simq\n", __func__);
/* On >= 1000039 an extra step is taken here (hidden by truncation
 * in this extract) before the freeze — TODO confirm. */
183 #if __FreeBSD_version >= 1000039
186 xpt_freeze_simq(sassc->sim, 1);
188 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
189 sassc->startup_refcount);
/*
 * Drop a discovery/startup reference.  When the last reference is released
 * startup is declared finished: MPSSAS_IN_STARTUP is cleared, the simq is
 * released, and (on >= 1000039) a full-bus rescan is requested to pick up
 * the discovered topology.
 */
194 mpssas_startup_decrement(struct mpssas_softc *sassc)
196 MPS_FUNCTRACE(sassc->sc);
198 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
199 if (--sassc->startup_refcount == 0) {
200 /* finished all discovery-related actions, release
201 * the simq and rescan for the latest topology.
 */
203 mps_dprint(sassc->sc, MPS_INIT,
204 "%s releasing simq\n", __func__);
205 sassc->flags &= ~MPSSAS_IN_STARTUP;
206 xpt_release_simq(sassc->sim, 1);
207 #if __FreeBSD_version >= 1000039
/* NULL target => wildcard rescan of the whole bus. */
210 mpssas_rescan_target(sassc->sc, NULL);
213 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
214 sassc->startup_refcount);
218 /* LSI's firmware requires us to stop sending commands when we're doing task
219 * management, so refcount the TMs and keep the simq frozen when any are in
 * use.
 */
/*
 * Allocate a high-priority command for task management.  The first
 * outstanding TM freezes the simq; mpssas_free_tm() releases it when the
 * count returns to zero.  NOTE(review): the NULL-check on 'tm' and the
 * return are not visible in this extract — presumably tm_count is only
 * bumped on successful allocation; TODO confirm.
 */
223 mpssas_alloc_tm(struct mps_softc *sc)
225 struct mps_command *tm;
228 tm = mps_alloc_high_priority_command(sc);
230 if (sc->sassc->tm_count++ == 0) {
231 mps_dprint(sc, MPS_RECOVERY,
232 "%s freezing simq\n", __func__);
233 xpt_freeze_simq(sc->sassc->sim, 1);
235 mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
236 sc->sassc->tm_count);
/*
 * Return a task-management command to the high-priority free pool and drop
 * the TM refcount; the last outstanding TM unfreezes the simq.
 */
242 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
244 mps_dprint(sc, MPS_TRACE, "%s", __func__);
248 /* if there are no TMs in use, we can release the simq. We use our
249 * own refcount so that it's easier for a diag reset to cleanup and
 * release the simq (comment continues past a truncated line).
 */
252 if (--sc->sassc->tm_count == 0) {
253 mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
254 xpt_release_simq(sc->sassc->sim, 1);
256 mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
257 sc->sassc->tm_count);
259 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when 'targ' is
 * NULL (target id becomes CAM_TARGET_WILDCARD and XPT_SCAN_BUS is used
 * instead of XPT_SCAN_TGT).  Allocates the CCB with the nowait variant
 * since this can run in completion context; failures are logged and the
 * rescan is silently dropped.
 */
263 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
265 struct mpssas_softc *sassc = sc->sassc;
267 target_id_t targetid;
271 pathid = cam_sim_path(sassc->sim);
273 targetid = CAM_TARGET_WILDCARD;
/* Target id is the index of 'targ' within the targets[] array. */
275 targetid = targ - sassc->targets;
278 * Allocate a CCB and schedule a rescan.
280 ccb = xpt_alloc_ccb_nowait();
282 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
286 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
287 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
288 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
293 if (targetid == CAM_TARGET_WILDCARD)
294 ccb->ccb_h.func_code = XPT_SCAN_BUS;
296 ccb->ccb_h.func_code = XPT_SCAN_TGT;
298 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style logging helper for a command: builds one line in an sbuf
 * containing the CAM path, the SCSI CDB (for XPT_SCSI_IO), transfer length,
 * the SMID, and the caller's formatted message, then emits it via
 * mps_dprint_field().  Returns early (not visible here) when 'level' is not
 * enabled in mps_debug, so callers can use it freely on hot paths.
 */
303 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
313 /* No need to be in here if debugging isn't enabled */
314 if ((cm->cm_sc->mps_debug & level) == 0)
317 sbuf_new(&sb, str, sizeof(str), 0);
321 if (cm->cm_ccb != NULL) {
322 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
324 sbuf_cat(&sb, path_str);
325 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
326 scsi_command_string(&cm->cm_ccb->csio, &sb);
327 sbuf_printf(&sb, "length %d ",
328 cm->cm_ccb->csio.dxfer_len);
/* No CCB attached: synthesize a "noperiph" path from the SIM. */
332 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
333 cam_sim_name(cm->cm_sc->sassc->sim),
334 cam_sim_unit(cm->cm_sc->sassc->sim),
335 cam_sim_bus(cm->cm_sc->sassc->sim),
336 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
340 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
341 sbuf_vprintf(&sb, fmt, ap);
343 mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  Unlike bare-drive removal, volume
 * removal ends here: no SAS_OP_REMOVE_DEVICE follow-up is sent.  On any
 * failure (NULL reply after a diag reset, or bad IOCStatus) the TM is
 * simply freed and the target is left untouched.
 */
350 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
352 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
353 struct mpssas_target *targ;
358 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* The device handle was stashed in cm_complete_data by the submitter. */
359 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
363 /* XXX retry the remove after the diag reset completes? */
364 mps_dprint(sc, MPS_FAULT,
365 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
366 mpssas_free_tm(sc, tm);
/* NOTE(review): unlike mpssas_remove_device() this compares IOCStatus
 * without le16toh() — possibly an endianness inconsistency in the
 * original; verify against the unmangled source. */
370 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
371 mps_dprint(sc, MPS_FAULT,
372 "IOCStatus = 0x%x while resetting device 0x%x\n",
373 reply->IOCStatus, handle);
374 mpssas_free_tm(sc, tm);
378 mps_dprint(sc, MPS_XINFO,
379 "Reset aborted %u commands\n", reply->TerminationCount);
380 mps_free_reply(sc, tm->cm_reply_data);
381 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
383 mps_dprint(sc, MPS_XINFO,
384 "clearing target %u handle 0x%04x\n", targ->tid, handle);
387 * Don't clear target if remove fails because things will get confusing.
388 * Leave the devname and sasaddr intact so that we know to avoid reusing
389 * this target id if possible, and so we can assign the same target id
390 * to this device if it comes back in the future.
392 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
395 targ->encl_handle = 0x0;
396 targ->encl_slot = 0x0;
397 targ->exp_dev_handle = 0x0;
399 targ->linkrate = 0x0;
404 mpssas_free_tm(sc, tm);
/*
409 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
410 * Otherwise Volume Delete is same as Bare Drive Removal.
 */
/*
 * Begin removal of a RAID volume identified by firmware handle: mark the
 * target in-removal, kick off a CAM rescan, and send a high-priority
 * target-reset TM whose completion (mpssas_remove_volume) finishes the
 * teardown.  WD controllers configured to always expose the disk skip
 * removal entirely.
 */
413 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
415 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
416 struct mps_softc *sc;
417 struct mps_command *cm;
418 struct mpssas_target *targ = NULL;
420 MPS_FUNCTRACE(sassc->sc);
425 * If this is a WD controller, determine if the disk should be exposed
426 * to the OS or not. If disk should be exposed, return from this
427 * function without doing anything.
429 if (sc->WD_available && (sc->WD_hide_expose ==
430 MPS_WD_EXPOSE_ALWAYS)) {
435 targ = mpssas_find_target_by_handle(sassc, 0, handle);
437 /* FIXME: what is the action? */
438 /* We don't know about this device? */
439 mps_dprint(sc, MPS_ERROR,
440 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
444 targ->flags |= MPSSAS_TARGET_INREMOVAL;
446 cm = mpssas_alloc_tm(sc);
448 mps_dprint(sc, MPS_ERROR,
449 "%s: command alloc failure\n", __func__);
453 mpssas_rescan_target(sc, targ);
455 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
/* NOTE(review): DevHandle is assigned without htole16() here, while
 * mpssas_prepare_remove() byte-swaps it — verify in the real source. */
456 req->DevHandle = targ->handle;
457 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
458 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
460 /* SAS Hard Link Reset / SATA Link Reset */
461 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
465 cm->cm_desc.HighPriority.RequestFlags =
466 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
467 cm->cm_complete = mpssas_remove_volume;
/* Pass the handle to the completion via cm_complete_data. */
468 cm->cm_complete_data = (void *)(uintptr_t)handle;
469 mps_map_command(sc, cm);
/*
473 * The MPT2 firmware performs debounce on the link to avoid transient link
474 * errors and false removals. When it does decide that link has been lost
475 * and a device need to go away, it expects that the host will perform a
476 * target reset and then an op remove. The reset has the side-effect of
477 * aborting any outstanding requests for the device, which is required for
478 * the op-remove to succeed. It's not clear if the host should check for
479 * the device coming back alive after the reset.
 */
/*
 * Begin removal of a bare drive: mark the target, schedule a rescan, and
 * send a target-reset TM.  Its completion (mpssas_remove_device) issues
 * the follow-up MPI2_SAS_OP_REMOVE_DEVICE.
 */
482 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
484 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
485 struct mps_softc *sc;
486 struct mps_command *cm;
487 struct mpssas_target *targ = NULL;
489 MPS_FUNCTRACE(sassc->sc);
493 targ = mpssas_find_target_by_handle(sassc, 0, handle);
495 /* FIXME: what is the action? */
496 /* We don't know about this device? */
497 mps_dprint(sc, MPS_ERROR,
498 "%s : invalid handle 0x%x \n", __func__, handle);
502 targ->flags |= MPSSAS_TARGET_INREMOVAL;
504 cm = mpssas_alloc_tm(sc);
506 mps_dprint(sc, MPS_ERROR,
507 "%s: command alloc failure\n", __func__);
511 mpssas_rescan_target(sc, targ);
513 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
514 memset(req, 0, sizeof(*req));
515 req->DevHandle = htole16(targ->handle);
516 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
517 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
519 /* SAS Hard Link Reset / SATA Link Reset */
520 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
524 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
525 cm->cm_complete = mpssas_remove_device;
/* Completion retrieves the handle from cm_complete_data. */
526 cm->cm_complete_data = (void *)(uintptr_t)handle;
527 mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM from mpssas_prepare_remove().
 * On success it reuses the same command to send the second stage,
 * MPI2_SAS_OP_REMOVE_DEVICE (completing in mpssas_remove_complete), then
 * fails any commands still queued on the target with CAM_DEV_NOT_THERE.
 * Any error path just frees the TM and leaves the target alone.
 */
531 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
533 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
534 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
535 struct mpssas_target *targ;
536 struct mps_command *next_cm;
541 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
542 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
546 * Currently there should be no way we can hit this case. It only
547 * happens when we have a failure to allocate chain frames, and
548 * task management commands don't have S/G lists.
550 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
551 mps_dprint(sc, MPS_ERROR,
552 "%s: cm_flags = %#x for remove of handle %#04x! "
553 "This should not happen!\n", __func__, tm->cm_flags,
555 mpssas_free_tm(sc, tm);
560 /* XXX retry the remove after the diag reset completes? */
561 mps_dprint(sc, MPS_FAULT,
562 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
563 mpssas_free_tm(sc, tm);
567 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
568 mps_dprint(sc, MPS_FAULT,
569 "IOCStatus = 0x%x while resetting device 0x%x\n",
570 le16toh(reply->IOCStatus), handle);
571 mpssas_free_tm(sc, tm);
575 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
576 le32toh(reply->TerminationCount));
577 mps_free_reply(sc, tm->cm_reply_data);
578 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
580 /* Reuse the existing command */
581 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
582 memset(req, 0, sizeof(*req));
583 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
584 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
585 req->DevHandle = htole16(handle);
587 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
588 tm->cm_complete = mpssas_remove_complete;
589 tm->cm_complete_data = (void *)(uintptr_t)handle;
591 mps_map_command(sc, tm);
593 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
/* The reset terminated outstanding firmware commands; complete any CCBs
 * still linked on the target so CAM sees CAM_DEV_NOT_THERE. */
595 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
598 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
599 ccb = tm->cm_complete_data;
600 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
601 mpssas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the SAS IO-unit-control
 * REMOVE_DEVICE request.  On success, clears the target's handle-related
 * fields (devname/sasaddr are deliberately preserved — see comment below)
 * and frees any per-LUN state hanging off the target.
 */
606 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
608 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
610 struct mpssas_target *targ;
611 struct mpssas_lun *lun;
615 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
616 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
619 * Currently there should be no way we can hit this case. It only
620 * happens when we have a failure to allocate chain frames, and
621 * task management commands don't have S/G lists.
623 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
624 mps_dprint(sc, MPS_XINFO,
625 "%s: cm_flags = %#x for remove of handle %#04x! "
626 "This should not happen!\n", __func__, tm->cm_flags,
628 mpssas_free_tm(sc, tm);
633 /* most likely a chip reset */
634 mps_dprint(sc, MPS_FAULT,
635 "%s NULL reply removing device 0x%04x\n", __func__, handle);
636 mpssas_free_tm(sc, tm);
640 mps_dprint(sc, MPS_XINFO,
641 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
642 handle, le16toh(reply->IOCStatus));
645 * Don't clear target if remove fails because things will get confusing.
646 * Leave the devname and sasaddr intact so that we know to avoid reusing
647 * this target id if possible, and so we can assign the same target id
648 * to this device if it comes back in the future.
650 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
653 targ->encl_handle = 0x0;
654 targ->encl_slot = 0x0;
655 targ->exp_dev_handle = 0x0;
657 targ->linkrate = 0x0;
/* Tear down the per-LUN list (entries are freed on a truncated line). */
661 while(!SLIST_EMPTY(&targ->luns)) {
662 lun = SLIST_FIRST(&targ->luns);
663 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
669 mpssas_free_tm(sc, tm);
/*
 * Build the MPI2 event mask this driver cares about (device status, SAS
 * discovery/topology, enclosure, IR volume/disk, log entries) and register
 * mpssas_evt_handler() for them.  The handle is stored in
 * sc->sassc->mpssas_eh and later torn down by mps_detach_sas().
 */
673 mpssas_register_events(struct mps_softc *sc)
675 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
678 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
679 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
680 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
681 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
682 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
683 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
684 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
685 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
686 setbit(events, MPI2_EVENT_IR_VOLUME);
687 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
688 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
689 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
691 mps_register_events(sc, events, mpssas_evt_handler, NULL,
692 &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the softc and targets[] array, create
 * the SIM and its queue, spin up the firmware-event taskqueue, register a
 * single fake bus with CAM, enter startup (simq frozen) until discovery
 * completes, and register async + firmware event handlers.
 * NOTE(review): error-unwind labels and several closing braces are not
 * visible in this extract.
 */
698 mps_attach_sas(struct mps_softc *sc)
700 struct mpssas_softc *sassc;
/* M_WAITOK cannot return NULL, so the check below is vestigial. */
706 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
708 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
714 * XXX MaxTargets could change during a reinit. Since we don't
715 * resize the targets[] array during such an event, cache the value
716 * of MaxTargets here so that we don't get into trouble later. This
717 * should move into the reinit logic.
719 sassc->maxtargets = sc->facts->MaxTargets;
720 sassc->targets = malloc(sizeof(struct mpssas_target) *
721 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
722 if(!sassc->targets) {
723 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
/* SIMQ depth matches the controller's request pool (num_reqs). */
731 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
732 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
737 unit = device_get_unit(sc->mps_dev);
738 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
739 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
740 if (sassc->sim == NULL) {
741 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
746 TAILQ_INIT(&sassc->ev_queue);
748 /* Initialize taskqueue for Event Handling */
749 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
750 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
751 taskqueue_thread_enqueue, &sassc->ev_tq);
753 /* Run the task queue with lowest priority */
754 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
755 device_get_nameunit(sc->mps_dev));
760 * XXX There should be a bus for every port on the adapter, but since
761 * we're just going to fake the topology for now, we'll pretend that
762 * everything is just a target on a single bus.
764 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
765 mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
772 * Assume that discovery events will start right away.
774 * Hold off boot until discovery is complete.
776 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
777 sc->sassc->startup_refcount = 0;
778 mpssas_startup_increment(sassc);
780 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
781 sassc->discovery_timeouts = 0;
786 * Register for async events so we can determine the EEDP
787 * capabilities of devices.
789 status = xpt_create_path(&sassc->path, /*periph*/NULL,
790 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
792 if (status != CAM_REQ_CMP) {
793 mps_printf(sc, "Error %#x creating sim path\n", status);
/* Newer CAM delivers EEDP info via AC_ADVINFO_CHANGED; older branches
 * fall back to probing on AC_FOUND_DEVICE. */
798 #if (__FreeBSD_version >= 1000006) || \
799 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
800 event = AC_ADVINFO_CHANGED;
802 event = AC_FOUND_DEVICE;
804 status = xpt_register_async(event, mpssas_async, sc,
806 if (status != CAM_REQ_CMP) {
807 mps_dprint(sc, MPS_ERROR,
808 "Error %#x registering async handler for "
809 "AC_ADVINFO_CHANGED events\n", status);
810 xpt_free_path(sassc->path);
814 if (status != CAM_REQ_CMP) {
816 * EEDP use is the exception, not the rule.
817 * Warn the user, but do not fail to attach.
819 mps_printf(sc, "EEDP capabilities disabled.\n");
824 mpssas_register_events(sc);
/*
 * Detach the SAS/CAM layer, roughly reversing mps_attach_sas():
 * deregister firmware events, drain/free the event taskqueue (with the
 * driver lock dropped — see comment), remove the async handler and path,
 * release the simq if attach never finished startup, deregister and free
 * the SIM and its queue, then free per-LUN state and the targets[] array.
 */
832 mps_detach_sas(struct mps_softc *sc)
834 struct mpssas_softc *sassc;
835 struct mpssas_lun *lun, *lun_tmp;
836 struct mpssas_target *targ;
/* Nothing to do if the SAS layer never attached. */
841 if (sc->sassc == NULL)
845 mps_deregister_events(sc, sassc->mpssas_eh);
848 * Drain and free the event handling taskqueue with the lock
849 * unheld so that any parallel processing tasks drain properly
850 * without deadlocking.
852 if (sassc->ev_tq != NULL)
853 taskqueue_free(sassc->ev_tq);
855 /* Make sure CAM doesn't wedge if we had to bail out early. */
858 /* Deregister our async handler */
859 if (sassc->path != NULL) {
/* xpt_register_async with code 0 removes the registration. */
860 xpt_register_async(0, mpssas_async, sc, sassc->path);
861 xpt_free_path(sassc->path);
865 if (sassc->flags & MPSSAS_IN_STARTUP)
866 xpt_release_simq(sassc->sim, 1);
868 if (sassc->sim != NULL) {
869 xpt_bus_deregister(cam_sim_path(sassc->sim));
870 cam_sim_free(sassc->sim, FALSE);
873 sassc->flags |= MPSSAS_SHUTDOWN;
876 if (sassc->devq != NULL)
877 cam_simq_free(sassc->devq);
879 for(i=0; i< sassc->maxtargets ;i++) {
880 targ = &sassc->targets[i];
881 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
885 free(sassc->targets, M_MPT2);
/*
 * Discovery has finished: cancel the pending discovery watchdog callout,
 * if one was armed.  (The rest of the body is not visible in this
 * extract.)
 */
893 mpssas_discovery_end(struct mpssas_softc *sassc)
895 struct mps_softc *sc = sassc->sc;
899 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
900 callout_stop(&sassc->discovery_callout);
/*
 * Callout fired when discovery takes too long — typically a sign that
 * interrupts are not being delivered.  Polls the hardware manually, then
 * either finishes discovery, re-arms itself (up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts), or gives up and forces
 * discovery to end so the system can continue booting.
 */
905 mpssas_discovery_timeout(void *data)
907 struct mpssas_softc *sassc = data;
908 struct mps_softc *sc;
914 mps_dprint(sc, MPS_INFO,
915 "Timeout waiting for discovery, interrupts may not be working!\n");
916 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
918 /* Poll the hardware for events in case interrupts aren't working */
921 mps_dprint(sassc->sc, MPS_INFO,
922 "Finished polling after discovery timeout at %d\n", ticks);
924 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
925 mpssas_discovery_end(sassc);
927 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
928 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
929 callout_reset(&sassc->discovery_callout,
930 MPSSAS_DISCOVERY_TIMEOUT * hz,
931 mpssas_discovery_timeout, sassc);
932 sassc->discovery_timeouts++;
/* Retry budget exhausted: stop waiting and proceed without discovery. */
934 mps_dprint(sassc->sc, MPS_FAULT,
935 "Discovery timed out, continuing.\n");
936 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
937 mpssas_discovery_end(sassc);
/*
 * Main CAM action entry point for the mps SIM.  Dispatches on the CCB
 * function code: fills path inquiry (XPT_PATH_INQ), reports per-target
 * transfer settings keyed off the negotiated link rate, computes geometry,
 * handles device reset, fakes success for abort/bus-reset style requests
 * the hardware handles internally, and forwards SCSI and SMP I/O to their
 * dedicated handlers.  Must be called with the driver mutex held.
 */
945 mpssas_action(struct cam_sim *sim, union ccb *ccb)
947 struct mpssas_softc *sassc;
949 sassc = cam_sim_softc(sim);
951 MPS_FUNCTRACE(sassc->sc);
952 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
953 ccb->ccb_h.func_code);
954 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
956 switch (ccb->ccb_h.func_code) {
959 struct ccb_pathinq *cpi = &ccb->cpi;
961 cpi->version_num = 1;
962 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
963 cpi->target_sprt = 0;
/* PIM_NOSCAN (>=1000039): driver performs its own rescans, so CAM
 * should not scan the bus on registration. */
964 #if __FreeBSD_version >= 1000039
965 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
967 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
969 cpi->hba_eng_cnt = 0;
970 cpi->max_target = sassc->maxtargets - 1;
/* The initiator claims the highest target id for itself. */
972 cpi->initiator_id = sassc->maxtargets - 1;
973 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
974 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
975 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
976 cpi->unit_number = cam_sim_unit(sim);
977 cpi->bus_id = cam_sim_bus(sim);
978 cpi->base_transfer_speed = 150000;
979 cpi->transport = XPORT_SAS;
980 cpi->transport_version = 0;
981 cpi->protocol = PROTO_SCSI;
982 cpi->protocol_version = SCSI_REV_SPC;
983 #if __FreeBSD_version >= 800001
985 * XXX KDM where does this number come from?
987 cpi->maxio = 256 * 1024;
989 cpi->ccb_h.status = CAM_REQ_CMP;
992 case XPT_GET_TRAN_SETTINGS:
994 struct ccb_trans_settings *cts;
995 struct ccb_trans_settings_sas *sas;
996 struct ccb_trans_settings_scsi *scsi;
997 struct mpssas_target *targ;
1000 sas = &cts->xport_specific.sas;
1001 scsi = &cts->proto_specific.scsi;
1003 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1004 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
1005 cts->ccb_h.target_id));
1006 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle == 0 means no device behind this target id. */
1007 if (targ->handle == 0x0) {
1008 cts->ccb_h.status = CAM_SEL_TIMEOUT;
1012 cts->protocol_version = SCSI_REV_SPC2;
1013 cts->transport = XPORT_SAS;
1014 cts->transport_version = 0;
1016 sas->valid = CTS_SAS_VALID_SPEED;
/* Map MPI2 link-rate code to kb/s (1.5 / 3 / 6 Gb/s). */
1017 switch (targ->linkrate) {
1019 sas->bitrate = 150000;
1022 sas->bitrate = 300000;
1025 sas->bitrate = 600000;
1031 cts->protocol = PROTO_SCSI;
1032 scsi->valid = CTS_SCSI_VALID_TQ;
1033 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1035 cts->ccb_h.status = CAM_REQ_CMP;
1038 case XPT_CALC_GEOMETRY:
1039 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1040 ccb->ccb_h.status = CAM_REQ_CMP;
1043 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1044 mpssas_action_resetdev(sassc, ccb);
/* Abort/bus-reset style CCBs: claim success without doing work. */
1049 mps_dprint(sassc->sc, MPS_XINFO,
1050 "mpssas_action faking success for abort or reset\n");
1051 ccb->ccb_h.status = CAM_REQ_CMP;
1054 mpssas_action_scsiio(sassc, ccb);
1056 #if __FreeBSD_version >= 900026
1058 mpssas_action_smpio(sassc, ccb);
/* Unrecognized function codes are rejected. */
1062 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Broadcast an async event (e.g. AC_BUS_RESET, AC_SENT_BDR) to CAM for
 * the given target/lun on this SIM's bus.  Builds a temporary path, fires
 * xpt_async(), and frees the path; path-creation failure is logged and the
 * announcement dropped.
 */
1070 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1071 target_id_t target_id, lun_id_t lun_id)
1073 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1074 struct cam_path *path;
1076 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
1077 ac_code, target_id, lun_id);
1079 if (xpt_create_path(&path, NULL,
1080 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1081 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1086 xpt_async(ac_code, path, NULL);
1087 xpt_free_path(path);
/*
 * Diag-reset cleanup: walk every command slot (index 0 is skipped —
 * presumably reserved) and force-complete each outstanding command with a
 * NULL reply.  Polled commands are flagged complete, callback-based ones
 * have their completion invoked, and sleeping waiters are woken.  Anything
 * not completed but not free is logged as an anomaly.
 */
1091 mpssas_complete_all_commands(struct mps_softc *sc)
1093 struct mps_command *cm;
1098 mtx_assert(&sc->mps_mtx, MA_OWNED);
1100 /* complete all commands with a NULL reply */
1101 for (i = 1; i < sc->num_reqs; i++) {
1102 cm = &sc->commands[i];
1103 cm->cm_reply = NULL;
1106 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1107 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1109 if (cm->cm_complete != NULL) {
1110 mpssas_log_command(cm, MPS_RECOVERY,
1111 "completing cm %p state %x ccb %p for diag reset\n",
1112 cm, cm->cm_state, cm->cm_ccb);
1114 cm->cm_complete(sc, cm);
1118 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1119 mpssas_log_command(cm, MPS_RECOVERY,
1120 "waking up cm %p state %x ccb %p for diag reset\n",
1121 cm, cm->cm_state, cm->cm_ccb);
/* Should be unreachable: a busy command that was neither polled,
 * callback-completed, nor woken. */
1126 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1127 /* this should never happen, but if it does, log */
1128 mpssas_log_command(cm, MPS_RECOVERY,
1129 "cm %p state %x flags 0x%x ccb %p during diag "
1130 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset recovery: re-enter startup mode (freezing the simq),
 * announce a bus reset to CAM, force-complete every outstanding command,
 * and invalidate all cached target handles — they may be reassigned by the
 * firmware, so each target is flagged MPSSAS_TARGET_INDIAGRESET until
 * rediscovered.
 */
1137 mpssas_handle_reinit(struct mps_softc *sc)
1141 /* Go back into startup mode and freeze the simq, so that CAM
1142 * doesn't send any commands until after we've rediscovered all
1143 * targets and found the proper device handles for them.
1145 * After the reset, portenable will trigger discovery, and after all
1146 * discovery-related activities have finished, the simq will be
 * released.
1149 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1150 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1151 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1152 mpssas_startup_increment(sc->sassc);
1154 /* notify CAM of a bus reset */
1155 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1158 /* complete and cleanup after all outstanding commands */
1159 mpssas_complete_all_commands(sc);
1161 mps_dprint(sc, MPS_INIT,
1162 "%s startup %u tm %u after command completion\n",
1163 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1165 /* zero all the target handles, since they may change after the
1166 * reset, and we have to rediscover all the targets and use the new
 * handles.
1169 for (i = 0; i < sc->sassc->maxtargets; i++) {
1170 if (sc->sassc->targets[i].outstanding != 0)
1171 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1172 i, sc->sassc->targets[i].outstanding);
1173 sc->sassc->targets[i].handle = 0x0;
1174 sc->sassc->targets[i].exp_dev_handle = 0x0;
1175 sc->sassc->targets[i].outstanding = 0;
1176 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout handler for a task-management command that never completed.
 * Logs the stall; the recovery action (presumably a diag reset) is on
 * lines not visible in this extract — TODO confirm against the unmangled
 * source.
 */
1181 mpssas_tm_timeout(void *data)
1183 struct mps_command *tm = data;
1184 struct mps_softc *sc = tm->cm_sc;
1186 mtx_assert(&sc->mps_mtx, MA_OWNED);
1188 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1189 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL UNIT RESET TM.  Stops the TM timeout
 * callout, then: on a NULL reply during diag reset just cleans up; on
 * success counts commands still outstanding for the same LUN.  If none
 * remain, recovery for this LU is done (AC_SENT_BDR announced) and the TM
 * is either reused to abort the next timed-out command on the target or
 * freed.  If commands remain, the LU reset is treated as failed and
 * recovery escalates to a full target reset via mpssas_send_reset().
 */
1194 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1196 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1197 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1198 unsigned int cm_count = 0;
1199 struct mps_command *cm;
1200 struct mpssas_target *targ;
/* TM completed (or failed); cancel the pending mpssas_tm_timeout. */
1202 callout_stop(&tm->cm_callout);
1204 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1205 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1209 * Currently there should be no way we can hit this case. It only
1210 * happens when we have a failure to allocate chain frames, and
1211 * task management commands don't have S/G lists.
1212 * XXXSL So should it be an assertion?
1214 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1215 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1216 "This should not happen!\n", __func__, tm->cm_flags);
1217 mpssas_free_tm(sc, tm);
1221 if (reply == NULL) {
1222 mpssas_log_command(tm, MPS_RECOVERY,
1223 "NULL reset reply for tm %p\n", tm);
1224 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1225 /* this completion was due to a reset, just cleanup */
1226 targ->flags &= ~MPSSAS_TARGET_INRESET;
1228 mpssas_free_tm(sc, tm);
1231 /* we should have gotten a reply. */
1237 mpssas_log_command(tm, MPS_RECOVERY,
1238 "logical unit reset status 0x%x code 0x%x count %u\n",
1239 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1240 le32toh(reply->TerminationCount));
1242 /* See if there are any outstanding commands for this LUN.
1243 * This could be made more efficient by using a per-LU data
1244 * structure of some sort.
 */
1246 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1247 if (cm->cm_lun == tm->cm_lun)
1251 if (cm_count == 0) {
1252 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1253 "logical unit %u finished recovery after reset\n",
1256 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1259 /* we've finished recovery for this logical unit. check and
1260 * see if some other logical unit has a timedout command
1261 * that needs to be processed.
 */
1263 cm = TAILQ_FIRST(&targ->timedout_commands);
/* Reuse this TM to abort the next timed-out command, if any;
 * otherwise return it to the pool. */
1265 mpssas_send_abort(sc, tm, cm);
1269 mpssas_free_tm(sc, tm);
1273 /* if we still have commands for this LUN, the reset
1274 * effectively failed, regardless of the status reported.
1275 * Escalate to a target reset.
 */
1277 mpssas_log_command(tm, MPS_RECOVERY,
1278 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1280 mpssas_send_reset(sc, tm,
1281 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management command.
 * Clears the target's INRESET flag; if the target has no outstanding
 * commands, recovery is complete, otherwise the reset is treated as
 * failed regardless of the reported status and recovery escalates
 * (escalation call is in lines omitted from this view).
 */
1286 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1288 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1289 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1290 struct mpssas_target *targ;
/* Disarm the TM timeout armed by mpssas_send_reset(). */
1292 callout_stop(&tm->cm_callout);
1294 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1295 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1299 * Currently there should be no way we can hit this case. It only
1300 * happens when we have a failure to allocate chain frames, and
1301 * task management commands don't have S/G lists.
1303 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1304 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1305 "This should not happen!\n", __func__, tm->cm_flags);
1306 mpssas_free_tm(sc, tm);
/* A NULL reply typically means the TM was flushed by a diag reset. */
1310 if (reply == NULL) {
1311 mpssas_log_command(tm, MPS_RECOVERY,
1312 "NULL reset reply for tm %p\n", tm);
1313 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1314 /* this completion was due to a reset, just cleanup */
/*
 * NOTE(review): targ is not assigned in the lines visible here;
 * presumably targ = tm->cm_targ in omitted code -- verify.
 */
1315 targ->flags &= ~MPSSAS_TARGET_INRESET;
1317 mpssas_free_tm(sc, tm);
1320 /* we should have gotten a reply. */
1326 mpssas_log_command(tm, MPS_RECOVERY,
1327 "target reset status 0x%x code 0x%x count %u\n",
1328 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1329 le32toh(reply->TerminationCount));
1331 targ->flags &= ~MPSSAS_TARGET_INRESET;
1333 if (targ->outstanding == 0) {
1334 /* we've finished recovery for this target and all
1335 * of its logical units.
1337 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1338 "recovery finished after target reset\n");
/* Tell CAM a device reset was delivered to this target. */
1340 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1344 mpssas_free_tm(sc, tm);
1347 /* after a target reset, if this target still has
1348 * outstanding commands, the reset effectively failed,
1349 * regardless of the status reported. escalate.
1351 mpssas_log_command(tm, MPS_RECOVERY,
1352 "target reset complete for tm %p, but still have %u command(s)\n",
1353 tm, targ->outstanding);
/* Seconds before a reset TM is considered timed out (see callout below). */
1358 #define MPS_RESET_TIMEOUT 30
/*
 * Build and send a reset task-management request (logical-unit or
 * target reset, selected by 'type') using the pre-allocated TM command
 * 'tm'.  Installs the matching completion handler, arms the TM timeout
 * callout, and maps the command to the hardware.  The error-path
 * return/cleanup lines are omitted from this view.
 */
1361 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1363 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1364 struct mpssas_target *target;
1367 target = tm->cm_targ;
/* Can't address the device without a firmware device handle. */
1368 if (target->handle == 0) {
1369 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1370 __func__, target->tid);
1374 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1375 req->DevHandle = htole16(target->handle);
1376 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1377 req->TaskType = type;
1379 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1380 /* XXX Need to handle invalid LUNs */
1381 MPS_SET_LUN(req->LUN, tm->cm_lun);
1382 tm->cm_targ->logical_unit_resets++;
1383 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1384 "sending logical unit reset\n");
1385 tm->cm_complete = mpssas_logical_unit_reset_complete;
1387 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1388 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1389 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1390 tm->cm_targ->target_resets++;
1391 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1392 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1393 "sending target reset\n");
1394 tm->cm_complete = mpssas_target_reset_complete;
1397 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TM requests use the firmware's high-priority request queue. */
1402 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1403 tm->cm_complete_data = (void *)tm;
/* Arm the TM timeout; mpssas_tm_timeout fires if no completion. */
1405 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1406 mpssas_tm_timeout, tm);
1408 err = mps_map_command(sc, tm);
1410 mpssas_log_command(tm, MPS_RECOVERY,
1411 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task-management command.
 * If the timed-out command it targeted is gone from the target's
 * timedout list, recovery either finishes (list empty) or continues
 * with the next timed-out command; if the aborted command is still at
 * the head of the list, the abort failed and recovery escalates to a
 * logical-unit reset.
 */
1419 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1421 struct mps_command *cm;
1422 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1423 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1424 struct mpssas_target *targ;
/* Disarm the TM timeout armed by mpssas_send_abort(). */
1426 callout_stop(&tm->cm_callout);
1428 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1429 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1433 * Currently there should be no way we can hit this case. It only
1434 * happens when we have a failure to allocate chain frames, and
1435 * task management commands don't have S/G lists.
1437 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1438 mpssas_log_command(tm, MPS_RECOVERY,
1439 "cm_flags = %#x for abort %p TaskMID %u!\n",
1440 tm->cm_flags, tm, le16toh(req->TaskMID));
1441 mpssas_free_tm(sc, tm);
/* A NULL reply typically means the TM was flushed by a diag reset. */
1445 if (reply == NULL) {
1446 mpssas_log_command(tm, MPS_RECOVERY,
1447 "NULL abort reply for tm %p TaskMID %u\n",
1448 tm, le16toh(req->TaskMID));
1449 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1450 /* this completion was due to a reset, just cleanup */
1452 mpssas_free_tm(sc, tm);
1455 /* we should have gotten a reply. */
1461 mpssas_log_command(tm, MPS_RECOVERY,
1462 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1463 le16toh(req->TaskMID),
1464 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1465 le32toh(reply->TerminationCount));
/* Peek at the next timed-out command, if any, for this target. */
1467 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1469 /* if there are no more timedout commands, we're done with
1470 * error recovery for this target.
1472 mpssas_log_command(tm, MPS_RECOVERY,
1473 "finished recovery after aborting TaskMID %u\n",
1474 le16toh(req->TaskMID));
1477 mpssas_free_tm(sc, tm);
/* Head of list differs from the SMID we aborted: that abort worked. */
1479 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1480 /* abort success, but we have more timedout commands to abort */
1481 mpssas_log_command(tm, MPS_RECOVERY,
1482 "continuing recovery after aborting TaskMID %u\n",
1483 le16toh(req->TaskMID));
1485 mpssas_send_abort(sc, tm, cm);
1488 /* we didn't get a command completion, so the abort
1489 * failed as far as we're concerned. escalate.
1491 mpssas_log_command(tm, MPS_RECOVERY,
1492 "abort failed for TaskMID %u tm %p\n",
1493 le16toh(req->TaskMID), tm);
1495 mpssas_send_reset(sc, tm,
1496 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds before an ABORT TASK TM is considered timed out. */
1500 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and send an ABORT TASK task-management request for the
 * timed-out command 'cm', reusing the pre-allocated TM command 'tm'.
 * Installs mpssas_abort_complete, arms the abort timeout, and maps
 * the TM to the hardware.  Error-path lines are omitted from view.
 */
1503 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1505 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1506 struct mpssas_target *targ;
/*
 * NOTE(review): targ is not assigned in the lines visible here;
 * presumably targ = cm->cm_targ in omitted code -- verify.
 */
1510 if (targ->handle == 0) {
1511 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1512 __func__, cm->cm_ccb->ccb_h.target_id);
1516 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1517 "Aborting command %p\n", cm);
1519 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1520 req->DevHandle = htole16(targ->handle);
1521 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1522 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1524 /* XXX Need to handle invalid LUNs */
1525 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the request (by SMID) that should be aborted. */
1527 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests use the firmware's high-priority request queue. */
1530 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1531 tm->cm_complete = mpssas_abort_complete;
1532 tm->cm_complete_data = (void *)tm;
1533 tm->cm_targ = cm->cm_targ;
1534 tm->cm_lun = cm->cm_lun;
1536 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1537 mpssas_tm_timeout, tm);
1541 err = mps_map_command(sc, tm);
1543 mpssas_log_command(tm, MPS_RECOVERY,
1544 "error %d sending abort for cm %p SMID %u\n",
/* NOTE(review): req->TaskMID is little-endian here; sibling log
 * sites wrap it in le16toh() -- consider doing the same. */
1545 err, cm, req->TaskMID);
/*
 * Callout handler for a SCSI I/O command that exceeded its CAM
 * timeout.  Marks the command TIMEDOUT, queues it on the target's
 * timedout list, and either piggybacks on an in-flight recovery TM or
 * allocates a new TM and starts recovery by aborting this command.
 */
1551 mpssas_scsiio_timeout(void *data)
1553 struct mps_softc *sc;
1554 struct mps_command *cm;
1555 struct mpssas_target *targ;
1557 cm = (struct mps_command *)data;
1561 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* NOTE(review): format says "cm %p" but the argument is sc --
 * almost certainly should pass cm here. */
1563 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1566 * Run the interrupt handler to make sure it's not pending. This
1567 * isn't perfect because the command could have already completed
1568 * and been re-used, though this is unlikely.
1570 mps_intr_locked(sc);
1571 if (cm->cm_state == MPS_CM_STATE_FREE) {
1572 mpssas_log_command(cm, MPS_XINFO,
1573 "SCSI command %p almost timed out\n", cm);
1577 if (cm->cm_ccb == NULL) {
1578 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1582 mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1588 /* XXX first, check the firmware state, to see if it's still
1589 * operational. if not, do a diag reset.
1592 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1593 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
/* NOTE(review): targ is not assigned in the lines visible here;
 * presumably targ = cm->cm_targ in omitted code -- verify. */
1594 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1596 if (targ->tm != NULL) {
1597 /* target already in recovery, just queue up another
1598 * timedout command to be processed later.
1600 mps_dprint(sc, MPS_RECOVERY,
1601 "queued timedout cm %p for processing by tm %p\n",
1604 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1605 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1608 /* start recovery by aborting the first timedout command */
1609 mpssas_send_abort(sc, targ->tm, cm);
1612 /* XXX queue this target up for recovery once a TM becomes
1613 * available. The firmware only has a limited number of
1614 * HighPriority credits for the high priority requests used
1615 * for task management, and we ran out.
1617 * Isilon: don't worry about this for now, since we have
1618 * more credits than disks in an enclosure, and limit
1619 * ourselves to one TM per target for recovery.
1621 mps_dprint(sc, MPS_RECOVERY,
1622 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, translate the CCB into an MPI2 SCSI IO request
 * (direction, tagging, LUN, CDB, optional EEDP protection fields),
 * optionally attempt WarpDrive direct I/O, then arm the per-command
 * timeout and hand the request to the hardware.
 */
1628 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1630 MPI2_SCSI_IO_REQUEST *req;
1631 struct ccb_scsiio *csio;
1632 struct mps_softc *sc;
1633 struct mpssas_target *targ;
1634 struct mpssas_lun *lun;
1635 struct mps_command *cm;
1636 uint8_t i, lba_byte, *ref_tag_addr;
1637 uint16_t eedp_flags;
1638 uint32_t mpi_control;
1642 mtx_assert(&sc->mps_mtx, MA_OWNED);
1645 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1646 ("Target %d out of bounds in XPT_SCSI_IO\n",
1647 csio->ccb_h.target_id));
1648 targ = &sassc->targets[csio->ccb_h.target_id];
1649 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* handle == 0 means the firmware has no device here any more. */
1650 if (targ->handle == 0x0) {
1651 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1652 __func__, csio->ccb_h.target_id);
1653 csio->ccb_h.status = CAM_SEL_TIMEOUT;
/* RAID component members are not directly addressable via SCSI IO. */
1657 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1658 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1659 "supported %u\n", __func__, csio->ccb_h.target_id);
1660 csio->ccb_h.status = CAM_TID_INVALID;
1665 * Sometimes, it is possible to get a command that is not "In
1666 * Progress" and was actually aborted by the upper layer. Check for
1667 * this here and complete the command without error.
1669 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1670 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1671 "target %u\n", __func__, csio->ccb_h.target_id);
1676 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1677 * that the volume has timed out. We want volumes to be enumerated
1678 * until they are deleted/removed, not just failed.
1680 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1681 if (targ->devinfo == 0)
1682 csio->ccb_h.status = CAM_REQ_CMP;
1684 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1689 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1690 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1691 csio->ccb_h.status = CAM_TID_INVALID;
1696 cm = mps_alloc_command(sc);
/* Out of driver commands: freeze the SIM queue and ask for a requeue. */
1698 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1699 xpt_freeze_simq(sassc->sim, 1);
1700 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1702 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1703 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1708 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1709 bzero(req, sizeof(*req));
1710 req->DevHandle = htole16(targ->handle);
1711 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1713 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1714 req->SenseBufferLength = MPS_SENSE_LEN;
1716 req->ChainOffset = 0;
1717 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1722 req->DataLength = htole32(csio->dxfer_len);
1723 req->BidirectionalDataLength = 0;
1724 req->IoFlags = htole16(csio->cdb_len);
1727 /* Note: BiDirectional transfers are not supported */
1728 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1730 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1731 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1734 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1735 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1739 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs carry 4 extra dwords beyond the base 16. */
1743 if (csio->cdb_len == 32)
1744 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1746 * It looks like the hardware doesn't require an explicit tag
1747 * number for each transaction. SAM Task Management not supported
1750 switch (csio->tag_action) {
1751 case MSG_HEAD_OF_Q_TAG:
1752 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1754 case MSG_ORDERED_Q_TAG:
1755 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1758 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1760 case CAM_TAG_ACTION_NONE:
1761 case MSG_SIMPLE_Q_TAG:
1763 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Merge in any per-target Transport Layer Retries setting. */
1766 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1767 req->Control = htole32(mpi_control);
1768 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1769 mps_free_command(sc, cm);
1770 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Copy the CDB from wherever CAM put it (pointer vs inline bytes). */
1775 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1776 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1778 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
/* NOTE(review): IoFlags was already set identically above (orig
 * line 1724); this repeat is redundant but harmless. */
1779 req->IoFlags = htole16(csio->cdb_len);
1782 * Check if EEDP is supported and enabled. If it is then check if the
1783 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1784 * is formatted for EEDP support. If all of this is true, set CDB up
1785 * for EEDP transfer.
1787 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1788 if (sc->eedp_enabled && eedp_flags) {
1789 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1790 if (lun->lun_id == csio->ccb_h.target_lun) {
1795 if ((lun != NULL) && (lun->eedp_formatted)) {
1796 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1797 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1798 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1799 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1800 req->EEDPFlags = htole16(eedp_flags);
1803 * If CDB less than 32, fill in Primary Ref Tag with
1804 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1805 * already there. Also, set protection bit. FreeBSD
1806 * currently does not support CDBs bigger than 16, but
1807 * the code doesn't hurt, and will be here for the
1810 if (csio->cdb_len != 32) {
1811 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1812 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1813 PrimaryReferenceTag;
1814 for (i = 0; i < 4; i++) {
1816 req->CDB.CDB32[lba_byte + i];
1819 req->CDB.EEDP32.PrimaryReferenceTag =
1820 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1821 req->CDB.EEDP32.PrimaryApplicationTagMask =
1823 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1827 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1828 req->EEDPFlags = htole16(eedp_flags);
1829 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1835 cm->cm_length = csio->dxfer_len;
1836 if (cm->cm_length != 0) {
1838 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
/* SGL starts at dword 24 of the 32-dword request frame. */
1842 cm->cm_sge = &req->SGL;
1843 cm->cm_sglsize = (32 - 24) * 4;
1844 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1845 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1846 cm->cm_complete = mpssas_scsiio_complete;
1847 cm->cm_complete_data = ccb;
1849 cm->cm_lun = csio->ccb_h.target_lun;
1853 * If HBA is a WD and the command is not for a retry, try to build a
1854 * direct I/O message. If failed, or the command is for a retry, send
1855 * the I/O to the IR volume itself.
1857 if (sc->WD_valid_config) {
1858 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1859 mpssas_direct_drive_io(sassc, cm, ccb);
1861 ccb->ccb_h.status = CAM_REQ_INPROG;
/* CAM timeout is in milliseconds; convert to ticks for the callout. */
1865 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1866 mpssas_scsiio_timeout, cm);
1869 targ->outstanding++;
1870 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1871 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1873 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1874 __func__, cm, ccb, targ->outstanding);
1876 mps_map_command(sc, cm);
/*
 * Log a human-readable description of an MPI2 task-management
 * response code (the per-case break statements and the default case
 * are in source lines omitted from this view).
 */
1881 mps_response_code(struct mps_softc *sc, u8 response_code)
1885 switch (response_code) {
1886 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1887 desc = "task management request completed";
1889 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1890 desc = "invalid frame";
1892 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1893 desc = "task management request not supported";
1895 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1896 desc = "task management request failed";
1898 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1899 desc = "task management request succeeded";
1901 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1902 desc = "invalid lun";
1905 desc = "overlapped tag attempted";
1907 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1908 desc = "task queued, however not sent to target";
1914 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1915 response_code, desc);
1918 * mps_sc_failed_io_info - translate a non-successful SCSI_IO request
/*
 * Decode and log the details of a failed SCSI_IO reply: IOC status,
 * SCSI status, SCSI state flags, sense data (when valid), and any
 * response-info bytes.  Diagnostic output only; no driver state is
 * modified.  Per-case break statements are in omitted lines.
 */
1921 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1922 Mpi2SCSIIOReply_t *mpi_reply)
1926 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1927 MPI2_IOCSTATUS_MASK;
1928 u8 scsi_state = mpi_reply->SCSIState;
1929 u8 scsi_status = mpi_reply->SCSIStatus;
1930 char *desc_ioc_state = NULL;
1931 char *desc_scsi_status = NULL;
/* scsi_state description is built up in the softc scratch buffer. */
1932 char *desc_scsi_state = sc->tmp_string;
1933 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 log-info entries are suppressed (body line omitted). */
1935 if (log_info == 0x31170000)
1938 switch (ioc_status) {
1939 case MPI2_IOCSTATUS_SUCCESS:
1940 desc_ioc_state = "success";
1942 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1943 desc_ioc_state = "invalid function";
1945 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1946 desc_ioc_state = "scsi recovered error";
1948 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1949 desc_ioc_state = "scsi invalid dev handle";
1951 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1952 desc_ioc_state = "scsi device not there";
1954 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1955 desc_ioc_state = "scsi data overrun";
1957 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1958 desc_ioc_state = "scsi data underrun";
1960 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1961 desc_ioc_state = "scsi io data error";
1963 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1964 desc_ioc_state = "scsi protocol error";
1966 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1967 desc_ioc_state = "scsi task terminated";
1969 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1970 desc_ioc_state = "scsi residual mismatch";
1972 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1973 desc_ioc_state = "scsi task mgmt failed";
1975 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1976 desc_ioc_state = "scsi ioc terminated";
1978 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1979 desc_ioc_state = "scsi ext terminated";
1981 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1982 desc_ioc_state = "eedp guard error";
1984 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1985 desc_ioc_state = "eedp ref tag error";
1987 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1988 desc_ioc_state = "eedp app tag error";
1991 desc_ioc_state = "unknown";
1995 switch (scsi_status) {
1996 case MPI2_SCSI_STATUS_GOOD:
1997 desc_scsi_status = "good";
1999 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2000 desc_scsi_status = "check condition";
2002 case MPI2_SCSI_STATUS_CONDITION_MET:
2003 desc_scsi_status = "condition met";
2005 case MPI2_SCSI_STATUS_BUSY:
2006 desc_scsi_status = "busy";
2008 case MPI2_SCSI_STATUS_INTERMEDIATE:
2009 desc_scsi_status = "intermediate";
2011 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2012 desc_scsi_status = "intermediate condmet";
2014 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2015 desc_scsi_status = "reservation conflict";
2017 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2018 desc_scsi_status = "command terminated";
2020 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2021 desc_scsi_status = "task set full";
2023 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2024 desc_scsi_status = "aca active";
2026 case MPI2_SCSI_STATUS_TASK_ABORTED:
2027 desc_scsi_status = "task aborted";
2030 desc_scsi_status = "unknown";
2034 desc_scsi_state[0] = '\0';
/* NOTE(review): repointing desc_scsi_state at the literal " " and
 * then strcat'ing into it would be undefined behavior if reached;
 * the guard/context lines are omitted from this view -- verify
 * against the full source. */
2036 desc_scsi_state = " ";
2037 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2038 strcat(desc_scsi_state, "response info ");
2039 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2040 strcat(desc_scsi_state, "state terminated ");
2041 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2042 strcat(desc_scsi_state, "no status ");
2043 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2044 strcat(desc_scsi_state, "autosense failed ");
2045 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2046 strcat(desc_scsi_state, "autosense valid ");
2048 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2049 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2050 /* We can add more detail about underflow data here
2053 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2054 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2055 desc_scsi_state, scsi_state);
/* Dump decoded sense data when verbose debugging is enabled. */
2057 if (sc->mps_debug & MPS_XINFO &&
2058 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2059 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2060 scsi_sense_print(csio);
2061 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2064 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2065 response_info = le32toh(mpi_reply->ResponseInfo);
2066 response_bytes = (u8 *)&response_info;
/* Byte 0 of ResponseInfo holds the TM response code. */
2067 mps_response_code(sc,response_bytes[0]);
2072 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2074 MPI2_SCSI_IO_REPLY *rep;
2076 struct ccb_scsiio *csio;
2077 struct mpssas_softc *sassc;
2078 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2079 u8 *TLR_bits, TLR_on;
2084 mps_dprint(sc, MPS_TRACE,
2085 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2086 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2087 cm->cm_targ->outstanding);
2089 callout_stop(&cm->cm_callout);
2090 mtx_assert(&sc->mps_mtx, MA_OWNED);
2093 ccb = cm->cm_complete_data;
2095 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2097 * XXX KDM if the chain allocation fails, does it matter if we do
2098 * the sync and unload here? It is simpler to do it in every case,
2099 * assuming it doesn't cause problems.
2101 if (cm->cm_data != NULL) {
2102 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2103 dir = BUS_DMASYNC_POSTREAD;
2104 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2105 dir = BUS_DMASYNC_POSTWRITE;
2106 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2107 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2110 cm->cm_targ->completed++;
2111 cm->cm_targ->outstanding--;
2112 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2113 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2115 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2116 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2117 if (cm->cm_reply != NULL)
2118 mpssas_log_command(cm, MPS_RECOVERY,
2119 "completed timedout cm %p ccb %p during recovery "
2120 "ioc %x scsi %x state %x xfer %u\n",
2122 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2123 le32toh(rep->TransferCount));
2125 mpssas_log_command(cm, MPS_RECOVERY,
2126 "completed timedout cm %p ccb %p during recovery\n",
2128 } else if (cm->cm_targ->tm != NULL) {
2129 if (cm->cm_reply != NULL)
2130 mpssas_log_command(cm, MPS_RECOVERY,
2131 "completed cm %p ccb %p during recovery "
2132 "ioc %x scsi %x state %x xfer %u\n",
2134 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2135 le32toh(rep->TransferCount));
2137 mpssas_log_command(cm, MPS_RECOVERY,
2138 "completed cm %p ccb %p during recovery\n",
2140 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2141 mpssas_log_command(cm, MPS_RECOVERY,
2142 "reset completed cm %p ccb %p\n",
2146 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2148 * We ran into an error after we tried to map the command,
2149 * so we're getting a callback without queueing the command
2150 * to the hardware. So we set the status here, and it will
2151 * be retained below. We'll go through the "fast path",
2152 * because there can be no reply when we haven't actually
2153 * gone out to the hardware.
2155 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2158 * Currently the only error included in the mask is
2159 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2160 * chain frames. We need to freeze the queue until we get
2161 * a command that completed without this error, which will
2162 * hopefully have some chain frames attached that we can
2163 * use. If we wanted to get smarter about it, we would
2164 * only unfreeze the queue in this condition when we're
2165 * sure that we're getting some chain frames back. That's
2166 * probably unnecessary.
2168 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2169 xpt_freeze_simq(sassc->sim, 1);
2170 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2171 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2172 "freezing SIM queue\n");
2176 /* Take the fast path to completion */
2177 if (cm->cm_reply == NULL) {
2178 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2179 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2180 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2182 ccb->ccb_h.status = CAM_REQ_CMP;
2183 ccb->csio.scsi_status = SCSI_STATUS_OK;
2185 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2186 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2187 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2188 mps_dprint(sc, MPS_XINFO,
2189 "Unfreezing SIM queue\n");
2194 * There are two scenarios where the status won't be
2195 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2196 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2198 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2200 * Freeze the dev queue so that commands are
2201 * executed in the correct order with after error
2204 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2205 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2207 mps_free_command(sc, cm);
2212 mpssas_log_command(cm, MPS_XINFO,
2213 "ioc %x scsi %x state %x xfer %u\n",
2214 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2215 le32toh(rep->TransferCount));
2218 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2219 * Volume if an error occurred (normal I/O retry). Use the original
2220 * CCB, but set a flag that this will be a retry so that it's sent to
2221 * the original volume. Free the command but reuse the CCB.
2223 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2224 mps_free_command(sc, cm);
2225 ccb->ccb_h.status = MPS_WD_RETRY;
2226 mpssas_action_scsiio(sassc, ccb);
2230 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2231 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2232 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2234 case MPI2_IOCSTATUS_SUCCESS:
2235 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2237 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2238 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2239 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2241 /* Completion failed at the transport level. */
2242 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2243 MPI2_SCSI_STATE_TERMINATED)) {
2244 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2248 /* In a modern packetized environment, an autosense failure
2249 * implies that there's not much else that can be done to
2250 * recover the command.
2252 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2253 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2258 * CAM doesn't care about SAS Response Info data, but if this is
2259 * the state check if TLR should be done. If not, clear the
2260 * TLR_bits for the target.
2262 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2263 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2264 MPS_SCSI_RI_INVALID_FRAME)) {
2265 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2266 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2270 * Intentionally override the normal SCSI status reporting
2271 * for these two cases. These are likely to happen in a
2272 * multi-initiator environment, and we want to make sure that
2273 * CAM retries these commands rather than fail them.
2275 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2276 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2277 ccb->ccb_h.status = CAM_REQ_ABORTED;
2281 /* Handle normal status and sense */
2282 csio->scsi_status = rep->SCSIStatus;
2283 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2284 ccb->ccb_h.status = CAM_REQ_CMP;
2286 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2288 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2289 int sense_len, returned_sense_len;
2291 returned_sense_len = min(le32toh(rep->SenseCount),
2292 sizeof(struct scsi_sense_data));
2293 if (returned_sense_len < ccb->csio.sense_len)
2294 ccb->csio.sense_resid = ccb->csio.sense_len -
2297 ccb->csio.sense_resid = 0;
2299 sense_len = min(returned_sense_len,
2300 ccb->csio.sense_len - ccb->csio.sense_resid);
2301 bzero(&ccb->csio.sense_data,
2302 sizeof(ccb->csio.sense_data));
2303 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2304 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2308 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2309 * and it's page code 0 (Supported Page List), and there is
2310 * inquiry data, and this is for a sequential access device, and
2311 * the device is an SSP target, and TLR is supported by the
2312 * controller, turn the TLR_bits value ON if page 0x90 is
2315 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2316 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2317 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2318 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2319 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2320 T_SEQUENTIAL) && (sc->control_TLR) &&
2321 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2322 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2323 vpd_list = (struct scsi_vpd_supported_page_list *)
2325 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2327 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2328 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2329 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2330 csio->cdb_io.cdb_bytes[4];
2331 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2332 if (vpd_list->list[i] == 0x90) {
2339 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2340 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2342 * If devinfo is 0 this will be a volume. In that case don't
2343 * tell CAM that the volume is not there. We want volumes to
2344 * be enumerated until they are deleted/removed, not just
2347 if (cm->cm_targ->devinfo == 0)
2348 ccb->ccb_h.status = CAM_REQ_CMP;
2350 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2352 case MPI2_IOCSTATUS_INVALID_SGL:
2353 mps_print_scsiio_cmd(sc, cm);
2354 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2356 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2358 * This is one of the responses that comes back when an I/O
2359 * has been aborted. If it is because of a timeout that we
2360 * initiated, just set the status to CAM_CMD_TIMEOUT.
2361 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2362 * command is the same (it gets retried, subject to the
2363 * retry counter), the only difference is what gets printed
2366 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2367 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2369 ccb->ccb_h.status = CAM_REQ_ABORTED;
2371 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2372 /* resid is ignored for this condition */
2374 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2376 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2377 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2379 * Since these are generally external (i.e. hopefully
2380 * transient transport-related) errors, retry these without
2381 * decrementing the retry count.
2383 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2384 mpssas_log_command(cm, MPS_INFO,
2385 "terminated ioc %x scsi %x state %x xfer %u\n",
2386 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2387 le32toh(rep->TransferCount));
2389 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2390 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2391 case MPI2_IOCSTATUS_INVALID_VPID:
2392 case MPI2_IOCSTATUS_INVALID_FIELD:
2393 case MPI2_IOCSTATUS_INVALID_STATE:
2394 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2395 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2396 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2397 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2398 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2400 mpssas_log_command(cm, MPS_XINFO,
2401 "completed ioc %x scsi %x state %x xfer %u\n",
2402 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2403 le32toh(rep->TransferCount));
2404 csio->resid = cm->cm_length;
2405 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2409 mps_sc_failed_io_info(sc,csio,rep);
2411 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2412 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2413 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2414 mps_dprint(sc, MPS_XINFO, "Command completed, "
2415 "unfreezing SIM queue\n");
2418 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2419 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2420 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2423 mps_free_command(sc, cm);
2427 /* All Request reached here are Endian safe */
/*
 * mpssas_direct_drive_io -- WarpDrive "direct drive" I/O translation.
 *
 * If the SCSI command is a READ/WRITE with a 6-, 10-, 12- or 16-byte CDB
 * addressed to the direct-drive volume (pIO_req->DevHandle ==
 * sc->DD_dev_handle), and the transfer stays within one stripe, translate
 * the volume-relative (virtual) LBA to the member disk's physical LBA,
 * patch the LBA bytes of the CDB in place, retarget DevHandle at the
 * PhysDisk, and set MPS_CM_FLAGS_DD_IO.  I/Os that cross a stripe
 * boundary (or exceed the volume size) are left untouched and go to the
 * IR volume itself.
 *
 * NOTE(review): this chunk is missing lines from extraction (declaration
 * of virtLBA, several else branches and closing braces are not visible);
 * comments below describe only what the visible code shows.
 */
2429 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2431 pMpi2SCSIIORequest_t pIO_req;
2432 struct mps_softc *sc = sassc->sc;
2434 uint32_t physLBA, stripe_offset, stripe_unit;
2435 uint32_t io_size, column;
2436 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2439 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2440 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2441 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2442 * bit different than the 10/16 CDBs, handle them separately.
2444 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2445 CDB = pIO_req->CDB.CDB32;
2448 * Handle 6 byte CDBs.
2450 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2451 (CDB[0] == WRITE_6))) {
2453 * Get the transfer size in blocks.
2455 io_size = (cm->cm_length >> sc->DD_block_exponent);
2458 * Get virtual LBA given in the CDB.
/* 6-byte CDB carries a 21-bit LBA: low 5 bits of byte 1, then bytes 2-3. */
2460 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2461 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2464 * Check that LBA range for I/O does not exceed volume's
2467 if ((virtLBA + (uint64_t)io_size - 1) <=
2470 * Check if the I/O crosses a stripe boundary. If not,
2471 * translate the virtual LBA to a physical LBA and set
2472 * the DevHandle for the PhysDisk to be used. If it
2473 * does cross a boundry, do normal I/O. To get the
2474 * right DevHandle to use, get the map number for the
2475 * column, then use that map number to look up the
2476 * DevHandle of the PhysDisk.
/* DD_stripe_size is a power of two, so & (size - 1) is the in-stripe offset. */
2478 stripe_offset = (uint32_t)virtLBA &
2479 (sc->DD_stripe_size - 1);
2480 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2481 physLBA = (uint32_t)virtLBA >>
2482 sc->DD_stripe_exponent;
2483 stripe_unit = physLBA / sc->DD_num_phys_disks;
2484 column = physLBA % sc->DD_num_phys_disks;
2485 pIO_req->DevHandle =
2486 htole16(sc->DD_column_map[column].dev_handle);
2487 /* ???? Is this endian safe*/
2488 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the three LBA bytes of the 6-byte CDB in big-endian order. */
2491 physLBA = (stripe_unit <<
2492 sc->DD_stripe_exponent) + stripe_offset;
2493 ptrLBA = &pIO_req->CDB.CDB32[1];
2494 physLBA_byte = (uint8_t)(physLBA >> 16);
2495 *ptrLBA = physLBA_byte;
2496 ptrLBA = &pIO_req->CDB.CDB32[2];
2497 physLBA_byte = (uint8_t)(physLBA >> 8);
2498 *ptrLBA = physLBA_byte;
2499 ptrLBA = &pIO_req->CDB.CDB32[3];
2500 physLBA_byte = (uint8_t)physLBA;
2501 *ptrLBA = physLBA_byte;
2504 * Set flag that Direct Drive I/O is
2507 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2514 * Handle 10, 12 or 16 byte CDBs.
2516 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2517 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2518 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2519 (CDB[0] == WRITE_12))) {
2521 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2522 * are 0. If not, this is accessing beyond 2TB so handle it in
2523 * the else section. 10-byte and 12-byte CDB's are OK.
2524 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2525 * ready to accept 12byte CDB for Direct IOs.
2527 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2528 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2529 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2531 * Get the transfer size in blocks.
2533 io_size = (cm->cm_length >> sc->DD_block_exponent);
2536 * Get virtual LBA. Point to correct lower 4 bytes of
2537 * LBA in the CDB depending on command.
/* LBA starts at byte 2 for 10/12-byte CDBs, byte 6 for 16-byte (low half). */
2539 lba_idx = ((CDB[0] == READ_12) ||
2540 (CDB[0] == WRITE_12) ||
2541 (CDB[0] == READ_10) ||
2542 (CDB[0] == WRITE_10))? 2 : 6;
2543 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2544 ((uint64_t)CDB[lba_idx + 1] << 16) |
2545 ((uint64_t)CDB[lba_idx + 2] << 8) |
2546 (uint64_t)CDB[lba_idx + 3];
2549 * Check that LBA range for I/O does not exceed volume's
2552 if ((virtLBA + (uint64_t)io_size - 1) <=
2555 * Check if the I/O crosses a stripe boundary.
2556 * If not, translate the virtual LBA to a
2557 * physical LBA and set the DevHandle for the
2558 * PhysDisk to be used. If it does cross a
2559 * boundry, do normal I/O. To get the right
2560 * DevHandle to use, get the map number for the
2561 * column, then use that map number to look up
2562 * the DevHandle of the PhysDisk.
2564 stripe_offset = (uint32_t)virtLBA &
2565 (sc->DD_stripe_size - 1);
2566 if ((stripe_offset + io_size) <=
2567 sc->DD_stripe_size) {
2568 physLBA = (uint32_t)virtLBA >>
2569 sc->DD_stripe_exponent;
2570 stripe_unit = physLBA /
2571 sc->DD_num_phys_disks;
2573 sc->DD_num_phys_disks;
2574 pIO_req->DevHandle =
2575 htole16(sc->DD_column_map[column].
2577 cm->cm_desc.SCSIIO.DevHandle =
2580 physLBA = (stripe_unit <<
2581 sc->DD_stripe_exponent) +
/* Rewrite the 4 LBA bytes in place, most-significant byte first. */
2584 &pIO_req->CDB.CDB32[lba_idx];
2585 physLBA_byte = (uint8_t)(physLBA >> 24);
2586 *ptrLBA = physLBA_byte;
2588 &pIO_req->CDB.CDB32[lba_idx + 1];
2589 physLBA_byte = (uint8_t)(physLBA >> 16);
2590 *ptrLBA = physLBA_byte;
2592 &pIO_req->CDB.CDB32[lba_idx + 2];
2593 physLBA_byte = (uint8_t)(physLBA >> 8);
2594 *ptrLBA = physLBA_byte;
2596 &pIO_req->CDB.CDB32[lba_idx + 3];
2597 physLBA_byte = (uint8_t)physLBA;
2598 *ptrLBA = physLBA_byte;
2601 * Set flag that Direct Drive I/O is
2604 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2609 * 16-byte CDB and the upper 4 bytes of the CDB are not
2610 * 0. Get the transfer size in blocks.
2612 io_size = (cm->cm_length >> sc->DD_block_exponent);
/*
 * NOTE(review): byte 2 is the most-significant byte of a 64-bit
 * big-endian LBA, so this shift should almost certainly be << 56,
 * not << 54 -- the other bytes step down by 8 bits each.  Verify
 * against a fixed upstream revision before relying on the >2TB path.
 */
2617 virtLBA = ((uint64_t)CDB[2] << 54) |
2618 ((uint64_t)CDB[3] << 48) |
2619 ((uint64_t)CDB[4] << 40) |
2620 ((uint64_t)CDB[5] << 32) |
2621 ((uint64_t)CDB[6] << 24) |
2622 ((uint64_t)CDB[7] << 16) |
2623 ((uint64_t)CDB[8] << 8) |
2627 * Check that LBA range for I/O does not exceed volume's
2630 if ((virtLBA + (uint64_t)io_size - 1) <=
2633 * Check if the I/O crosses a stripe boundary.
2634 * If not, translate the virtual LBA to a
2635 * physical LBA and set the DevHandle for the
2636 * PhysDisk to be used. If it does cross a
2637 * boundry, do normal I/O. To get the right
2638 * DevHandle to use, get the map number for the
2639 * column, then use that map number to look up
2640 * the DevHandle of the PhysDisk.
2642 stripe_offset = (uint32_t)virtLBA &
2643 (sc->DD_stripe_size - 1);
2644 if ((stripe_offset + io_size) <=
2645 sc->DD_stripe_size) {
2646 physLBA = (uint32_t)(virtLBA >>
2647 sc->DD_stripe_exponent);
2648 stripe_unit = physLBA /
2649 sc->DD_num_phys_disks;
2651 sc->DD_num_phys_disks;
2652 pIO_req->DevHandle =
2653 htole16(sc->DD_column_map[column].
2655 cm->cm_desc.SCSIIO.DevHandle =
2658 physLBA = (stripe_unit <<
2659 sc->DD_stripe_exponent) +
2663 * Set upper 4 bytes of LBA to 0. We
2664 * assume that the phys disks are less
2665 * than 2 TB's in size. Then, set the
2668 pIO_req->CDB.CDB32[2] = 0;
2669 pIO_req->CDB.CDB32[3] = 0;
2670 pIO_req->CDB.CDB32[4] = 0;
2671 pIO_req->CDB.CDB32[5] = 0;
2672 ptrLBA = &pIO_req->CDB.CDB32[6];
2673 physLBA_byte = (uint8_t)(physLBA >> 24);
2674 *ptrLBA = physLBA_byte;
2675 ptrLBA = &pIO_req->CDB.CDB32[7];
2676 physLBA_byte = (uint8_t)(physLBA >> 16);
2677 *ptrLBA = physLBA_byte;
2678 ptrLBA = &pIO_req->CDB.CDB32[8];
2679 physLBA_byte = (uint8_t)(physLBA >> 8);
2680 *ptrLBA = physLBA_byte;
2681 ptrLBA = &pIO_req->CDB.CDB32[9];
2682 physLBA_byte = (uint8_t)physLBA;
2683 *ptrLBA = physLBA_byte;
2686 * Set flag that Direct Drive I/O is
2689 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2696 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request issued by
 * mpssas_send_smpcmd().  Translates the MPI reply (or its absence) into
 * a CAM status on the originating CCB, syncs/unloads the data DMA map,
 * and frees the command.  NOTE(review): extraction dropped some lines
 * here (e.g. the NULL-reply test before line 2722 and xpt_done at the
 * end); comments describe only the visible code.
 */
2698 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2700 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2701 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2705 ccb = cm->cm_complete_data;
2708 * Currently there should be no way we can hit this case. It only
2709 * happens when we have a failure to allocate chain frames, and SMP
2710 * commands require two S/G elements only. That should be handled
2711 * in the standard request size.
2713 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2714 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2715 __func__, cm->cm_flags);
2716 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2720 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2722 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2723 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Reassemble the 64-bit SAS address from the request's two LE32 halves. */
2727 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2728 sasaddr = le32toh(req->SASAddress.Low);
2729 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2731 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2732 MPI2_IOCSTATUS_SUCCESS ||
2733 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2734 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2735 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2736 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2740 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2741 "%#jx completed successfully\n", __func__,
2742 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2744 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2745 ccb->ccb_h.status = CAM_REQ_CMP;
2747 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2751 * We sync in both directions because we had DMAs in the S/G list
2752 * in both directions.
2754 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2755 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2756 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2757 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for an XPT_SMP_IO CCB to
 * the expander at 'sasaddr'.  The request and response buffers are
 * described to busdma via a two-element uio (one iovec each) so a single
 * mps_map_command() handles both directions.  Virtual addresses only;
 * physical-address CCBs are rejected.  NOTE(review): extraction dropped
 * lines here (sc assignment, some error-path xpt_done calls); comments
 * describe only the visible code.
 */
2762 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2764 struct mps_command *cm;
2765 uint8_t *request, *response;
2766 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2767 struct mps_softc *sc;
2776 * XXX We don't yet support physical addresses here.
2778 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2779 case CAM_DATA_PADDR:
2780 case CAM_DATA_SG_PADDR:
2781 mps_dprint(sc, MPS_ERROR,
2782 "%s: physical addresses not supported\n", __func__);
2783 ccb->ccb_h.status = CAM_REQ_INVALID;
2788 * The chip does not support more than one buffer for the
2789 * request or response.
2791 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2792 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2793 mps_dprint(sc, MPS_ERROR,
2794 "%s: multiple request or response "
2795 "buffer segments not supported for SMP\n",
2797 ccb->ccb_h.status = CAM_REQ_INVALID;
2803 * The CAM_SCATTER_VALID flag was originally implemented
2804 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2805 * We have two. So, just take that flag to mean that we
2806 * might have S/G lists, and look at the S/G segment count
2807 * to figure out whether that is the case for each individual
/* Single-entry S/G lists are accepted: use the lone segment's address. */
2810 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2811 bus_dma_segment_t *req_sg;
2813 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2814 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2816 request = ccb->smpio.smp_request;
2818 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2819 bus_dma_segment_t *rsp_sg;
2821 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2822 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2824 response = ccb->smpio.smp_response;
2826 case CAM_DATA_VADDR:
2827 request = ccb->smpio.smp_request;
2828 response = ccb->smpio.smp_response;
2831 ccb->ccb_h.status = CAM_REQ_INVALID;
2836 cm = mps_alloc_command(sc);
2838 mps_dprint(sc, MPS_ERROR,
2839 "%s: cannot allocate command\n", __func__);
2840 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2845 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2846 bzero(req, sizeof(*req));
2847 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2849 /* Allow the chip to use any route to this SAS address. */
2850 req->PhysicalPort = 0xff;
2852 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2854 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2856 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2857 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2859 mpi_init_sge(cm, req, &req->SGL);
2862 * Set up a uio to pass into mps_map_command(). This allows us to
2863 * do one map command, and one busdma call in there.
2865 cm->cm_uio.uio_iov = cm->cm_iovec;
2866 cm->cm_uio.uio_iovcnt = 2;
2867 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2870 * The read/write flag isn't used by busdma, but set it just in
2871 * case. This isn't exactly accurate, either, since we're going in
2874 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 carries the SMP request, iovec 1 receives the response. */
2876 cm->cm_iovec[0].iov_base = request;
2877 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2878 cm->cm_iovec[1].iov_base = response;
2879 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2881 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2882 cm->cm_iovec[1].iov_len;
2885 * Trigger a warning message in mps_data_cb() for the user if we
2886 * wind up exceeding two S/G segments. The chip expects one
2887 * segment for the request and another for the response.
2889 cm->cm_max_segs = 2;
2891 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2892 cm->cm_complete = mpssas_smpio_complete;
2893 cm->cm_complete_data = ccb;
2896 * Tell the mapping code that we're using a uio, and that this is
2897 * an SMP passthrough request. There is a little special-case
2898 * logic there (in mps_data_cb()) to handle the bidirectional
2901 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2902 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2904 /* The chip data format is little endian. */
2905 req->SASAddress.High = htole32(sasaddr >> 32);
2906 req->SASAddress.Low = htole32(sasaddr);
2909 * XXX Note that we don't have a timeout/abort mechanism here.
2910 * From the manual, it looks like task management requests only
2911 * work for SCSI IO and SATA passthrough requests. We may need to
2912 * have a mechanism to retry requests in the event of a chip reset
2913 * at least. Hopefully the chip will insure that any errors short
2914 * of that are relayed back to the driver.
2916 error = mps_map_command(sc, cm);
2917 if ((error != 0) && (error != EINPROGRESS)) {
2918 mps_dprint(sc, MPS_ERROR,
2919 "%s: error %d returned from mps_map_command()\n",
/* Failure cleanup: release the command and fail the CCB. */
2927 mps_free_command(sc, cm);
2928 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP frame
 * should be routed to and hand off to mpssas_send_smpcmd().  If the
 * target itself is an SMP target (an expander) its own address is used;
 * otherwise the parent device's (expander's) address is used, validated
 * via either the cached target list (OLD_MPS_PROBE) or the parent_*
 * fields captured during discovery.  NOTE(review): extraction dropped
 * lines here (sc assignment, xpt_done calls after the error statuses);
 * comments describe only the visible code.
 */
2935 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2937 struct mps_softc *sc;
2938 struct mpssas_target *targ;
2939 uint64_t sasaddr = 0;
2944 * Make sure the target exists.
2946 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2947 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2948 targ = &sassc->targets[ccb->ccb_h.target_id];
2949 if (targ->handle == 0x0) {
2950 mps_dprint(sc, MPS_ERROR,
2951 "%s: target %d does not exist!\n", __func__,
2952 ccb->ccb_h.target_id);
2953 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2959 * If this device has an embedded SMP target, we'll talk to it
2961 * figure out what the expander's address is.
2963 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2964 sasaddr = targ->sasaddr;
2967 * If we don't have a SAS address for the expander yet, try
2968 * grabbing it from the page 0x83 information cached in the
2969 * transport layer for this target. LSI expanders report the
2970 * expander SAS address as the port-associated SAS address in
2971 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2974 * XXX KDM disable this for now, but leave it commented out so that
2975 * it is obvious that this is another possible way to get the SAS
2978 * The parent handle method below is a little more reliable, and
2979 * the other benefit is that it works for devices other than SES
2980 * devices. So you can send a SMP request to a da(4) device and it
2981 * will get routed to the expander that device is attached to.
2982 * (Assuming the da(4) device doesn't contain an SMP target...)
2986 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2990 * If we still don't have a SAS address for the expander, look for
2991 * the parent device of this device, which is probably the expander.
2994 #ifdef OLD_MPS_PROBE
2995 struct mpssas_target *parent_target;
2998 if (targ->parent_handle == 0x0) {
2999 mps_dprint(sc, MPS_ERROR,
3000 "%s: handle %d does not have a valid "
3001 "parent handle!\n", __func__, targ->handle);
3002 ccb->ccb_h.status = CAM_REQ_INVALID;
3005 #ifdef OLD_MPS_PROBE
3006 parent_target = mpssas_find_target_by_handle(sassc, 0,
3007 targ->parent_handle);
3009 if (parent_target == NULL) {
3010 mps_dprint(sc, MPS_ERROR,
3011 "%s: handle %d does not have a valid "
3012 "parent target!\n", __func__, targ->handle);
3013 ccb->ccb_h.status = CAM_REQ_INVALID;
3017 if ((parent_target->devinfo &
3018 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3019 mps_dprint(sc, MPS_ERROR,
3020 "%s: handle %d parent %d does not "
3021 "have an SMP target!\n", __func__,
3022 targ->handle, parent_target->handle);
3023 ccb->ccb_h.status = CAM_REQ_INVALID;
3028 sasaddr = parent_target->sasaddr;
3029 #else /* OLD_MPS_PROBE */
/* Non-OLD_MPS_PROBE path: rely on parent info cached on the target. */
3030 if ((targ->parent_devinfo &
3031 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3032 mps_dprint(sc, MPS_ERROR,
3033 "%s: handle %d parent %d does not "
3034 "have an SMP target!\n", __func__,
3035 targ->handle, targ->parent_handle);
3036 ccb->ccb_h.status = CAM_REQ_INVALID;
3040 if (targ->parent_sasaddr == 0x0) {
3041 mps_dprint(sc, MPS_ERROR,
3042 "%s: handle %d parent handle %d does "
3043 "not have a valid SAS address!\n",
3044 __func__, targ->handle, targ->parent_handle);
3045 ccb->ccb_h.status = CAM_REQ_INVALID;
3049 sasaddr = targ->parent_sasaddr;
3050 #endif /* OLD_MPS_PROBE */
/* No address could be resolved at all: reject the CCB. */
3055 mps_dprint(sc, MPS_INFO,
3056 "%s: unable to find SAS address for handle %d\n",
3057 __func__, targ->handle);
3058 ccb->ccb_h.status = CAM_REQ_INVALID;
3061 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3069 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a command, build a SCSI task
 * management Target Reset (hard link reset) for the target's DevHandle,
 * and map/submit it.  mpssas_resetdev_complete() finishes the CCB.
 * Must be called with the softc mutex held.  NOTE(review): extraction
 * dropped lines here (sc assignment, the tm == NULL test, xpt_done on
 * the failure path); comments describe only the visible code.
 */
3072 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3074 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3075 struct mps_softc *sc;
3076 struct mps_command *tm;
3077 struct mpssas_target *targ;
3079 MPS_FUNCTRACE(sassc->sc);
3080 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3082 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3083 ("Target %d out of bounds in XPT_RESET_DEV\n",
3084 ccb->ccb_h.target_id));
3086 tm = mps_alloc_command(sc)
3088 mps_dprint(sc, MPS_ERROR,
3089 "command alloc failure in mpssas_action_resetdev\n");
3090 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3095 targ = &sassc->targets[ccb->ccb_h.target_id];
3096 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3097 req->DevHandle = htole16(targ->handle);
3098 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3099 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3101 /* SAS Hard Link Reset / SATA Link Reset */
3102 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go on the high-priority queue. */
3105 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3106 tm->cm_complete = mpssas_resetdev_complete;
3107 tm->cm_complete_data = ccb;
3109 mps_map_command(sc, tm);
/*
 * Completion handler for the Target Reset issued by
 * mpssas_action_resetdev().  Sets the CCB status from the TM reply's
 * ResponseCode, announces AC_SENT_BDR to CAM on success, and releases
 * the task-management command.  Called with the softc mutex held.
 * NOTE(review): extraction dropped lines (xpt_done calls, the else
 * branch before line 3152); comments describe only the visible code.
 */
3113 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3115 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3119 mtx_assert(&sc->mps_mtx, MA_OWNED);
3121 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3122 ccb = tm->cm_complete_data;
3125 * Currently there should be no way we can hit this case. It only
3126 * happens when we have a failure to allocate chain frames, and
3127 * task management commands don't have S/G lists.
3129 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3130 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3132 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3134 mps_dprint(sc, MPS_ERROR,
3135 "%s: cm_flags = %#x for reset of handle %#04x! "
3136 "This should not happen!\n", __func__, tm->cm_flags,
3138 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3142 mps_dprint(sc, MPS_XINFO,
3143 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3144 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3146 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3147 ccb->ccb_h.status = CAM_REQ_CMP;
/* Tell CAM a bus-device reset was delivered so peripherals can recover. */
3148 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3152 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3156 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll routine, used when interrupts are unavailable (e.g.
 * during a kernel panic/dump).  Disables MPS_TRACE debugging first so
 * per-event messages don't swamp the console, then services the
 * hardware synchronously via mps_intr_locked().
 */
3161 mpssas_poll(struct cam_sim *sim)
3163 struct mpssas_softc *sassc;
3165 sassc = cam_sim_softc(sim);
3167 if (sassc->sc->mps_debug & MPS_TRACE) {
3168 /* frequent debug messages during a panic just slow
3169 * everything down too much.
3171 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3172 sassc->sc->mps_debug &= ~MPS_TRACE;
3175 mps_intr_locked(sassc->sc);
/*
 * CAM async event callback.  Two events are of interest:
 *  - AC_ADVINFO_CHANGED (newer CAM): a device's advanced info changed;
 *    if it is long read-capacity data, re-read it via XPT_DEV_ADVINFO
 *    and update the per-LUN EEDP (protection information) state.
 *  - AC_FOUND_DEVICE (older CAM path): probe the new device for EEDP
 *    support via mpssas_check_eedp().
 * NOTE(review): extraction dropped lines (the switch statement on
 * 'code', lock acquisition, several closing braces); comments describe
 * only the visible code.
 */
3179 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3182 struct mps_softc *sc;
3184 sc = (struct mps_softc *)callback_arg;
3187 #if (__FreeBSD_version >= 1000006) || \
3188 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3189 case AC_ADVINFO_CHANGED: {
3190 struct mpssas_target *target;
3191 struct mpssas_softc *sassc;
3192 struct scsi_read_capacity_data_long rcap_buf;
3193 struct ccb_dev_advinfo cdai;
3194 struct mpssas_lun *lun;
3199 buftype = (uintptr_t)arg;
3205 * We're only interested in read capacity data changes.
3207 if (buftype != CDAI_TYPE_RCAPLONG)
3211 * We should have a handle for this, but check to make sure.
3213 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3214 ("Target %d out of bounds in mpssas_async\n",
3215 xpt_path_target_id(path)));
3216 target = &sassc->targets[xpt_path_target_id(path)];
3217 if (target->handle == 0)
/* Find (or create) the per-LUN record that tracks EEDP state. */
3220 lunid = xpt_path_lun_id(path);
3222 SLIST_FOREACH(lun, &target->luns, lun_link) {
3223 if (lun->lun_id == lunid) {
3229 if (found_lun == 0) {
3230 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3233 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3234 "LUN for EEDP support.\n");
3237 lun->lun_id = lunid;
3238 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data through XPT_DEV_ADVINFO. */
3241 bzero(&rcap_buf, sizeof(rcap_buf));
3242 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3243 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3244 cdai.ccb_h.flags = CAM_DIR_IN;
3245 cdai.buftype = CDAI_TYPE_RCAPLONG;
3247 cdai.bufsiz = sizeof(rcap_buf);
3248 cdai.buf = (uint8_t *)&rcap_buf;
3249 xpt_action((union ccb *)&cdai);
3250 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3251 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN in the capacity data means the LUN is EEDP-formatted. */
3254 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3255 && (rcap_buf.prot & SRC16_PROT_EN)) {
3256 lun->eedp_formatted = TRUE;
3257 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3259 lun->eedp_formatted = FALSE;
3260 lun->eedp_block_size = 0;
3265 case AC_FOUND_DEVICE: {
3266 struct ccb_getdev *cgd;
3269 mpssas_check_eedp(sc, path, cgd);
3278 #if (__FreeBSD_version < 901503) || \
3279 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Probe a newly-found device for EEDP (end-to-end data protection)
 * support on the pre-AC_ADVINFO_CHANGED CAM API.  If the inquiry data
 * advertises SPC3_SID_PROTECT, issue an internally-generated READ
 * CAPACITY 16; mpssas_read_cap_done() parses the result and records
 * the LUN's EEDP state.  A per-LUN record is created if one does not
 * already exist.  NOTE(review): extraction dropped lines (ccb == NULL
 * test, xpt_action submission at the end, some cleanup paths); comments
 * describe only the visible code.
 */
3281 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3282 struct ccb_getdev *cgd)
3284 struct mpssas_softc *sassc = sc->sassc;
3285 struct ccb_scsiio *csio;
3286 struct scsi_read_capacity_16 *scsi_cmd;
3287 struct scsi_read_capacity_eedp *rcap_buf;
3289 target_id_t targetid;
3292 struct cam_path *local_path;
3293 struct mpssas_target *target;
3294 struct mpssas_lun *lun;
3299 pathid = cam_sim_path(sassc->sim);
3300 targetid = xpt_path_target_id(path);
3301 lunid = xpt_path_lun_id(path);
3303 KASSERT(targetid < sassc->maxtargets,
3304 ("Target %d out of bounds in mpssas_check_eedp\n",
3306 target = &sassc->targets[targetid];
3307 if (target->handle == 0x0)
3311 * Determine if the device is EEDP capable.
3313 * If this flag is set in the inquiry data,
3314 * the device supports protection information,
3315 * and must support the 16 byte read
3316 * capacity command, otherwise continue without
3317 * sending read cap 16
3319 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3323 * Issue a READ CAPACITY 16 command. This info
3324 * is used to determine if the LUN is formatted
3327 ccb = xpt_alloc_ccb_nowait();
3329 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3330 "for EEDP support.\n");
3334 if (xpt_create_path(&local_path, xpt_periph,
3335 pathid, targetid, lunid) != CAM_REQ_CMP) {
3336 mps_dprint(sc, MPS_ERROR, "Unable to create "
3337 "path for EEDP support\n");
3343 * If LUN is already in list, don't create a new
3347 SLIST_FOREACH(lun, &target->luns, lun_link) {
3348 if (lun->lun_id == lunid) {
3354 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3357 mps_dprint(sc, MPS_ERROR,
3358 "Unable to alloc LUN for EEDP support.\n");
3359 xpt_free_path(local_path);
3363 lun->lun_id = lunid;
3364 SLIST_INSERT_HEAD(&target->luns, lun,
3368 xpt_path_string(local_path, path_str, sizeof(path_str));
3369 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3370 path_str, target->handle);
3373 * Issue a READ CAPACITY 16 command for the LUN.
3374 * The mpssas_read_cap_done function will load
3375 * the read cap info into the LUN struct.
3377 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3378 M_MPT2, M_NOWAIT | M_ZERO);
3379 if (rcap_buf == NULL) {
3380 mps_dprint(sc, MPS_FAULT,
3381 "Unable to alloc read capacity buffer for EEDP support.\n");
3382 xpt_free_path(ccb->ccb_h.path);
3386 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3388 csio->ccb_h.func_code = XPT_SCSI_IO;
3389 csio->ccb_h.flags = CAM_DIR_IN;
3390 csio->ccb_h.retry_count = 4;
3391 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3392 csio->ccb_h.timeout = 60000;
3393 csio->data_ptr = (uint8_t *)rcap_buf;
3394 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3395 csio->sense_len = MPS_SENSE_LEN;
3396 csio->cdb_len = sizeof(*scsi_cmd);
3397 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Hand-build the READ CAPACITY(16) CDB (opcode 0x9E / SAI 0x10). */
3399 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3400 bzero(scsi_cmd, sizeof(*scsi_cmd));
3401 scsi_cmd->opcode = 0x9E;
3402 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* Allocation length lives at CDB byte 13 for this short EEDP read. */
3403 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc for mpssas_read_cap_done() to recover. */
3405 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internally-issued READ CAPACITY 16 from
 * mpssas_check_eedp().  Releases the device queue (frozen because this
 * command did not come from CAM's peripheral drivers), locates the LUN
 * record, and fills in its EEDP state from the returned capacity data.
 * Frees the data buffer, path and CCB when done.  NOTE(review):
 * extraction dropped lines (the early-return after the NULL check,
 * loop-exit break, some braces); comments describe only the visible
 * code.
 */
3410 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3412 struct mpssas_softc *sassc;
3413 struct mpssas_target *target;
3414 struct mpssas_lun *lun;
3415 struct scsi_read_capacity_eedp *rcap_buf;
3417 if (done_ccb == NULL)
3420 /* Driver need to release devq, it Scsi command is
3421 * generated by driver internally.
3422 * Currently there is a single place where driver
3423 * calls scsi command internally. In future if driver
3424 * calls more scsi command internally, it needs to release
3425 * devq internally, since those command will not go back to
3428 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3429 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3430 xpt_release_devq(done_ccb->ccb_h.path,
3431 /*count*/ 1, /*run_queue*/TRUE);
3434 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3437 * Get the LUN ID for the path and look it up in the LUN list for the
3440 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3441 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3442 ("Target %d out of bounds in mpssas_read_cap_done\n",
3443 done_ccb->ccb_h.target_id));
3444 target = &sassc->targets[done_ccb->ccb_h.target_id];
3445 SLIST_FOREACH(lun, &target->luns, lun_link) {
3446 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3450 * Got the LUN in the target's LUN list. Fill it in
3451 * with EEDP info. If the READ CAP 16 command had some
3452 * SCSI error (common if command is not supported), mark
3453 * the lun as not supporting EEDP and set the block size
3456 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3457 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3458 lun->eedp_formatted = FALSE;
3459 lun->eedp_block_size = 0;
/* PROT_EN bit (byte 12, bit 0) => LUN formatted with protection info. */
3463 if (rcap_buf->protect & 0x01) {
3464 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3465 "target ID %d is formatted for EEDP "
3466 "support.\n", done_ccb->ccb_h.target_lun,
3467 done_ccb->ccb_h.target_id);
3468 lun->eedp_formatted = TRUE;
3469 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3474 // Finished with this CCB and path.
3475 free(rcap_buf, M_MPT2);
3476 xpt_free_path(done_ccb->ccb_h.path);
3477 xpt_free_ccb(done_ccb);
3479 #endif /* (__FreeBSD_version < 901503) || \
3480 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Kick off SAS discovery: mark that we are waiting for the firmware's
 * Port Enable to finish (keeps the SIM queue frozen until discovery
 * events complete) and send the Port Enable request.
 */
3483 mpssas_startup(struct mps_softc *sc)
3487 * Send the port enable message and set the wait_for_port_enable flag.
3488 * This flag helps to keep the simq frozen until all discovery events
3491 sc->wait_for_port_enable = 1;
3492 mpssas_send_portenable(sc);
/*
 * Build and submit the MPI2 Port Enable request that tells the firmware
 * to bring up its SAS ports and begin discovery.  Completion is handled
 * by mpssas_portenable_complete().  Returns an error (visible return
 * statements were dropped by extraction) when no command is available.
 */
3497 mpssas_send_portenable(struct mps_softc *sc)
3499 MPI2_PORT_ENABLE_REQUEST *request;
3500 struct mps_command *cm;
3504 if ((cm = mps_alloc_command(sc)) == NULL)
3506 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3507 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3508 request->MsgFlags = 0;
3510 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3511 cm->cm_complete = mpssas_portenable_complete;
3515 mps_map_command(sc, cm);
3516 mps_dprint(sc, MPS_XINFO,
3517 "mps_send_portenable finished cm %p req %p complete %p\n",
3518 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for Port Enable.  Logs failures, tears down the
 * config intrhook (boot can proceed), reads WarpDrive config pages
 * before the bus scan, then marks port enable complete, wakes any
 * waiter, and drops the startup refcount so the SIM queue can thaw.
 * NOTE(review): extraction dropped lines (sassc assignment, reply NULL
 * test structure); comments describe only the visible code.
 */
3523 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3525 MPI2_PORT_ENABLE_REPLY *reply;
3526 struct mpssas_softc *sassc;
3532 * Currently there should be no way we can hit this case. It only
3533 * happens when we have a failure to allocate chain frames, and
3534 * port enable commands don't have S/G lists.
3536 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3537 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3538 "This should not happen!\n", __func__, cm->cm_flags);
3541 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3543 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
/*
 * NOTE(review): the mask is applied before the byte-swap here; the
 * correct form is (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK).
 * This only yields the right answer on little-endian hosts -- confirm
 * against the fixed upstream revision.
 */
3544 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3545 MPI2_IOCSTATUS_SUCCESS)
3546 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3548 mps_free_command(sc, cm);
3549 if (sc->mps_ich.ich_arg != NULL) {
3550 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3551 config_intrhook_disestablish(&sc->mps_ich);
3552 sc->mps_ich.ich_arg = NULL;
3556 * Get WarpDrive info after discovery is complete but before the scan
3557 * starts. At this point, all devices are ready to be exposed to the
3558 * OS. If devices should be hidden instead, take them out of the
3559 * 'targets' array before the scan. The devinfo for a disk will have
3560 * some info and a volume's will be 0. Use that to remove disks.
3562 mps_wd_config_pages(sc);
3565 * Done waiting for port enable to complete. Decrement the refcount.
3566 * If refcount is 0, discovery is complete and a rescan of the bus can
3567 * take place. Since the simq was explicitly frozen before port
3568 * enable, it must be explicitly released here to keep the
3569 * freeze/release count in sync.
3571 sc->wait_for_port_enable = 0;
3572 sc->port_enable_complete = 1;
3573 wakeup(&sc->port_enable_complete);
3574 mpssas_startup_decrement(sassc);
3578 mpssas_check_id(struct mpssas_softc *sassc, int id)
3580 struct mps_softc *sc = sassc->sc;
3584 ids = &sc->exclude_ids[0];
3585 while((name = strsep(&ids, ",")) != NULL) {
3586 if (name[0] == '\0')
3588 if (strtol(name, NULL, 0) == (long)id)