2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * LSI MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for LSI MPT2 */
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
/*
 * Discovery timeout poll interval (seconds) and how many consecutive
 * timeouts we tolerate before giving up; used by mpssas_discovery_timeout().
 */
88 #define MPSSAS_DISCOVERY_TIMEOUT 20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
92 * static array to check SCSI OpCode for EEDP protection bits
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI CDB opcode (16 entries per row, so entry
 * row*16+col corresponds to opcode 0xRC).  Non-zero slots carry the EEDP
 * flags to apply: 0x28/0x88/0xA8 (the standard READ(10/16/12) opcodes) get
 * PRO_R; 0x2A/0x8A/0xAA (WRITE) and 0x2E/0x8E/0xAE (WRITE AND VERIFY) get
 * PRO_W; 0x2F/0x8F/0xAF (VERIFY) get PRO_V.  0x41 and 0x93 — presumably
 * WRITE SAME(10)/(16) — also get PRO_W; TODO confirm against SPC.
 */
97 static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* malloc(9) type tag for allocations owned by this SAS layer. */
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
/*
 * Forward declarations for the static helpers defined later in this file:
 * CAM sim entry points (action/poll), TM and remove completion handlers,
 * and the version-gated SMP passthrough / EEDP-probe support.
 */
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126 struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149 struct mps_command *cm);
/*
 * Linear scan of the target array, beginning at index 'start', for the
 * target whose firmware device handle equals 'handle'.  Returns the
 * matching mpssas_target (the return paths are outside this view —
 * presumably NULL when no entry matches; TODO confirm in full source).
 */
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
154 	struct mpssas_target *target;
157 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167  * commands before device handles have been found by discovery. Since
168  * discovery involves reading config pages and possibly sending commands,
169  * discovery actions may continue even after we receive the end of discovery
170  * event, so refcount discovery actions instead of assuming we can unfreeze
171  * the simq when we get the event.
/*
 * Bump the startup refcount; the first increment while MPSSAS_IN_STARTUP
 * is set freezes the simq so CAM queues rather than fails commands.
 * Caller must hold the softc mutex (simq freeze/refcount are unlocked here).
 */
174 mpssas_startup_increment(struct mpssas_softc *sassc)
176 	MPS_FUNCTRACE(sassc->sc);
178 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 		if (sassc->startup_refcount++ == 0) {
180 			/* just starting, freeze the simq */
181 			mps_dprint(sassc->sc, MPS_INIT,
182 			    "%s freezing simq\n", __func__);
183 			xpt_freeze_simq(sassc->sim, 1);
185 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
186 		    sassc->startup_refcount);
/*
 * Drop one startup reference.  When the count reaches zero, startup is
 * over: clear MPSSAS_IN_STARTUP, release the simq frozen by
 * mpssas_startup_increment(), and rescan the whole bus (wildcard target)
 * to pick up the final topology.
 */
191 mpssas_startup_decrement(struct mpssas_softc *sassc)
193 	MPS_FUNCTRACE(sassc->sc);
195 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
196 		if (--sassc->startup_refcount == 0) {
197 			/* finished all discovery-related actions, release
198 			 * the simq and rescan for the latest topology.
200 			mps_dprint(sassc->sc, MPS_INIT,
201 			    "%s releasing simq\n", __func__);
202 			sassc->flags &= ~MPSSAS_IN_STARTUP;
203 #if __FreeBSD_version >= 1000039
206 			xpt_release_simq(sassc->sim, 1);
207 			mpssas_rescan_target(sassc->sc, NULL);
210 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
211 		    sassc->startup_refcount);
215 /* LSI's firmware requires us to stop sending commands when we're doing task
216  * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command frame for a task-management request.
 * The first outstanding TM freezes the simq; tm_count tracks how many TMs
 * are in flight so mpssas_free_tm() knows when to release it.
 */
220 mpssas_alloc_tm(struct mps_softc *sc)
222 	struct mps_command *tm;
225 	tm = mps_alloc_high_priority_command(sc);
227 	if (sc->sassc->tm_count++ == 0) {
228 		mps_dprint(sc, MPS_RECOVERY,
229 		    "%s freezing simq\n", __func__);
230 		xpt_freeze_simq(sc->sassc->sim, 1);
232 	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
233 	    sc->sassc->tm_count);
/*
 * Return a TM command frame allocated by mpssas_alloc_tm().  Releases the
 * simq once the last outstanding TM completes, then frees the
 * high-priority command back to the controller pool.
 */
239 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
241 	mps_dprint(sc, MPS_TRACE, "%s", __func__);
245 	/* if there are no TMs in use, we can release the simq. We use our
246 	 * own refcount so that it's easier for a diag reset to cleanup and
249 	if (--sc->sassc->tm_count == 0) {
250 		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
251 		xpt_release_simq(sc->sassc->sim, 1);
253 	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
254 	    sc->sassc->tm_count);
256 	mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when 'targ'
 * is NULL (wildcard target id).  Allocates a CCB without sleeping; on
 * CCB or path allocation failure the rescan is simply logged and dropped.
 */
260 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
262 	struct mpssas_softc *sassc = sc->sassc;
264 	target_id_t targetid;
268 	pathid = cam_sim_path(sassc->sim);
270 		targetid = CAM_TARGET_WILDCARD;
	/* Target id is the index of 'targ' within the targets array. */
272 		targetid = targ - sassc->targets;
275 	 * Allocate a CCB and schedule a rescan.
277 	ccb = xpt_alloc_ccb_nowait();
279 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
283 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
284 	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
285 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
290 	if (targetid == CAM_TARGET_WILDCARD)
291 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
293 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
295 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logger for a command.  Builds one line in a stack
 * sbuf: the CAM path (plus CDB and transfer length for SCSI I/O) when a
 * CCB is attached, otherwise a synthesized "noperiph" path from the sim
 * and target, then the SMID and the caller's formatted message.  Early
 * return when 'level' is not enabled in mps_debug keeps this cheap.
 */
300 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
310 	/* No need to be in here if debugging isn't enabled */
311 	if ((cm->cm_sc->mps_debug & level) == 0)
314 	sbuf_new(&sb, str, sizeof(str), 0);
318 	if (cm->cm_ccb != NULL) {
319 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
321 		sbuf_cat(&sb, path_str);
322 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
323 			scsi_command_string(&cm->cm_ccb->csio, &sb);
324 			sbuf_printf(&sb, "length %d ",
325 			    cm->cm_ccb->csio.dxfer_len);
	/* No CCB: fall back to sim/bus/target identification. */
329 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
330 		    cam_sim_name(cm->cm_sc->sassc->sim),
331 		    cam_sim_unit(cm->cm_sc->sassc->sim),
332 		    cam_sim_bus(cm->cm_sc->sassc->sim),
333 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
337 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
338 	sbuf_vprintf(&sb, fmt, ap);
340 	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
347 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
349 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
350 struct mpssas_target *targ;
355 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
356 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
360 /* XXX retry the remove after the diag reset completes? */
361 mps_dprint(sc, MPS_FAULT,
362 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
363 mpssas_free_tm(sc, tm);
367 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
368 mps_dprint(sc, MPS_FAULT,
369 "IOCStatus = 0x%x while resetting device 0x%x\n",
370 reply->IOCStatus, handle);
371 mpssas_free_tm(sc, tm);
375 mps_dprint(sc, MPS_XINFO,
376 "Reset aborted %u commands\n", reply->TerminationCount);
377 mps_free_reply(sc, tm->cm_reply_data);
378 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
380 mps_dprint(sc, MPS_XINFO,
381 "clearing target %u handle 0x%04x\n", targ->tid, handle);
384 * Don't clear target if remove fails because things will get confusing.
385 * Leave the devname and sasaddr intact so that we know to avoid reusing
386 * this target id if possible, and so we can assign the same target id
387 * to this device if it comes back in the future.
389 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
392 targ->encl_handle = 0x0;
393 targ->encl_slot = 0x0;
394 targ->exp_dev_handle = 0x0;
396 targ->linkrate = 0x0;
401 mpssas_free_tm(sc, tm);
406 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
407 * Otherwise Volume Delete is same as Bare Drive Removal.
410 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
412 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
413 struct mps_softc *sc;
414 struct mps_command *cm;
415 struct mpssas_target *targ = NULL;
417 MPS_FUNCTRACE(sassc->sc);
422 * If this is a WD controller, determine if the disk should be exposed
423 * to the OS or not. If disk should be exposed, return from this
424 * function without doing anything.
426 if (sc->WD_available && (sc->WD_hide_expose ==
427 MPS_WD_EXPOSE_ALWAYS)) {
432 targ = mpssas_find_target_by_handle(sassc, 0, handle);
434 /* FIXME: what is the action? */
435 /* We don't know about this device? */
436 mps_dprint(sc, MPS_ERROR,
437 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
441 targ->flags |= MPSSAS_TARGET_INREMOVAL;
443 cm = mpssas_alloc_tm(sc);
445 mps_dprint(sc, MPS_ERROR,
446 "%s: command alloc failure\n", __func__);
450 mpssas_rescan_target(sc, targ);
452 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
453 req->DevHandle = targ->handle;
454 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
455 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
457 /* SAS Hard Link Reset / SATA Link Reset */
458 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
462 cm->cm_desc.HighPriority.RequestFlags =
463 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
464 cm->cm_complete = mpssas_remove_volume;
465 cm->cm_complete_data = (void *)(uintptr_t)handle;
466 mps_map_command(sc, cm);
470  * The MPT2 firmware performs debounce on the link to avoid transient link
471  * errors and false removals. When it does decide that link has been lost
472  * and a device need to go away, it expects that the host will perform a
473  * target reset and then an op remove. The reset has the side-effect of
474  * aborting any outstanding requests for the device, which is required for
475  * the op-remove to succeed. It's not clear if the host should check for
476  * the device coming back alive after the reset.
/*
 * Step one of removing a bare drive: look up the target for 'handle',
 * mark it MPSSAS_TARGET_INREMOVAL, schedule a rescan, and issue a
 * target-reset TM.  mpssas_remove_device() completes the sequence with
 * the SAS_IO_UNIT_CONTROL op-remove.
 */
479 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
481 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
482 	struct mps_softc *sc;
483 	struct mps_command *cm;
484 	struct mpssas_target *targ = NULL;
486 	MPS_FUNCTRACE(sassc->sc);
490 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
492 		/* FIXME: what is the action? */
493 		/* We don't know about this device? */
494 		mps_dprint(sc, MPS_ERROR,
495 		    "%s : invalid handle 0x%x \n", __func__, handle);
499 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
501 	cm = mpssas_alloc_tm(sc);
503 		mps_dprint(sc, MPS_ERROR,
504 		    "%s: command alloc failure\n", __func__);
508 	mpssas_rescan_target(sc, targ);
510 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
511 	memset(req, 0, sizeof(*req));
	/* Request fields are little-endian on the wire. */
512 	req->DevHandle = htole16(targ->handle);
513 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
514 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
516 	/* SAS Hard Link Reset / SATA Link Reset */
517 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
521 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
522 	cm->cm_complete = mpssas_remove_device;
523 	cm->cm_complete_data = (void *)(uintptr_t)handle;
524 	mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  After validating the reply, reuses the same
 * command frame to send the MPI2_SAS_OP_REMOVE_DEVICE IO-unit control
 * request (completing in mpssas_remove_complete()), then fails any
 * commands still queued on the target with CAM_DEV_NOT_THERE.
 */
528 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
530 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
531 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
532 	struct mpssas_target *targ;
533 	struct mps_command *next_cm;
538 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
539 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
543 	 * Currently there should be no way we can hit this case. It only
544 	 * happens when we have a failure to allocate chain frames, and
545 	 * task management commands don't have S/G lists.
547 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
548 		mps_dprint(sc, MPS_ERROR,
549 		    "%s: cm_flags = %#x for remove of handle %#04x! "
550 		    "This should not happen!\n", __func__, tm->cm_flags,
552 		mpssas_free_tm(sc, tm);
557 		/* XXX retry the remove after the diag reset completes? */
558 		mps_dprint(sc, MPS_FAULT,
559 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
560 		mpssas_free_tm(sc, tm);
564 	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
565 		mps_dprint(sc, MPS_FAULT,
566 		    "IOCStatus = 0x%x while resetting device 0x%x\n",
567 		    le16toh(reply->IOCStatus), handle);
568 		mpssas_free_tm(sc, tm);
572 	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
573 	    le32toh(reply->TerminationCount));
574 	mps_free_reply(sc, tm->cm_reply_data);
575 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
577 	/* Reuse the existing command */
578 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
579 	memset(req, 0, sizeof(*req));
580 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
581 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
582 	req->DevHandle = htole16(handle);
584 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
585 	tm->cm_complete = mpssas_remove_complete;
586 	tm->cm_complete_data = (void *)(uintptr_t)handle;
588 	mps_map_command(sc, tm);
590 	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
	/* Fail any remaining queued commands for this target. */
592 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
595 		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
596 		ccb = tm->cm_complete_data;
597 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
598 		mpssas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the
 * MPI2_SAS_OP_REMOVE_DEVICE request.  On success, clears the target's
 * handle/enclosure/link fields (devname and sasaddr are deliberately
 * kept — see comment below) and frees any per-LUN state.
 */
603 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
605 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
607 	struct mpssas_target *targ;
608 	struct mpssas_lun *lun;
612 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
613 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
616 	 * Currently there should be no way we can hit this case. It only
617 	 * happens when we have a failure to allocate chain frames, and
618 	 * task management commands don't have S/G lists.
620 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
621 		mps_dprint(sc, MPS_XINFO,
622 		    "%s: cm_flags = %#x for remove of handle %#04x! "
623 		    "This should not happen!\n", __func__, tm->cm_flags,
625 		mpssas_free_tm(sc, tm);
630 		/* most likely a chip reset */
631 		mps_dprint(sc, MPS_FAULT,
632 		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
633 		mpssas_free_tm(sc, tm);
637 	mps_dprint(sc, MPS_XINFO,
638 	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
639 	    handle, le16toh(reply->IOCStatus));
642 	 * Don't clear target if remove fails because things will get confusing.
643 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
644 	 * this target id if possible, and so we can assign the same target id
645 	 * to this device if it comes back in the future.
647 	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
650 		targ->encl_handle = 0x0;
651 		targ->encl_slot = 0x0;
652 		targ->exp_dev_handle = 0x0;
654 		targ->linkrate = 0x0;
	/* Tear down the per-LUN list for this target. */
658 		while(!SLIST_EMPTY(&targ->luns)) {
659 			lun = SLIST_FIRST(&targ->luns);
660 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
666 	mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (device status,
 * discovery, topology, enclosure and IR/RAID events, log entries) and
 * register mpssas_evt_handler() for them with the core.
 */
670 mpssas_register_events(struct mps_softc *sc)
672 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
675 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
676 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
677 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
678 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
679 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
680 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
681 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
682 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
683 	setbit(events, MPI2_EVENT_IR_VOLUME);
684 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
685 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
686 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
688 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
689 	    &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM transport layer: allocate the sassc and target
 * array, create the simq/sim, start the event taskqueue, register the
 * bus with CAM, freeze the simq until discovery finishes, and register
 * async + firmware event handlers.
 *
 * NOTE(review): both mallocs use M_WAITOK yet are followed by NULL
 * checks — M_WAITOK cannot return NULL, so the checks are dead code.
 * NOTE(review): allocations use M_MPT2 although this file defines
 * M_MPSSAS above — confirm which malloc type is intended.
 */
695 mps_attach_sas(struct mps_softc *sc)
697 	struct mpssas_softc *sassc;
703 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
705 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
709 	sassc->targets = malloc(sizeof(struct mpssas_target) *
710 	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
711 	if(!sassc->targets) {
712 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
720 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
721 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
726 	unit = device_get_unit(sc->mps_dev);
727 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
728 	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
729 	if (sassc->sim == NULL) {
730 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
735 	TAILQ_INIT(&sassc->ev_queue);
737 	/* Initialize taskqueue for Event Handling */
738 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
739 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
740 	    taskqueue_thread_enqueue, &sassc->ev_tq);
742 	/* Run the task queue with lowest priority */
743 	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
744 	    device_get_nameunit(sc->mps_dev));
749 	 * XXX There should be a bus for every port on the adapter, but since
750 	 * we're just going to fake the topology for now, we'll pretend that
751 	 * everything is just a target on a single bus.
753 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
754 		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
761 	 * Assume that discovery events will start right away.
763 	 * Hold off boot until discovery is complete.
765 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
766 #if __FreeBSD_version >= 1000039
769 	xpt_freeze_simq(sassc->sim, 1);
771 	sc->sassc->startup_refcount = 0;
773 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
774 	sassc->discovery_timeouts = 0;
779 	 * Register for async events so we can determine the EEDP
780 	 * capabilities of devices.
782 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
783 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
785 	if (status != CAM_REQ_CMP) {
786 		mps_printf(sc, "Error %#x creating sim path\n", status);
	/* Event choice depends on which CAM API generation is available. */
791 #if (__FreeBSD_version >= 1000006) || \
792     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
793 		event = AC_ADVINFO_CHANGED;
795 		event = AC_FOUND_DEVICE;
797 		status = xpt_register_async(event, mpssas_async, sc,
799 		if (status != CAM_REQ_CMP) {
800 			mps_dprint(sc, MPS_ERROR,
801 			    "Error %#x registering async handler for "
802 			    "AC_ADVINFO_CHANGED events\n", status);
803 			xpt_free_path(sassc->path);
807 	if (status != CAM_REQ_CMP) {
809 		 * EEDP use is the exception, not the rule.
810 		 * Warn the user, but do not fail to attach.
812 		mps_printf(sc, "EEDP capabilities disabled.\n");
817 	mpssas_register_events(sc);
/*
 * Tear down everything mps_attach_sas() created, in reverse order:
 * deregister events, drain/free the taskqueue (lock released so queued
 * tasks can finish), drop async registration and path, release a
 * startup-held simq freeze, deregister and free the sim/simq, free
 * per-target LUN lists and the target array.
 */
825 mps_detach_sas(struct mps_softc *sc)
827 	struct mpssas_softc *sassc;
828 	struct mpssas_lun *lun, *lun_tmp;
829 	struct mpssas_target *targ;
834 	if (sc->sassc == NULL)
838 	mps_deregister_events(sc, sassc->mpssas_eh);
841 	 * Drain and free the event handling taskqueue with the lock
842 	 * unheld so that any parallel processing tasks drain properly
843 	 * without deadlocking.
845 	if (sassc->ev_tq != NULL)
846 		taskqueue_free(sassc->ev_tq);
848 	/* Make sure CAM doesn't wedge if we had to bail out early. */
851 	/* Deregister our async handler */
852 	if (sassc->path != NULL) {
		/* event code 0 deregisters this handler */
853 		xpt_register_async(0, mpssas_async, sc, sassc->path);
854 		xpt_free_path(sassc->path);
858 	if (sassc->flags & MPSSAS_IN_STARTUP)
859 		xpt_release_simq(sassc->sim, 1);
861 	if (sassc->sim != NULL) {
862 		xpt_bus_deregister(cam_sim_path(sassc->sim));
863 		cam_sim_free(sassc->sim, FALSE);
866 	sassc->flags |= MPSSAS_SHUTDOWN;
869 	if (sassc->devq != NULL)
870 		cam_simq_free(sassc->devq);
872 	for(i=0; i< sc->facts->MaxTargets ;i++) {
873 		targ = &sassc->targets[i];
874 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
878 	free(sassc->targets, M_MPT2);
/*
 * Discovery has completed: cancel the pending discovery-timeout callout
 * if one is armed.
 */
886 mpssas_discovery_end(struct mpssas_softc *sassc)
888 	struct mps_softc *sc = sassc->sc;
892 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
893 		callout_stop(&sassc->discovery_callout);
/*
 * Callout fired when discovery has not finished within
 * MPSSAS_DISCOVERY_TIMEOUT seconds — likely because interrupts are not
 * being delivered.  Polls the hardware for events; if discovery is still
 * pending, re-arms itself up to MPSSAS_MAX_DISCOVERY_TIMEOUTS times,
 * after which discovery is abandoned and attach continues.
 */
898 mpssas_discovery_timeout(void *data)
900 	struct mpssas_softc *sassc = data;
901 	struct mps_softc *sc;
907 	mps_dprint(sc, MPS_INFO,
908 	    "Timeout waiting for discovery, interrupts may not be working!\n");
909 	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
911 	/* Poll the hardware for events in case interrupts aren't working */
914 	mps_dprint(sassc->sc, MPS_INFO,
915 	    "Finished polling after discovery timeout at %d\n", ticks);
917 	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
918 		mpssas_discovery_end(sassc);
920 		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
921 			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
922 			callout_reset(&sassc->discovery_callout,
923 			    MPSSAS_DISCOVERY_TIMEOUT * hz,
924 			    mpssas_discovery_timeout, sassc);
925 			sassc->discovery_timeouts++;
927 			mps_dprint(sassc->sc, MPS_FAULT,
928 			    "Discovery timed out, continuing.\n");
929 			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
930 			mpssas_discovery_end(sassc);
/*
 * CAM sim action entry point — dispatches on the CCB function code.
 * Handles XPT_PATH_INQ (advertise HBA capabilities), XPT_GET_TRAN_SETTINGS
 * (per-target link rate), XPT_CALC_GEOMETRY, device reset, SCSI I/O and,
 * when supported, SMP passthrough.  Must be entered with the softc mutex
 * held.
 */
938 mpssas_action(struct cam_sim *sim, union ccb *ccb)
940 	struct mpssas_softc *sassc;
942 	sassc = cam_sim_softc(sim);
944 	MPS_FUNCTRACE(sassc->sc);
945 	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
946 	    ccb->ccb_h.func_code);
947 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
949 	switch (ccb->ccb_h.func_code) {
952 		struct ccb_pathinq *cpi = &ccb->cpi;
954 		cpi->version_num = 1;
955 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
956 		cpi->target_sprt = 0;
957 #if __FreeBSD_version >= 1000039
958 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
960 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
962 		cpi->hba_eng_cnt = 0;
963 		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		/* Initiator claims the highest target id for itself. */
965 		cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
966 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
967 		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
968 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
969 		cpi->unit_number = cam_sim_unit(sim);
970 		cpi->bus_id = cam_sim_bus(sim);
971 		cpi->base_transfer_speed = 150000;
972 		cpi->transport = XPORT_SAS;
973 		cpi->transport_version = 0;
974 		cpi->protocol = PROTO_SCSI;
975 		cpi->protocol_version = SCSI_REV_SPC;
976 #if __FreeBSD_version >= 800001
978 		 * XXX KDM where does this number come from?
980 		cpi->maxio = 256 * 1024;
982 		cpi->ccb_h.status = CAM_REQ_CMP;
985 	case XPT_GET_TRAN_SETTINGS:
987 		struct ccb_trans_settings *cts;
988 		struct ccb_trans_settings_sas *sas;
989 		struct ccb_trans_settings_scsi *scsi;
990 		struct mpssas_target *targ;
993 		sas = &cts->xport_specific.sas;
994 		scsi = &cts->proto_specific.scsi;
996 		targ = &sassc->targets[cts->ccb_h.target_id];
		/* handle 0 means no device present at this target id */
997 		if (targ->handle == 0x0) {
998 			cts->ccb_h.status = CAM_SEL_TIMEOUT;
1002 		cts->protocol_version = SCSI_REV_SPC2;
1003 		cts->transport = XPORT_SAS;
1004 		cts->transport_version = 0;
1006 		sas->valid = CTS_SAS_VALID_SPEED;
1007 		switch (targ->linkrate) {
1009 			sas->bitrate = 150000;
1012 			sas->bitrate = 300000;
1015 			sas->bitrate = 600000;
1021 		cts->protocol = PROTO_SCSI;
1022 		scsi->valid = CTS_SCSI_VALID_TQ;
1023 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1025 		cts->ccb_h.status = CAM_REQ_CMP;
1028 	case XPT_CALC_GEOMETRY:
1029 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1030 		ccb->ccb_h.status = CAM_REQ_CMP;
1033 		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1034 		mpssas_action_resetdev(sassc, ccb);
1039 		mps_dprint(sassc->sc, MPS_XINFO,
1040 		    "mpssas_action faking success for abort or reset\n");
1041 		ccb->ccb_h.status = CAM_REQ_CMP;
1044 		mpssas_action_scsiio(sassc, ccb);
1046 #if __FreeBSD_version >= 900026
1048 		mpssas_action_smpio(sassc, ccb);
1052 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Notify CAM of a reset event (e.g. AC_BUS_RESET, AC_SENT_BDR) on the
 * given target/LUN by creating a temporary path, posting the async
 * event, and freeing the path again.
 */
1060 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1061     target_id_t target_id, lun_id_t lun_id)
1063 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1064 	struct cam_path *path;
1066 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
1067 	    ac_code, target_id, lun_id);
1069 	if (xpt_create_path(&path, NULL,
1070 	    path_id, target_id, lun_id) != CAM_REQ_CMP) {
1071 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1076 	xpt_async(ac_code, path, NULL);
1077 	xpt_free_path(path);
/*
 * Diag-reset helper: force-complete every active command with a NULL
 * reply.  Commands with a completion callback are invoked; polled
 * commands are marked complete; sleeping submitters are woken.  Logs
 * any command that was neither completed nor free — that indicates a
 * tracking bug.  Softc mutex must be held.
 */
1081 mpssas_complete_all_commands(struct mps_softc *sc)
1083 	struct mps_command *cm;
1088 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1090 	/* complete all commands with a NULL reply */
1091 	for (i = 1; i < sc->num_reqs; i++) {
1092 		cm = &sc->commands[i];
1093 		cm->cm_reply = NULL;
1096 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1097 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1099 		if (cm->cm_complete != NULL) {
1100 			mpssas_log_command(cm, MPS_RECOVERY,
1101 			    "completing cm %p state %x ccb %p for diag reset\n",
1102 			    cm, cm->cm_state, cm->cm_ccb);
1104 			cm->cm_complete(sc, cm);
1108 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1109 			mpssas_log_command(cm, MPS_RECOVERY,
1110 			    "waking up cm %p state %x ccb %p for diag reset\n",
1111 			    cm, cm->cm_state, cm->cm_ccb);
1116 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1117 			/* this should never happen, but if it does, log */
1118 			mpssas_log_command(cm, MPS_RECOVERY,
1119 			    "cm %p state %x flags 0x%x ccb %p during diag "
1120 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Re-initialize the SAS layer after a diag reset: return to startup mode
 * with the simq frozen, announce a bus reset to CAM, flush all
 * outstanding commands, reset the startup refcount, and invalidate every
 * target's firmware handles (they will be reassigned by rediscovery
 * after port enable).
 */
1127 mpssas_handle_reinit(struct mps_softc *sc)
1131 	/* Go back into startup mode and freeze the simq, so that CAM
1132 	 * doesn't send any commands until after we've rediscovered all
1133 	 * targets and found the proper device handles for them.
1135 	 * After the reset, portenable will trigger discovery, and after all
1136 	 * discovery-related activities have finished, the simq will be
1139 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1140 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1141 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1142 	xpt_freeze_simq(sc->sassc->sim, 1);
1144 	/* notify CAM of a bus reset */
1145 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1148 	/* complete and cleanup after all outstanding commands */
1149 	mpssas_complete_all_commands(sc);
1151 	mps_dprint(sc, MPS_INIT,
1152 	    "%s startup %u tm %u after command completion\n",
1153 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1156 	 * The simq was explicitly frozen above, so set the refcount to 0.
1157 	 * The simq will be explicitly released after port enable completes.
1159 	sc->sassc->startup_refcount = 0;
1161 	/* zero all the target handles, since they may change after the
1162 	 * reset, and we have to rediscover all the targets and use the new
1165 	for (i = 0; i < sc->facts->MaxTargets; i++) {
1166 		if (sc->sassc->targets[i].outstanding != 0)
1167 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1168 			    i, sc->sassc->targets[i].outstanding);
1169 		sc->sassc->targets[i].handle = 0x0;
1170 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1171 		sc->sassc->targets[i].outstanding = 0;
1172 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command itself times out.
 * Logs the event (the recovery escalation is outside this view).
 * Runs with the softc mutex held, per the assertion.
 */
1177 mpssas_tm_timeout(void *data)
1179 	struct mps_command *tm = data;
1180 	struct mps_softc *sc = tm->cm_sc;
1182 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1184 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1185 	    "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL UNIT RESET TM.  Stops the TM timeout
 * callout, validates the reply, then checks whether any commands remain
 * queued for the LUN.  If none remain, recovery for this LU is done:
 * announce AC_SENT_BDR and move on to the next timed-out command (if
 * any).  If commands remain, the LU reset effectively failed and
 * recovery escalates to a full target reset.
 */
1190 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1192 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1193 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1194 	unsigned int cm_count = 0;
1195 	struct mps_command *cm;
1196 	struct mpssas_target *targ;
1198 	callout_stop(&tm->cm_callout);
1200 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1201 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1205 	 * Currently there should be no way we can hit this case. It only
1206 	 * happens when we have a failure to allocate chain frames, and
1207 	 * task management commands don't have S/G lists.
1208 	 * XXXSL So should it be an assertion?
1210 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1211 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1212 		    "This should not happen!\n", __func__, tm->cm_flags);
1213 		mpssas_free_tm(sc, tm);
1217 	if (reply == NULL) {
1218 		mpssas_log_command(tm, MPS_RECOVERY,
1219 		    "NULL reset reply for tm %p\n", tm);
1220 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1221 			/* this completion was due to a reset, just cleanup */
1222 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1224 			mpssas_free_tm(sc, tm);
1227 			/* we should have gotten a reply. */
1233 	mpssas_log_command(tm, MPS_RECOVERY,
1234 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1235 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1236 	    le32toh(reply->TerminationCount));
1238 	/* See if there are any outstanding commands for this LUN.
1239 	 * This could be made more efficient by using a per-LU data
1240 	 * structure of some sort.
1242 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1243 		if (cm->cm_lun == tm->cm_lun)
1247 	if (cm_count == 0) {
1248 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1249 		    "logical unit %u finished recovery after reset\n",
1252 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1255 		/* we've finished recovery for this logical unit. check and
1256 		 * see if some other logical unit has a timedout command
1257 		 * that needs to be processed.
1259 		cm = TAILQ_FIRST(&targ->timedout_commands);
1261 			mpssas_send_abort(sc, tm, cm);
1265 			mpssas_free_tm(sc, tm);
1269 		/* if we still have commands for this LUN, the reset
1270 		 * effectively failed, regardless of the status reported.
1271 		 * Escalate to a target reset.
1273 		mpssas_log_command(tm, MPS_RECOVERY,
1274 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1276 		mpssas_send_reset(sc, tm,
1277 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management command.  If
 * the target has no outstanding commands, recovery is complete and the
 * reset is announced to CAM; otherwise the reset is considered failed
 * and recovery escalates further (escalation path elided from view).
 */
1282 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1284 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1285 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1286 struct mpssas_target *targ;
/*
 * NOTE(review): targ is used below but no assignment is visible here;
 * presumably set from tm->cm_targ on an elided line -- confirm.
 */
/* The TM completed, so cancel its timeout callout. */
1288 callout_stop(&tm->cm_callout);
1290 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1291 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1295 * Currently there should be no way we can hit this case. It only
1296 * happens when we have a failure to allocate chain frames, and
1297 * task management commands don't have S/G lists.
1299 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1300 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1301 "This should not happen!\n", __func__, tm->cm_flags);
1302 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1306 if (reply == NULL) {
1307 mpssas_log_command(tm, MPS_RECOVERY,
1308 "NULL reset reply for tm %p\n", tm);
1309 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1310 /* this completion was due to a reset, just cleanup */
1311 targ->flags &= ~MPSSAS_TARGET_INRESET;
1313 mpssas_free_tm(sc, tm);
1316 /* we should have gotten a reply. */
1322 mpssas_log_command(tm, MPS_RECOVERY,
1323 "target reset status 0x%x code 0x%x count %u\n",
1324 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1325 le32toh(reply->TerminationCount));
/* The reset attempt is over, one way or the other. */
1327 targ->flags &= ~MPSSAS_TARGET_INRESET;
1329 if (targ->outstanding == 0) {
1330 /* we've finished recovery for this target and all
1331 * of its logical units.
1333 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1334 "recovery finished after target reset\n");
1336 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1340 mpssas_free_tm(sc, tm);
1343 /* after a target reset, if this target still has
1344 * outstanding commands, the reset effectively failed,
1345 * regardless of the status reported. escalate.
1347 mpssas_log_command(tm, MPS_RECOVERY,
1348 "target reset complete for tm %p, but still have %u command(s)\n",
1349 tm, targ->outstanding);
/* Timeout, in seconds, for LUN/target reset TMs (see callout_reset below). */
1354 #define MPS_RESET_TIMEOUT 30
/*
 * Build and send a SCSI task-management reset of the given type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) for the target attached to tm.
 * Sets the matching completion handler, arms a MPS_RESET_TIMEOUT
 * callout, and maps the command to the hardware.
 */
1357 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1359 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1360 struct mpssas_target *target;
1363 target = tm->cm_targ;
/* Without a device handle there is nothing to address the TM to. */
1364 if (target->handle == 0) {
1365 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1366 __func__, target->tid);
1370 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1371 req->DevHandle = htole16(target->handle);
1372 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1373 req->TaskType = type;
1375 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1376 /* XXX Need to handle invalid LUNs */
1377 MPS_SET_LUN(req->LUN, tm->cm_lun);
1378 tm->cm_targ->logical_unit_resets++;
1379 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1380 "sending logical unit reset\n");
1381 tm->cm_complete = mpssas_logical_unit_reset_complete;
1383 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1384 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1385 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1386 tm->cm_targ->target_resets++;
1387 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1388 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1389 "sending target reset\n");
1390 tm->cm_complete = mpssas_target_reset_complete;
1393 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TMs go out on the high-priority request queue. */
1398 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1399 tm->cm_complete_data = (void *)tm;
1401 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1402 mpssas_tm_timeout, tm);
1404 err = mps_map_command(sc, tm);
1406 mpssas_log_command(tm, MPS_RECOVERY,
1407 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task-management command.  If no
 * timed-out commands remain for the target, recovery is finished; if
 * the aborted command completed but others remain, the next one is
 * aborted; otherwise the abort is considered failed and recovery
 * escalates to a logical unit reset.
 */
1415 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1417 struct mps_command *cm;
1418 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1419 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1420 struct mpssas_target *targ;
/* The TM completed, so cancel its timeout callout. */
1422 callout_stop(&tm->cm_callout);
1424 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1425 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1429 * Currently there should be no way we can hit this case. It only
1430 * happens when we have a failure to allocate chain frames, and
1431 * task management commands don't have S/G lists.
1433 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1434 mpssas_log_command(tm, MPS_RECOVERY,
1435 "cm_flags = %#x for abort %p TaskMID %u!\n",
1436 tm->cm_flags, tm, le16toh(req->TaskMID));
1437 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1441 if (reply == NULL) {
1442 mpssas_log_command(tm, MPS_RECOVERY,
1443 "NULL abort reply for tm %p TaskMID %u\n",
1444 tm, le16toh(req->TaskMID));
1445 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1446 /* this completion was due to a reset, just cleanup */
1448 mpssas_free_tm(sc, tm);
1451 /* we should have gotten a reply. */
1457 mpssas_log_command(tm, MPS_RECOVERY,
1458 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1459 le16toh(req->TaskMID),
1460 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1461 le32toh(reply->TerminationCount));
1463 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1465 /* if there are no more timedout commands, we're done with
1466 * error recovery for this target.
1468 mpssas_log_command(tm, MPS_RECOVERY,
1469 "finished recovery after aborting TaskMID %u\n",
1470 le16toh(req->TaskMID));
1473 mpssas_free_tm(sc, tm);
/* SMID mismatch means the aborted command is gone; move to the next. */
1475 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1476 /* abort success, but we have more timedout commands to abort */
1477 mpssas_log_command(tm, MPS_RECOVERY,
1478 "continuing recovery after aborting TaskMID %u\n",
1479 le16toh(req->TaskMID));
1481 mpssas_send_abort(sc, tm, cm);
1484 /* we didn't get a command completion, so the abort
1485 * failed as far as we're concerned. escalate.
1487 mpssas_log_command(tm, MPS_RECOVERY,
1488 "abort failed for TaskMID %u tm %p\n",
1489 le16toh(req->TaskMID), tm);
1491 mpssas_send_reset(sc, tm,
1492 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Timeout, in seconds, for ABORT TASK TMs (see callout_reset below). */
1496 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and send an ABORT TASK task-management command for the
 * timed-out command cm, using tm as the TM frame.  Arms a
 * MPS_ABORT_TIMEOUT callout and maps the command to the hardware.
 */
1499 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1501 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1502 struct mpssas_target *targ;
/*
 * NOTE(review): targ is used below but no assignment is visible here;
 * presumably set from cm->cm_targ on an elided line -- confirm.
 */
1506 if (targ->handle == 0) {
1507 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1508 __func__, cm->cm_ccb->ccb_h.target_id);
1512 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1513 "Aborting command %p\n", cm);
1515 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1516 req->DevHandle = htole16(targ->handle);
1517 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1518 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1520 /* XXX Need to handle invalid LUNs */
1521 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* Identify the command to abort by its SMID. */
1523 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TMs go out on the high-priority request queue. */
1526 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1527 tm->cm_complete = mpssas_abort_complete;
1528 tm->cm_complete_data = (void *)tm;
1529 tm->cm_targ = cm->cm_targ;
1530 tm->cm_lun = cm->cm_lun;
1532 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1533 mpssas_tm_timeout, tm);
1537 err = mps_map_command(sc, tm);
1539 mpssas_log_command(tm, MPS_RECOVERY,
1540 "error %d sending abort for cm %p SMID %u\n",
1541 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O command.  Marks the command
 * TIMEDOUT, queues it on the target's timedout list, and starts (or
 * joins) task-management recovery for the target by aborting the first
 * timed-out command.
 */
1547 mpssas_scsiio_timeout(void *data)
1549 struct mps_softc *sc;
1550 struct mps_command *cm;
1551 struct mpssas_target *targ;
1553 cm = (struct mps_command *)data;
/*
 * NOTE(review): sc and targ are used below without visible
 * assignments; presumably sc = cm->cm_sc and targ = cm->cm_targ on
 * elided lines -- confirm against the full source.
 */
1557 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* NOTE(review): format says "cm %p" but passes 'sc' -- likely should be 'cm'. */
1559 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1562 * Run the interrupt handler to make sure it's not pending. This
1563 * isn't perfect because the command could have already completed
1564 * and been re-used, though this is unlikely.
1566 mps_intr_locked(sc);
1567 if (cm->cm_state == MPS_CM_STATE_FREE) {
1568 mpssas_log_command(cm, MPS_XINFO,
1569 "SCSI command %p almost timed out\n", cm);
1573 if (cm->cm_ccb == NULL) {
1574 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1578 mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1584 /* XXX first, check the firmware state, to see if it's still
1585 * operational. if not, do a diag reset.
/* Mark the CCB and command as timed out and queue for recovery. */
1588 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1589 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1590 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1592 if (targ->tm != NULL) {
1593 /* target already in recovery, just queue up another
1594 * timedout command to be processed later.
1596 mps_dprint(sc, MPS_RECOVERY,
1597 "queued timedout cm %p for processing by tm %p\n",
1600 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1601 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1604 /* start recovery by aborting the first timedout command */
1605 mpssas_send_abort(sc, targ->tm, cm);
1608 /* XXX queue this target up for recovery once a TM becomes
1609 * available. The firmware only has a limited number of
1610 * HighPriority credits for the high priority requests used
1611 * for task management, and we ran out.
1613 * Isilon: don't worry about this for now, since we have
1614 * more credits than disks in an enclosure, and limit
1615 * ourselves to one TM per target for recovery.
1617 mps_dprint(sc, MPS_RECOVERY,
1618 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM action handler for XPT_SCSI_IO: validates the target, builds an
 * MPI2 SCSI IO request from the CCB (direction, tagging, LUN, CDB,
 * optional EEDP protection fields), arms the per-command timeout, and
 * maps the command to the hardware.  On resource exhaustion the SIM
 * queue is frozen and the CCB requeued.
 */
1624 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1626 MPI2_SCSI_IO_REQUEST *req;
1627 struct ccb_scsiio *csio;
1628 struct mps_softc *sc;
1629 struct mpssas_target *targ;
1630 struct mpssas_lun *lun;
1631 struct mps_command *cm;
1632 uint8_t i, lba_byte, *ref_tag_addr;
1633 uint16_t eedp_flags;
1634 uint32_t mpi_control;
/*
 * NOTE(review): sc and csio are used below without visible
 * assignments; presumably set from sassc->sc and &ccb->csio on elided
 * lines -- confirm against the full source.
 */
1638 mtx_assert(&sc->mps_mtx, MA_OWNED);
1641 targ = &sassc->targets[csio->ccb_h.target_id];
1642 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Reject I/O to targets with no firmware device handle. */
1643 if (targ->handle == 0x0) {
1644 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1645 __func__, csio->ccb_h.target_id);
1646 csio->ccb_h.status = CAM_SEL_TIMEOUT;
/* RAID component members are not addressable for direct SCSI I/O. */
1650 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1651 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1652 "supported %u\n", __func__, csio->ccb_h.target_id);
1653 csio->ccb_h.status = CAM_TID_INVALID;
1658 * Sometimes, it is possible to get a command that is not "In
1659 * Progress" and was actually aborted by the upper layer. Check for
1660 * this here and complete the command without error.
1662 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1663 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1664 "target %u\n", __func__, csio->ccb_h.target_id);
1669 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1670 * that the volume has timed out. We want volumes to be enumerated
1671 * until they are deleted/removed, not just failed.
1673 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1674 if (targ->devinfo == 0)
1675 csio->ccb_h.status = CAM_REQ_CMP;
1677 csio->ccb_h.status = CAM_SEL_TIMEOUT;
/* No new I/O once the controller is shutting down. */
1682 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1683 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1684 csio->ccb_h.status = CAM_TID_INVALID;
1689 cm = mps_alloc_command(sc);
/* Out of command frames: freeze the SIM queue and requeue the CCB. */
1691 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1692 xpt_freeze_simq(sassc->sim, 1);
1693 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1695 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1696 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI IO request from the CCB. */
1701 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1702 bzero(req, sizeof(*req));
1703 req->DevHandle = htole16(targ->handle);
1704 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1706 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1707 req->SenseBufferLength = MPS_SENSE_LEN;
1709 req->ChainOffset = 0;
1710 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1715 req->DataLength = htole32(csio->dxfer_len);
1716 req->BidirectionalDataLength = 0;
1717 req->IoFlags = htole16(csio->cdb_len);
1720 /* Note: BiDirectional transfers are not supported */
1721 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1723 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1724 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1727 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1728 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1732 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1736 if (csio->cdb_len == 32)
1737 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1739 * It looks like the hardware doesn't require an explicit tag
1740 * number for each transaction. SAM Task Management not supported
1743 switch (csio->tag_action) {
1744 case MSG_HEAD_OF_Q_TAG:
1745 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1747 case MSG_ORDERED_Q_TAG:
1748 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1751 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1753 case CAM_TAG_ACTION_NONE:
1754 case MSG_SIMPLE_Q_TAG:
1756 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Mix in the per-target Transport Layer Retries setting. */
1759 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1760 req->Control = htole32(mpi_control);
1761 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1762 mps_free_command(sc, cm);
1763 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Copy the CDB in from either the pointer or inline form. */
1768 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1769 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1771 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1772 req->IoFlags = htole16(csio->cdb_len);
1775 * Check if EEDP is supported and enabled. If it is then check if the
1776 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1777 * is formatted for EEDP support. If all of this is true, set CDB up
1778 * for EEDP transfer.
1780 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1781 if (sc->eedp_enabled && eedp_flags) {
1782 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1783 if (lun->lun_id == csio->ccb_h.target_lun) {
1788 if ((lun != NULL) && (lun->eedp_formatted)) {
1789 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1790 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1791 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1792 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1793 req->EEDPFlags = htole16(eedp_flags);
1796 * If CDB less than 32, fill in Primary Ref Tag with
1797 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1798 * already there. Also, set protection bit. FreeBSD
1799 * currently does not support CDBs bigger than 16, but
1800 * the code doesn't hurt, and will be here for the
1803 if (csio->cdb_len != 32) {
1804 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1805 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1806 PrimaryReferenceTag;
1807 for (i = 0; i < 4; i++) {
1809 req->CDB.CDB32[lba_byte + i];
1812 req->CDB.EEDP32.PrimaryReferenceTag =
1813 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1814 req->CDB.EEDP32.PrimaryApplicationTagMask =
1816 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1820 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1821 req->EEDPFlags = htole16(eedp_flags);
1822 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Hook the data buffer (if any) to the command and set descriptors. */
1828 cm->cm_length = csio->dxfer_len;
1829 if (cm->cm_length != 0) {
1831 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1835 cm->cm_sge = &req->SGL;
1836 cm->cm_sglsize = (32 - 24) * 4;
1837 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1838 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1839 cm->cm_complete = mpssas_scsiio_complete;
1840 cm->cm_complete_data = ccb;
1842 cm->cm_lun = csio->ccb_h.target_lun;
1846 * If HBA is a WD and the command is not for a retry, try to build a
1847 * direct I/O message. If failed, or the command is for a retry, send
1848 * the I/O to the IR volume itself.
1850 if (sc->WD_valid_config) {
1851 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1852 mpssas_direct_drive_io(sassc, cm, ccb);
1854 ccb->ccb_h.status = CAM_REQ_INPROG;
/* Arm the per-command timeout (CCB timeout is in milliseconds). */
1858 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1859 mpssas_scsiio_timeout, cm);
1862 targ->outstanding++;
1863 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1864 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1866 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1867 __func__, cm, ccb, targ->outstanding);
1869 mps_map_command(sc, cm);
/*
 * Translate an MPI2 task-management response code into a human-readable
 * description and log it at MPS_XINFO level.
 */
1874 mps_response_code(struct mps_softc *sc, u8 response_code)
1878 switch (response_code) {
1879 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1880 desc = "task management request completed";
1882 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1883 desc = "invalid frame";
1885 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1886 desc = "task management request not supported";
1888 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1889 desc = "task management request failed";
1891 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1892 desc = "task management request succeeded";
1894 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1895 desc = "invalid lun";
1898 desc = "overlapped tag attempted";
1900 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1901 desc = "task queued, however not sent to target";
1907 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1908 response_code, desc);
1911 * mps_sc_failed_io_info - translates a non-successful SCSI_IO request
/*
 * Decodes the IOC status, SCSI status and SCSI state from a failed
 * SCSI IO reply into readable strings and logs them; also prints the
 * autosense buffer and decodes any response info when present.
 */
1914 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1915 Mpi2SCSIIOReply_t *mpi_reply)
1919 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1920 MPI2_IOCSTATUS_MASK;
1921 u8 scsi_state = mpi_reply->SCSIState;
1922 u8 scsi_status = mpi_reply->SCSIStatus;
1923 char *desc_ioc_state = NULL;
1924 char *desc_scsi_status = NULL;
/* scsi_state decodes are built by strcat into the softc scratch buffer. */
1925 char *desc_scsi_state = sc->tmp_string;
1926 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1928 if (log_info == 0x31170000)
/* Decode the masked IOC status into a short description. */
1931 switch (ioc_status) {
1932 case MPI2_IOCSTATUS_SUCCESS:
1933 desc_ioc_state = "success";
1935 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1936 desc_ioc_state = "invalid function";
1938 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1939 desc_ioc_state = "scsi recovered error";
1941 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1942 desc_ioc_state = "scsi invalid dev handle";
1944 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1945 desc_ioc_state = "scsi device not there";
1947 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1948 desc_ioc_state = "scsi data overrun";
1950 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1951 desc_ioc_state = "scsi data underrun";
1953 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1954 desc_ioc_state = "scsi io data error";
1956 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1957 desc_ioc_state = "scsi protocol error";
1959 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1960 desc_ioc_state = "scsi task terminated";
1962 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1963 desc_ioc_state = "scsi residual mismatch";
1965 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1966 desc_ioc_state = "scsi task mgmt failed";
1968 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1969 desc_ioc_state = "scsi ioc terminated";
1971 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1972 desc_ioc_state = "scsi ext terminated";
1974 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1975 desc_ioc_state = "eedp guard error";
1977 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1978 desc_ioc_state = "eedp ref tag error";
1980 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1981 desc_ioc_state = "eedp app tag error";
1984 desc_ioc_state = "unknown";
/* Decode the SCSI status byte into a short description. */
1988 switch (scsi_status) {
1989 case MPI2_SCSI_STATUS_GOOD:
1990 desc_scsi_status = "good";
1992 case MPI2_SCSI_STATUS_CHECK_CONDITION:
1993 desc_scsi_status = "check condition";
1995 case MPI2_SCSI_STATUS_CONDITION_MET:
1996 desc_scsi_status = "condition met";
1998 case MPI2_SCSI_STATUS_BUSY:
1999 desc_scsi_status = "busy";
2001 case MPI2_SCSI_STATUS_INTERMEDIATE:
2002 desc_scsi_status = "intermediate";
2004 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2005 desc_scsi_status = "intermediate condmet";
2007 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2008 desc_scsi_status = "reservation conflict";
2010 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2011 desc_scsi_status = "command terminated";
2013 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2014 desc_scsi_status = "task set full";
2016 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2017 desc_scsi_status = "aca active";
2019 case MPI2_SCSI_STATUS_TASK_ABORTED:
2020 desc_scsi_status = "task aborted";
2023 desc_scsi_status = "unknown";
2027 desc_scsi_state[0] = '\0';
/*
 * NOTE(review): this pointer reassignment to a string literal appears
 * unconditional here but an intervening guard (e.g. if (!scsi_state))
 * seems to be elided; otherwise the strcat calls below would write
 * into read-only memory -- confirm against the full source.
 */
2029 desc_scsi_state = " ";
2030 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2031 strcat(desc_scsi_state, "response info ");
2032 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2033 strcat(desc_scsi_state, "state terminated ");
2034 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2035 strcat(desc_scsi_state, "no status ");
2036 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2037 strcat(desc_scsi_state, "autosense failed ");
2038 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2039 strcat(desc_scsi_state, "autosense valid ");
2041 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2042 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2043 /* We can add more detail about underflow data here
2046 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2047 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2048 desc_scsi_state, scsi_state);
/* Dump the autosense buffer when debugging is enabled and sense is valid. */
2050 if (sc->mps_debug & MPS_XINFO &&
2051 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2052 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2053 scsi_sense_print(csio);
2054 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
/* Decode the first response-info byte when the reply carries one. */
2057 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2058 response_info = le32toh(mpi_reply->ResponseInfo);
2059 response_bytes = (u8 *)&response_info;
2060 mps_response_code(sc,response_bytes[0]);
2065 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2067 MPI2_SCSI_IO_REPLY *rep;
2069 struct ccb_scsiio *csio;
2070 struct mpssas_softc *sassc;
2071 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2072 u8 *TLR_bits, TLR_on;
2077 mps_dprint(sc, MPS_TRACE,
2078 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2079 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2080 cm->cm_targ->outstanding);
2082 callout_stop(&cm->cm_callout);
2083 mtx_assert(&sc->mps_mtx, MA_OWNED);
2086 ccb = cm->cm_complete_data;
2088 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2090 * XXX KDM if the chain allocation fails, does it matter if we do
2091 * the sync and unload here? It is simpler to do it in every case,
2092 * assuming it doesn't cause problems.
2094 if (cm->cm_data != NULL) {
2095 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2096 dir = BUS_DMASYNC_POSTREAD;
2097 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2098 dir = BUS_DMASYNC_POSTWRITE;
2099 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2100 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2103 cm->cm_targ->completed++;
2104 cm->cm_targ->outstanding--;
2105 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2106 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2108 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2109 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2110 if (cm->cm_reply != NULL)
2111 mpssas_log_command(cm, MPS_RECOVERY,
2112 "completed timedout cm %p ccb %p during recovery "
2113 "ioc %x scsi %x state %x xfer %u\n",
2115 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2116 le32toh(rep->TransferCount));
2118 mpssas_log_command(cm, MPS_RECOVERY,
2119 "completed timedout cm %p ccb %p during recovery\n",
2121 } else if (cm->cm_targ->tm != NULL) {
2122 if (cm->cm_reply != NULL)
2123 mpssas_log_command(cm, MPS_RECOVERY,
2124 "completed cm %p ccb %p during recovery "
2125 "ioc %x scsi %x state %x xfer %u\n",
2127 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2128 le32toh(rep->TransferCount));
2130 mpssas_log_command(cm, MPS_RECOVERY,
2131 "completed cm %p ccb %p during recovery\n",
2133 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2134 mpssas_log_command(cm, MPS_RECOVERY,
2135 "reset completed cm %p ccb %p\n",
2139 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2141 * We ran into an error after we tried to map the command,
2142 * so we're getting a callback without queueing the command
2143 * to the hardware. So we set the status here, and it will
2144 * be retained below. We'll go through the "fast path",
2145 * because there can be no reply when we haven't actually
2146 * gone out to the hardware.
2148 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2151 * Currently the only error included in the mask is
2152 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2153 * chain frames. We need to freeze the queue until we get
2154 * a command that completed without this error, which will
2155 * hopefully have some chain frames attached that we can
2156 * use. If we wanted to get smarter about it, we would
2157 * only unfreeze the queue in this condition when we're
2158 * sure that we're getting some chain frames back. That's
2159 * probably unnecessary.
2161 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2162 xpt_freeze_simq(sassc->sim, 1);
2163 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2164 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2165 "freezing SIM queue\n");
2169 /* Take the fast path to completion */
2170 if (cm->cm_reply == NULL) {
2171 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2172 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2173 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2175 ccb->ccb_h.status = CAM_REQ_CMP;
2176 ccb->csio.scsi_status = SCSI_STATUS_OK;
2178 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2179 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2180 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2181 mps_dprint(sc, MPS_XINFO,
2182 "Unfreezing SIM queue\n");
2187 * There are two scenarios where the status won't be
2188 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2189 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2191 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2193 * Freeze the dev queue so that commands are
2194 * executed in the correct order with after error
2197 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2198 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2200 mps_free_command(sc, cm);
2205 mpssas_log_command(cm, MPS_XINFO,
2206 "ioc %x scsi %x state %x xfer %u\n",
2207 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2208 le32toh(rep->TransferCount));
2211 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2212 * Volume if an error occurred (normal I/O retry). Use the original
2213 * CCB, but set a flag that this will be a retry so that it's sent to
2214 * the original volume. Free the command but reuse the CCB.
2216 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2217 mps_free_command(sc, cm);
2218 ccb->ccb_h.status = MPS_WD_RETRY;
2219 mpssas_action_scsiio(sassc, ccb);
2223 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2224 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2225 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2227 case MPI2_IOCSTATUS_SUCCESS:
2228 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2230 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2231 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2232 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2234 /* Completion failed at the transport level. */
2235 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2236 MPI2_SCSI_STATE_TERMINATED)) {
2237 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2241 /* In a modern packetized environment, an autosense failure
2242 * implies that there's not much else that can be done to
2243 * recover the command.
2245 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2246 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2251 * CAM doesn't care about SAS Response Info data, but if this is
2252 * the state check if TLR should be done. If not, clear the
2253 * TLR_bits for the target.
2255 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2256 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2257 MPS_SCSI_RI_INVALID_FRAME)) {
2258 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2259 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2263 * Intentionally override the normal SCSI status reporting
2264 * for these two cases. These are likely to happen in a
2265 * multi-initiator environment, and we want to make sure that
2266 * CAM retries these commands rather than fail them.
2268 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2269 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2270 ccb->ccb_h.status = CAM_REQ_ABORTED;
2274 /* Handle normal status and sense */
2275 csio->scsi_status = rep->SCSIStatus;
2276 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2277 ccb->ccb_h.status = CAM_REQ_CMP;
2279 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2281 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2282 int sense_len, returned_sense_len;
2284 returned_sense_len = min(le32toh(rep->SenseCount),
2285 sizeof(struct scsi_sense_data));
2286 if (returned_sense_len < ccb->csio.sense_len)
2287 ccb->csio.sense_resid = ccb->csio.sense_len -
2290 ccb->csio.sense_resid = 0;
2292 sense_len = min(returned_sense_len,
2293 ccb->csio.sense_len - ccb->csio.sense_resid);
2294 bzero(&ccb->csio.sense_data,
2295 sizeof(ccb->csio.sense_data));
2296 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2297 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2301 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2302 * and it's page code 0 (Supported Page List), and there is
2303 * inquiry data, and this is for a sequential access device, and
2304 * the device is an SSP target, and TLR is supported by the
2305 * controller, turn the TLR_bits value ON if page 0x90 is
2308 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2309 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2310 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2311 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2312 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2313 T_SEQUENTIAL) && (sc->control_TLR) &&
2314 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2315 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2316 vpd_list = (struct scsi_vpd_supported_page_list *)
2318 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2320 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2321 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2322 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2323 csio->cdb_io.cdb_bytes[4];
2324 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2325 if (vpd_list->list[i] == 0x90) {
2332 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2333 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2335 * If devinfo is 0 this will be a volume. In that case don't
2336 * tell CAM that the volume is not there. We want volumes to
2337 * be enumerated until they are deleted/removed, not just
2340 if (cm->cm_targ->devinfo == 0)
2341 ccb->ccb_h.status = CAM_REQ_CMP;
2343 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2345 case MPI2_IOCSTATUS_INVALID_SGL:
2346 mps_print_scsiio_cmd(sc, cm);
2347 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2349 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2351 * This is one of the responses that comes back when an I/O
2352 * has been aborted. If it is because of a timeout that we
2353 * initiated, just set the status to CAM_CMD_TIMEOUT.
2354 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2355 * command is the same (it gets retried, subject to the
2356 * retry counter), the only difference is what gets printed
2359 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2360 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2362 ccb->ccb_h.status = CAM_REQ_ABORTED;
2364 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2365 /* resid is ignored for this condition */
2367 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2369 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2370 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2372 * Since these are generally external (i.e. hopefully
2373 * transient transport-related) errors, retry these without
2374 * decrementing the retry count.
2376 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2377 mpssas_log_command(cm, MPS_INFO,
2378 "terminated ioc %x scsi %x state %x xfer %u\n",
2379 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2380 le32toh(rep->TransferCount));
2382 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2383 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2384 case MPI2_IOCSTATUS_INVALID_VPID:
2385 case MPI2_IOCSTATUS_INVALID_FIELD:
2386 case MPI2_IOCSTATUS_INVALID_STATE:
2387 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2388 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2389 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2390 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2391 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2393 mpssas_log_command(cm, MPS_XINFO,
2394 "completed ioc %x scsi %x state %x xfer %u\n",
2395 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2396 le32toh(rep->TransferCount));
2397 csio->resid = cm->cm_length;
2398 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2402 mps_sc_failed_io_info(sc,csio,rep);
2404 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2405 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2406 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2407 mps_dprint(sc, MPS_XINFO, "Command completed, "
2408 "unfreezing SIM queue\n");
2411 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2412 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2413 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2416 mps_free_command(sc, cm);
2420 /* All Request reached here are Endian safe */
/*
 * mpssas_direct_drive_io: WarpDrive direct-drive support.  If a READ/WRITE
 * CDB aimed at the IR volume lies entirely within one stripe, rewrite the
 * CDB's LBA to the physical-disk LBA and retarget the request's DevHandle
 * at the underlying PhysDisk; otherwise leave the I/O for the volume.
 * 6-byte, 10/12/16-byte (<2TB), and 16-byte (>2TB) CDBs are each handled
 * in their own branch.
 * NOTE(review): this chunk of the file is missing lines (source gaps);
 * comments below describe only the code visible here.
 */
2422 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2424 pMpi2SCSIIORequest_t pIO_req;
2425 struct mps_softc *sc = sassc->sc;
2427 uint32_t physLBA, stripe_offset, stripe_unit;
2428 uint32_t io_size, column;
2429 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2432 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2433 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2434 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2435 * bit different than the 10/16 CDBs, handle them separately.
2437 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2438 CDB = pIO_req->CDB.CDB32;
2441 * Handle 6 byte CDBs.
2443 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2444 (CDB[0] == WRITE_6))) {
2446 * Get the transfer size in blocks.
2448 io_size = (cm->cm_length >> sc->DD_block_exponent);
2451 * Get virtual LBA given in the CDB.
2453 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2454 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2457 * Check that LBA range for I/O does not exceed volume's
2460 if ((virtLBA + (uint64_t)io_size - 1) <=
2463 * Check if the I/O crosses a stripe boundary. If not,
2464 * translate the virtual LBA to a physical LBA and set
2465 * the DevHandle for the PhysDisk to be used. If it
2466 * does cross a boundry, do normal I/O. To get the
2467 * right DevHandle to use, get the map number for the
2468 * column, then use that map number to look up the
2469 * DevHandle of the PhysDisk.
2471 stripe_offset = (uint32_t)virtLBA &
2472 (sc->DD_stripe_size - 1);
2473 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2474 physLBA = (uint32_t)virtLBA >>
2475 sc->DD_stripe_exponent;
2476 stripe_unit = physLBA / sc->DD_num_phys_disks;
2477 column = physLBA % sc->DD_num_phys_disks;
2478 pIO_req->DevHandle =
2479 htole16(sc->DD_column_map[column].dev_handle);
2480 /* ???? Is this endian safe*/
2481 cm->cm_desc.SCSIIO.DevHandle =
2484 physLBA = (stripe_unit <<
2485 sc->DD_stripe_exponent) + stripe_offset;
/* Write the 21-bit physical LBA back into CDB bytes 1..3 (big-endian). */
2486 ptrLBA = &pIO_req->CDB.CDB32[1];
2487 physLBA_byte = (uint8_t)(physLBA >> 16);
2488 *ptrLBA = physLBA_byte;
2489 ptrLBA = &pIO_req->CDB.CDB32[2];
2490 physLBA_byte = (uint8_t)(physLBA >> 8);
2491 *ptrLBA = physLBA_byte;
2492 ptrLBA = &pIO_req->CDB.CDB32[3];
2493 physLBA_byte = (uint8_t)physLBA;
2494 *ptrLBA = physLBA_byte;
2497 * Set flag that Direct Drive I/O is
2500 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2507 * Handle 10, 12 or 16 byte CDBs.
2509 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2510 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2511 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2512 (CDB[0] == WRITE_12))) {
2514 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2515 * are 0. If not, this is accessing beyond 2TB so handle it in
2516 * the else section. 10-byte and 12-byte CDB's are OK.
2517 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2518 * ready to accept 12byte CDB for Direct IOs.
2520 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2521 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2522 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2524 * Get the transfer size in blocks.
2526 io_size = (cm->cm_length >> sc->DD_block_exponent);
2529 * Get virtual LBA. Point to correct lower 4 bytes of
2530 * LBA in the CDB depending on command.
2532 lba_idx = ((CDB[0] == READ_12) ||
2533 (CDB[0] == WRITE_12) ||
2534 (CDB[0] == READ_10) ||
2535 (CDB[0] == WRITE_10))? 2 : 6;
2536 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2537 ((uint64_t)CDB[lba_idx + 1] << 16) |
2538 ((uint64_t)CDB[lba_idx + 2] << 8) |
2539 (uint64_t)CDB[lba_idx + 3];
2542 * Check that LBA range for I/O does not exceed volume's
2545 if ((virtLBA + (uint64_t)io_size - 1) <=
2548 * Check if the I/O crosses a stripe boundary.
2549 * If not, translate the virtual LBA to a
2550 * physical LBA and set the DevHandle for the
2551 * PhysDisk to be used. If it does cross a
2552 * boundry, do normal I/O. To get the right
2553 * DevHandle to use, get the map number for the
2554 * column, then use that map number to look up
2555 * the DevHandle of the PhysDisk.
2557 stripe_offset = (uint32_t)virtLBA &
2558 (sc->DD_stripe_size - 1);
2559 if ((stripe_offset + io_size) <=
2560 sc->DD_stripe_size) {
2561 physLBA = (uint32_t)virtLBA >>
2562 sc->DD_stripe_exponent;
2563 stripe_unit = physLBA /
2564 sc->DD_num_phys_disks;
2566 sc->DD_num_phys_disks;
2567 pIO_req->DevHandle =
2568 htole16(sc->DD_column_map[column].
2570 cm->cm_desc.SCSIIO.DevHandle =
2573 physLBA = (stripe_unit <<
2574 sc->DD_stripe_exponent) +
/* Store physical LBA big-endian into the CDB's 4 LBA bytes. */
2577 &pIO_req->CDB.CDB32[lba_idx];
2578 physLBA_byte = (uint8_t)(physLBA >> 24);
2579 *ptrLBA = physLBA_byte;
2581 &pIO_req->CDB.CDB32[lba_idx + 1];
2582 physLBA_byte = (uint8_t)(physLBA >> 16);
2583 *ptrLBA = physLBA_byte;
2585 &pIO_req->CDB.CDB32[lba_idx + 2];
2586 physLBA_byte = (uint8_t)(physLBA >> 8);
2587 *ptrLBA = physLBA_byte;
2589 &pIO_req->CDB.CDB32[lba_idx + 3];
2590 physLBA_byte = (uint8_t)physLBA;
2591 *ptrLBA = physLBA_byte;
2594 * Set flag that Direct Drive I/O is
2597 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2602 * 16-byte CDB and the upper 4 bytes of the CDB are not
2603 * 0. Get the transfer size in blocks.
2605 io_size = (cm->cm_length >> sc->DD_block_exponent);
/*
 * BUGFIX: CDB byte 2 is the most-significant byte of the 64-bit
 * big-endian LBA (bytes 2..9), so it must be shifted by 56, not
 * 54 -- the following bytes already use 48, 40, ..., 8.
 */
2610 virtLBA = ((uint64_t)CDB[2] << 56) |
2611 ((uint64_t)CDB[3] << 48) |
2612 ((uint64_t)CDB[4] << 40) |
2613 ((uint64_t)CDB[5] << 32) |
2614 ((uint64_t)CDB[6] << 24) |
2615 ((uint64_t)CDB[7] << 16) |
2616 ((uint64_t)CDB[8] << 8) |
2620 * Check that LBA range for I/O does not exceed volume's
2623 if ((virtLBA + (uint64_t)io_size - 1) <=
2626 * Check if the I/O crosses a stripe boundary.
2627 * If not, translate the virtual LBA to a
2628 * physical LBA and set the DevHandle for the
2629 * PhysDisk to be used. If it does cross a
2630 * boundry, do normal I/O. To get the right
2631 * DevHandle to use, get the map number for the
2632 * column, then use that map number to look up
2633 * the DevHandle of the PhysDisk.
2635 stripe_offset = (uint32_t)virtLBA &
2636 (sc->DD_stripe_size - 1);
2637 if ((stripe_offset + io_size) <=
2638 sc->DD_stripe_size) {
2639 physLBA = (uint32_t)(virtLBA >>
2640 sc->DD_stripe_exponent);
2641 stripe_unit = physLBA /
2642 sc->DD_num_phys_disks;
2644 sc->DD_num_phys_disks;
2645 pIO_req->DevHandle =
2646 htole16(sc->DD_column_map[column].
2648 cm->cm_desc.SCSIIO.DevHandle =
2651 physLBA = (stripe_unit <<
2652 sc->DD_stripe_exponent) +
2656 * Set upper 4 bytes of LBA to 0. We
2657 * assume that the phys disks are less
2658 * than 2 TB's in size. Then, set the
2661 pIO_req->CDB.CDB32[2] = 0;
2662 pIO_req->CDB.CDB32[3] = 0;
2663 pIO_req->CDB.CDB32[4] = 0;
2664 pIO_req->CDB.CDB32[5] = 0;
2665 ptrLBA = &pIO_req->CDB.CDB32[6];
2666 physLBA_byte = (uint8_t)(physLBA >> 24);
2667 *ptrLBA = physLBA_byte;
2668 ptrLBA = &pIO_req->CDB.CDB32[7];
2669 physLBA_byte = (uint8_t)(physLBA >> 16);
2670 *ptrLBA = physLBA_byte;
2671 ptrLBA = &pIO_req->CDB.CDB32[8];
2672 physLBA_byte = (uint8_t)(physLBA >> 8);
2673 *ptrLBA = physLBA_byte;
2674 ptrLBA = &pIO_req->CDB.CDB32[9];
2675 physLBA_byte = (uint8_t)physLBA;
2676 *ptrLBA = physLBA_byte;
2679 * Set flag that Direct Drive I/O is
2682 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2689 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command: validate the command
 * flags and reply frame, translate IOC/SAS status into a CAM status on the
 * user's CCB, then sync/unload the DMA map and free the command.
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
2691 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2693 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2694 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2698 ccb = cm->cm_complete_data;
2701 * Currently there should be no way we can hit this case. It only
2702 * happens when we have a failure to allocate chain frames, and SMP
2703 * commands require two S/G elements only. That should be handled
2704 * in the standard request size.
2706 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2707 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2708 __func__, cm->cm_flags);
2709 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2713 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2715 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2716 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Reassemble the 64-bit SAS address from the little-endian request. */
2720 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2721 sasaddr = le32toh(req->SASAddress.Low);
2722 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2724 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2725 MPI2_IOCSTATUS_SUCCESS ||
2726 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2727 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2728 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2729 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2733 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2734 "%#jx completed successfully\n", __func__,
2735 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2737 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2738 ccb->ccb_h.status = CAM_REQ_CMP;
2740 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2744 * We sync in both directions because we had DMAs in the S/G list
2745 * in both directions.
2747 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2748 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2749 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2750 mps_free_command(sc, cm);
/*
 * Build and issue an SMP passthrough request to the given SAS address.
 * Validates the CCB's data addressing mode (virtual only; single request
 * and single response segment), fills in the MPI2 request, and maps the
 * two buffers with one uio/busdma call.  Completion is handled by
 * mpssas_smpio_complete().
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
2755 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2757 struct mps_command *cm;
2758 uint8_t *request, *response;
2759 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2760 struct mps_softc *sc;
2769 * XXX We don't yet support physical addresses here.
2771 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2772 case CAM_DATA_PADDR:
2773 case CAM_DATA_SG_PADDR:
2774 mps_dprint(sc, MPS_ERROR,
2775 "%s: physical addresses not supported\n", __func__);
2776 ccb->ccb_h.status = CAM_REQ_INVALID;
2781 * The chip does not support more than one buffer for the
2782 * request or response.
2784 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2785 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2786 mps_dprint(sc, MPS_ERROR,
2787 "%s: multiple request or response "
2788 "buffer segments not supported for SMP\n",
2790 ccb->ccb_h.status = CAM_REQ_INVALID;
2796 * The CAM_SCATTER_VALID flag was originally implemented
2797 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2798 * We have two. So, just take that flag to mean that we
2799 * might have S/G lists, and look at the S/G segment count
2800 * to figure out whether that is the case for each individual
2803 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2804 bus_dma_segment_t *req_sg;
2806 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2807 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2809 request = ccb->smpio.smp_request;
2811 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2812 bus_dma_segment_t *rsp_sg;
2814 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2815 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2817 response = ccb->smpio.smp_response;
2819 case CAM_DATA_VADDR:
2820 request = ccb->smpio.smp_request;
2821 response = ccb->smpio.smp_response;
2824 ccb->ccb_h.status = CAM_REQ_INVALID;
2829 cm = mps_alloc_command(sc);
2831 mps_dprint(sc, MPS_ERROR,
2832 "%s: cannot allocate command\n", __func__);
2833 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2838 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2839 bzero(req, sizeof(*req));
2840 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2842 /* Allow the chip to use any route to this SAS address. */
2843 req->PhysicalPort = 0xff;
2845 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2847 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2849 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2850 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2852 mpi_init_sge(cm, req, &req->SGL);
2855 * Set up a uio to pass into mps_map_command(). This allows us to
2856 * do one map command, and one busdma call in there.
2858 cm->cm_uio.uio_iov = cm->cm_iovec;
2859 cm->cm_uio.uio_iovcnt = 2;
2860 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2863 * The read/write flag isn't used by busdma, but set it just in
2864 * case. This isn't exactly accurate, either, since we're going in
2867 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 = SMP request (out), iovec 1 = SMP response (in). */
2869 cm->cm_iovec[0].iov_base = request;
2870 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2871 cm->cm_iovec[1].iov_base = response;
2872 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2874 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2875 cm->cm_iovec[1].iov_len;
2878 * Trigger a warning message in mps_data_cb() for the user if we
2879 * wind up exceeding two S/G segments. The chip expects one
2880 * segment for the request and another for the response.
2882 cm->cm_max_segs = 2;
2884 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2885 cm->cm_complete = mpssas_smpio_complete;
2886 cm->cm_complete_data = ccb;
2889 * Tell the mapping code that we're using a uio, and that this is
2890 * an SMP passthrough request. There is a little special-case
2891 * logic there (in mps_data_cb()) to handle the bidirectional
2894 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2895 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2897 /* The chip data format is little endian. */
2898 req->SASAddress.High = htole32(sasaddr >> 32);
2899 req->SASAddress.Low = htole32(sasaddr);
2902 * XXX Note that we don't have a timeout/abort mechanism here.
2903 * From the manual, it looks like task management requests only
2904 * work for SCSI IO and SATA passthrough requests. We may need to
2905 * have a mechanism to retry requests in the event of a chip reset
2906 * at least. Hopefully the chip will insure that any errors short
2907 * of that are relayed back to the driver.
2909 error = mps_map_command(sc, cm);
2910 if ((error != 0) && (error != EINPROGRESS)) {
2911 mps_dprint(sc, MPS_ERROR,
2912 "%s: error %d returned from mps_map_command()\n",
/* Error unwind: release the command and report resource shortage. */
2920 mps_free_command(sc, cm);
2921 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address should receive
 * the SMP request (the target itself if it embeds an SMP target,
 * otherwise its parent expander) and hand off to mpssas_send_smpcmd().
 * Sets an error status on the CCB and returns early when no usable
 * address can be found.
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
2928 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2930 struct mps_softc *sc;
2931 struct mpssas_target *targ;
2932 uint64_t sasaddr = 0;
2937 * Make sure the target exists.
2939 targ = &sassc->targets[ccb->ccb_h.target_id];
2940 if (targ->handle == 0x0) {
2941 mps_dprint(sc, MPS_ERROR,
2942 "%s: target %d does not exist!\n", __func__,
2943 ccb->ccb_h.target_id);
2944 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2950 * If this device has an embedded SMP target, we'll talk to it
2952 * figure out what the expander's address is.
2954 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2955 sasaddr = targ->sasaddr;
2958 * If we don't have a SAS address for the expander yet, try
2959 * grabbing it from the page 0x83 information cached in the
2960 * transport layer for this target. LSI expanders report the
2961 * expander SAS address as the port-associated SAS address in
2962 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2965 * XXX KDM disable this for now, but leave it commented out so that
2966 * it is obvious that this is another possible way to get the SAS
2969 * The parent handle method below is a little more reliable, and
2970 * the other benefit is that it works for devices other than SES
2971 * devices. So you can send a SMP request to a da(4) device and it
2972 * will get routed to the expander that device is attached to.
2973 * (Assuming the da(4) device doesn't contain an SMP target...)
2977 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2981 * If we still don't have a SAS address for the expander, look for
2982 * the parent device of this device, which is probably the expander.
2985 #ifdef OLD_MPS_PROBE
2986 struct mpssas_target *parent_target;
2989 if (targ->parent_handle == 0x0) {
2990 mps_dprint(sc, MPS_ERROR,
2991 "%s: handle %d does not have a valid "
2992 "parent handle!\n", __func__, targ->handle);
2993 ccb->ccb_h.status = CAM_REQ_INVALID;
2996 #ifdef OLD_MPS_PROBE
/* Legacy probe path: resolve the parent target by handle lookup. */
2997 parent_target = mpssas_find_target_by_handle(sassc, 0,
2998 targ->parent_handle);
3000 if (parent_target == NULL) {
3001 mps_dprint(sc, MPS_ERROR,
3002 "%s: handle %d does not have a valid "
3003 "parent target!\n", __func__, targ->handle);
3004 ccb->ccb_h.status = CAM_REQ_INVALID;
3008 if ((parent_target->devinfo &
3009 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3010 mps_dprint(sc, MPS_ERROR,
3011 "%s: handle %d parent %d does not "
3012 "have an SMP target!\n", __func__,
3013 targ->handle, parent_target->handle);
3014 ccb->ccb_h.status = CAM_REQ_INVALID;
3019 sasaddr = parent_target->sasaddr;
3020 #else /* OLD_MPS_PROBE */
/* Current path: use the parent devinfo/sasaddr cached on the target. */
3021 if ((targ->parent_devinfo &
3022 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3023 mps_dprint(sc, MPS_ERROR,
3024 "%s: handle %d parent %d does not "
3025 "have an SMP target!\n", __func__,
3026 targ->handle, targ->parent_handle);
3027 ccb->ccb_h.status = CAM_REQ_INVALID;
3031 if (targ->parent_sasaddr == 0x0) {
3032 mps_dprint(sc, MPS_ERROR,
3033 "%s: handle %d parent handle %d does "
3034 "not have a valid SAS address!\n",
3035 __func__, targ->handle, targ->parent_handle);
3036 ccb->ccb_h.status = CAM_REQ_INVALID;
3040 sasaddr = targ->parent_sasaddr;
3041 #endif /* OLD_MPS_PROBE */
3046 mps_dprint(sc, MPS_INFO,
3047 "%s: unable to find SAS address for handle %d\n",
3048 __func__, targ->handle);
3049 ccb->ccb_h.status = CAM_REQ_INVALID;
3052 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3060 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command and
 * issue a SCSI Target Reset (with SAS hard link reset / SATA link reset
 * message flags) to the target addressed by the CCB.  Completion is
 * handled by mpssas_resetdev_complete().
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
3063 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3065 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3066 struct mps_softc *sc;
3067 struct mps_command *tm;
3068 struct mpssas_target *targ;
3070 MPS_FUNCTRACE(sassc->sc);
3071 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3074 tm = mps_alloc_command(sc);
3076 mps_dprint(sc, MPS_ERROR,
3077 "command alloc failure in mpssas_action_resetdev\n");
3078 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3083 targ = &sassc->targets[ccb->ccb_h.target_id];
3084 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3085 req->DevHandle = htole16(targ->handle);
3086 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3087 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3089 /* SAS Hard Link Reset / SATA Link Reset */
3090 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go out on the high-priority queue. */
3093 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3094 tm->cm_complete = mpssas_resetdev_complete;
3095 tm->cm_complete_data = ccb;
3097 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task-management command issued
 * by mpssas_action_resetdev(): map the TM response code to a CAM status,
 * announce the bus-device-reset to CAM on success, and free the command.
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
3101 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3103 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3107 mtx_assert(&sc->mps_mtx, MA_OWNED);
3109 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3110 ccb = tm->cm_complete_data;
3113 * Currently there should be no way we can hit this case. It only
3114 * happens when we have a failure to allocate chain frames, and
3115 * task management commands don't have S/G lists.
3117 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3118 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3120 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3122 mps_dprint(sc, MPS_ERROR,
3123 "%s: cm_flags = %#x for reset of handle %#04x! "
3124 "This should not happen!\n", __func__, tm->cm_flags,
3126 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3130 mps_dprint(sc, MPS_XINFO,
3131 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3132 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3134 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3135 ccb->ccb_h.status = CAM_REQ_CMP;
/* Tell CAM a bus device reset was delivered to this target. */
3136 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3140 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3144 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point, used when interrupts are unavailable (e.g.
 * during a kernel dump/panic).  Disables MPS_TRACE debugging first so the
 * frequent poll-driven messages don't drown the console, then services
 * the controller via the locked interrupt handler.
 */
3149 mpssas_poll(struct cam_sim *sim)
3151 struct mpssas_softc *sassc;
3153 sassc = cam_sim_softc(sim);
3155 if (sassc->sc->mps_debug & MPS_TRACE) {
3156 /* frequent debug messages during a panic just slow
3157 * everything down too much.
3159 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3160 sassc->sc->mps_debug &= ~MPS_TRACE;
3163 mps_intr_locked(sassc->sc);
/*
 * CAM async event callback.  Two events are of interest:
 *  - AC_ADVINFO_CHANGED (newer kernels): re-read the long READ CAPACITY
 *    data via XPT_DEV_ADVINFO and update the per-LUN EEDP (protection
 *    information) state, creating the LUN record if needed.
 *  - AC_FOUND_DEVICE (older kernels): probe the new device for EEDP via
 *    mpssas_check_eedp().
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
3167 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3170 struct mps_softc *sc;
3172 sc = (struct mps_softc *)callback_arg;
3175 #if (__FreeBSD_version >= 1000006) || \
3176 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3177 case AC_ADVINFO_CHANGED: {
3178 struct mpssas_target *target;
3179 struct mpssas_softc *sassc;
3180 struct scsi_read_capacity_data_long rcap_buf;
3181 struct ccb_dev_advinfo cdai;
3182 struct mpssas_lun *lun;
3187 buftype = (uintptr_t)arg;
3193 * We're only interested in read capacity data changes.
3195 if (buftype != CDAI_TYPE_RCAPLONG)
3199 * We should have a handle for this, but check to make sure.
3201 target = &sassc->targets[xpt_path_target_id(path)];
3202 if (target->handle == 0)
3205 lunid = xpt_path_lun_id(path);
3207 SLIST_FOREACH(lun, &target->luns, lun_link) {
3208 if (lun->lun_id == lunid) {
3214 if (found_lun == 0) {
/* LUN not tracked yet: allocate a record so EEDP state can be kept. */
3215 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3218 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3219 "LUN for EEDP support.\n");
3222 lun->lun_id = lunid;
3223 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data from the transport layer. */
3226 bzero(&rcap_buf, sizeof(rcap_buf));
3227 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3228 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3229 cdai.ccb_h.flags = CAM_DIR_IN;
3230 cdai.buftype = CDAI_TYPE_RCAPLONG;
3232 cdai.bufsiz = sizeof(rcap_buf);
3233 cdai.buf = (uint8_t *)&rcap_buf;
3234 xpt_action((union ccb *)&cdai);
3235 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3236 cam_release_devq(cdai.ccb_h.path,
3239 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3240 && (rcap_buf.prot & SRC16_PROT_EN)) {
3241 lun->eedp_formatted = TRUE;
3242 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3244 lun->eedp_formatted = FALSE;
3245 lun->eedp_block_size = 0;
3250 case AC_FOUND_DEVICE: {
3251 struct ccb_getdev *cgd;
3254 mpssas_check_eedp(sc, path, cgd);
3263 #if (__FreeBSD_version < 901503) || \
3264 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * For older kernels without AC_ADVINFO_CHANGED: determine whether a newly
 * found device is EEDP (protection information) capable by checking the
 * SPC-3 PROTECT bit in its inquiry data, and if so issue an internal
 * READ CAPACITY(16) whose completion (mpssas_read_cap_done) records the
 * EEDP formatting state on the per-LUN record.
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
3266 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3267 struct ccb_getdev *cgd)
3269 struct mpssas_softc *sassc = sc->sassc;
3270 struct ccb_scsiio *csio;
3271 struct scsi_read_capacity_16 *scsi_cmd;
3272 struct scsi_read_capacity_eedp *rcap_buf;
3274 target_id_t targetid;
3277 struct cam_path *local_path;
3278 struct mpssas_target *target;
3279 struct mpssas_lun *lun;
3284 pathid = cam_sim_path(sassc->sim);
3285 targetid = xpt_path_target_id(path);
3286 lunid = xpt_path_lun_id(path);
3288 target = &sassc->targets[targetid];
3289 if (target->handle == 0x0)
3293 * Determine if the device is EEDP capable.
3295 * If this flag is set in the inquiry data,
3296 * the device supports protection information,
3297 * and must support the 16 byte read
3298 * capacity command, otherwise continue without
3299 * sending read cap 16
3301 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3305 * Issue a READ CAPACITY 16 command. This info
3306 * is used to determine if the LUN is formatted
3309 ccb = xpt_alloc_ccb_nowait();
3311 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3312 "for EEDP support.\n");
3316 if (xpt_create_path(&local_path, xpt_periph,
3317 pathid, targetid, lunid) != CAM_REQ_CMP) {
3318 mps_dprint(sc, MPS_ERROR, "Unable to create "
3319 "path for EEDP support\n");
3325 * If LUN is already in list, don't create a new
3329 SLIST_FOREACH(lun, &target->luns, lun_link) {
3330 if (lun->lun_id == lunid) {
3336 lun = malloc(sizeof(struct mpssas_lun),  M_MPT2,
3339 mps_dprint(sc, MPS_ERROR,
3340 "Unable to alloc LUN for EEDP support.\n");
3341 xpt_free_path(local_path);
3345 lun->lun_id = lunid;
3346 SLIST_INSERT_HEAD(&target->luns, lun,
3350 xpt_path_string(local_path, path_str, sizeof(path_str));
3351 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3352 path_str, target->handle);
3355 * Issue a READ CAPACITY 16 command for the LUN.
3356 * The mpssas_read_cap_done function will load
3357 * the read cap info into the LUN struct.
3359 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3360 M_MPT2, M_NOWAIT | M_ZERO);
3361 if (rcap_buf == NULL) {
3362 mps_dprint(sc, MPS_FAULT,
3363 "Unable to alloc read capacity buffer for EEDP support.\n");
3364 xpt_free_path(ccb->ccb_h.path);
3368 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3370 csio->ccb_h.func_code = XPT_SCSI_IO;
3371 csio->ccb_h.flags = CAM_DIR_IN;
3372 csio->ccb_h.retry_count = 4;
3373 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3374 csio->ccb_h.timeout = 60000;
3375 csio->data_ptr = (uint8_t *)rcap_buf;
3376 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3377 csio->sense_len = MPS_SENSE_LEN;
3378 csio->cdb_len = sizeof(*scsi_cmd);
3379 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E/0x10 = SERVICE ACTION IN(16) / READ CAPACITY(16). */
3381 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3382 bzero(scsi_cmd, sizeof(*scsi_cmd));
3383 scsi_cmd->opcode = 0x9E;
3384 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3385 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3387 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internal READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Releases the devq (driver-internal commands do
 * not return through CAM's normal path), finds the matching LUN record,
 * and records whether the LUN is EEDP-formatted plus its block size.
 * Frees the data buffer, path, and CCB on the way out.
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
3392 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3394 struct mpssas_softc *sassc;
3395 struct mpssas_target *target;
3396 struct mpssas_lun *lun;
3397 struct scsi_read_capacity_eedp *rcap_buf;
3399 if (done_ccb == NULL)
3402 /* Driver need to release devq, it Scsi command is
3403 * generated by driver internally.
3404 * Currently there is a single place where driver
3405 * calls scsi command internally. In future if driver
3406 * calls more scsi command internally, it needs to release
3407 * devq internally, since those command will not go back to
3410 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3411 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3412 xpt_release_devq(done_ccb->ccb_h.path,
3413 /*count*/ 1, /*run_queue*/TRUE);
3416 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3419 * Get the LUN ID for the path and look it up in the LUN list for the
3422 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3423 target = &sassc->targets[done_ccb->ccb_h.target_id];
3424 SLIST_FOREACH(lun, &target->luns, lun_link) {
3425 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3429 * Got the LUN in the target's LUN list. Fill it in
3430 * with EEDP info. If the READ CAP 16 command had some
3431 * SCSI error (common if command is not supported), mark
3432 * the lun as not supporting EEDP and set the block size
3435 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3436 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3437 lun->eedp_formatted = FALSE;
3438 lun->eedp_block_size = 0;
/* Bit 0 of the PROT byte (P_TYPE/PROT_EN) => EEDP formatted. */
3442 if (rcap_buf->protect & 0x01) {
3443 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3444 "target ID %d is formatted for EEDP "
3445 "support.\n", done_ccb->ccb_h.target_lun,
3446 done_ccb->ccb_h.target_id);
3447 lun->eedp_formatted = TRUE;
3448 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3453 // Finished with this CCB and path.
3454 free(rcap_buf, M_MPT2);
3455 xpt_free_path(done_ccb->ccb_h.path);
3456 xpt_free_ccb(done_ccb);
3458 #endif /* (__FreeBSD_version < 901503) || \
3459 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Kick off SAS discovery: bump the startup refcount (keeps the SIM queue
 * frozen until discovery settles), set wait_for_port_enable, and send the
 * MPI2 Port Enable request.
 */
3462 mpssas_startup(struct mps_softc *sc)
3464 struct mpssas_softc *sassc;
3467 * Send the port enable message and set the wait_for_port_enable flag.
3468 * This flag helps to keep the simq frozen until all discovery events
3472 mpssas_startup_increment(sassc);
3473 sc->wait_for_port_enable = 1;
3474 mpssas_send_portenable(sc);
/*
 * Allocate a command and send the MPI2 PORT ENABLE request to the
 * controller.  Completion is handled by mpssas_portenable_complete().
 * NOTE(review): this chunk is missing lines (source gaps); the
 * allocation-failure return path is not visible here.
 */
3479 mpssas_send_portenable(struct mps_softc *sc)
3481 MPI2_PORT_ENABLE_REQUEST *request;
3482 struct mps_command *cm;
3486 if ((cm = mps_alloc_command(sc)) == NULL)
3488 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3489 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3490 request->MsgFlags = 0;
3492 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3493 cm->cm_complete = mpssas_portenable_complete;
3497 mps_map_command(sc, cm);
3498 mps_dprint(sc, MPS_XINFO,
3499 "mps_send_portenable finished cm %p req %p complete %p\n",
3500 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT ENABLE request sent at startup.  Logs
 * failures, tears down the config intrhook, pulls WarpDrive config pages
 * before the initial bus scan, then releases the startup refcount and the
 * explicitly-frozen SIM queue, and wakes anyone sleeping on
 * port_enable_complete.
 * NOTE(review): this chunk is missing lines (source gaps); comments
 * describe only the code visible here.
 */
3505 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3507 MPI2_PORT_ENABLE_REPLY *reply;
3508 struct mpssas_softc *sassc;
3514 * Currently there should be no way we can hit this case. It only
3515 * happens when we have a failure to allocate chain frames, and
3516 * port enable commands don't have S/G lists.
3518 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3519 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3520 "This should not happen!\n", __func__, cm->cm_flags);
3523 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3525 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
/*
 * BUGFIX: byte-swap IOCStatus BEFORE masking.  The mask constant is in
 * host byte order, so le16toh() must be applied to the raw field first
 * (as done elsewhere in this file); the old form misevaluated on
 * big-endian hosts.
 */
3526 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3527 MPI2_IOCSTATUS_SUCCESS)
3528 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3530 mps_free_command(sc, cm);
3531 if (sc->mps_ich.ich_arg != NULL) {
3532 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3533 config_intrhook_disestablish(&sc->mps_ich);
3534 sc->mps_ich.ich_arg = NULL;
3538 * Get WarpDrive info after discovery is complete but before the scan
3539 * starts. At this point, all devices are ready to be exposed to the
3540 * OS. If devices should be hidden instead, take them out of the
3541 * 'targets' array before the scan. The devinfo for a disk will have
3542 * some info and a volume's will be 0. Use that to remove disks.
3544 mps_wd_config_pages(sc);
3547 * Done waiting for port enable to complete. Decrement the refcount.
3548 * If refcount is 0, discovery is complete and a rescan of the bus can
3549 * take place. Since the simq was explicitly frozen before port
3550 * enable, it must be explicitly released here to keep the
3551 * freeze/release count in sync.
3553 sc->wait_for_port_enable = 0;
3554 sc->port_enable_complete = 1;
3555 wakeup(&sc->port_enable_complete);
3556 mpssas_startup_decrement(sassc);
3557 xpt_release_simq(sassc->sim, 1);
3561 mpssas_check_id(struct mpssas_softc *sassc, int id)
3563 struct mps_softc *sc = sassc->sc;
3567 ids = &sc->exclude_ids[0];
3568 while((name = strsep(&ids, ",")) != NULL) {
3569 if (name[0] == '\0')
3571 if (strtol(name, NULL, 0) == (long)id)