2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * LSI MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for LSI MPT2 */
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
/*
 * Discovery timeout tunables: poll every 20s, give up after 10 timeouts.
 * NOTE(review): this listing appears to have lines elided (original line
 * numbers skip); code below is preserved byte-for-byte.
 */
88 #define MPSSAS_DISCOVERY_TIMEOUT 20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
92 * static array to check SCSI OpCode for EEDP protection bits
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI opcode (0x00-0xFF).  Non-zero entries give
 * the MPI2 EEDP flags to apply for that opcode: PRO_R for reads (check and
 * remove protection info), PRO_W for writes (insert), PRO_V for verify-class
 * opcodes (insert).  E.g. row 0x20-0x2F covers READ(10)/WRITE(10)/VERIFY(10).
 */
97 static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Malloc type used for all allocations made by this SAS layer. */
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126 struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149 struct mps_command *cm);
/*
 * Linear search of the target array, beginning at index 'start', for the
 * target whose firmware device handle matches 'handle'.
 * NOTE(review): the return statements are elided in this listing —
 * presumably returns the matching target, or NULL when none is found;
 * confirm against the full source.
 */
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
154 struct mpssas_target *target;
157 for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
158 target = &sassc->targets[i];
159 if (target->handle == handle)
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery. Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
/*
 * Take a reference on in-progress discovery activity.  While the softc is
 * in MPSSAS_IN_STARTUP, the first reference freezes the SIM queue so CAM
 * won't issue commands before device handles are known (see comment above).
 */
174 mpssas_startup_increment(struct mpssas_softc *sassc)
176 MPS_FUNCTRACE(sassc->sc);
178 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 if (sassc->startup_refcount++ == 0) {
180 /* just starting, freeze the simq */
181 mps_dprint(sassc->sc, MPS_INIT,
182 "%s freezing simq\n", __func__);
183 xpt_freeze_simq(sassc->sim, 1);
185 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
186 sassc->startup_refcount);
/*
 * Drop a discovery reference taken by mpssas_startup_increment().  When the
 * count reaches zero, startup is over: clear MPSSAS_IN_STARTUP, release the
 * SIM queue, and rescan so CAM sees the discovered topology.
 */
191 mpssas_startup_decrement(struct mpssas_softc *sassc)
193 MPS_FUNCTRACE(sassc->sc);
195 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
196 if (--sassc->startup_refcount == 0) {
197 /* finished all discovery-related actions, release
198 * the simq and rescan for the latest topology.
200 mps_dprint(sassc->sc, MPS_INIT,
201 "%s releasing simq\n", __func__);
202 sassc->flags &= ~MPSSAS_IN_STARTUP;
/* Version split: newer kernels take a different path here (lines elided). */
203 #if (__FreeBSD_version >= 1000039) || \
204 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
207 xpt_release_simq(sassc->sim, 1);
208 mpssas_rescan_target(sassc->sc, NULL);
211 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
212 sassc->startup_refcount);
216 /* LSI's firmware requires us to stop sending commands when we're doing task
217 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management.  The first
 * outstanding TM freezes the SIM queue; mpssas_free_tm() releases it when
 * the last TM completes.  NOTE(review): the NULL-check on 'tm' and the
 * return are elided in this listing.
 */
221 mpssas_alloc_tm(struct mps_softc *sc)
223 struct mps_command *tm;
226 tm = mps_alloc_high_priority_command(sc);
228 if (sc->sassc->tm_count++ == 0) {
229 mps_dprint(sc, MPS_RECOVERY,
230 "%s freezing simq\n", __func__);
231 xpt_freeze_simq(sc->sassc->sim, 1);
233 mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
234 sc->sassc->tm_count);
/*
 * Return a task-management command allocated by mpssas_alloc_tm().  Drops
 * the TM refcount and releases the SIM queue when no TMs remain in flight.
 */
240 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
242 mps_dprint(sc, MPS_TRACE, "%s", __func__);
246 /* if there are no TMs in use, we can release the simq. We use our
247 * own refcount so that it's easier for a diag reset to cleanup and
250 if (--sc->sassc->tm_count == 0) {
251 mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
252 xpt_release_simq(sc->sassc->sim, 1);
254 mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
255 sc->sassc->tm_count);
257 mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when 'targ' is
 * NULL (wildcard target).  Allocates a CCB without sleeping, builds a path
 * to the target, and selects XPT_SCAN_BUS vs XPT_SCAN_TGT accordingly.
 * NOTE(review): the xpt_rescan()/error-return lines are elided here.
 */
261 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
263 struct mpssas_softc *sassc = sc->sassc;
265 target_id_t targetid;
269 pathid = cam_sim_path(sassc->sim);
271 targetid = CAM_TARGET_WILDCARD;
/* Target id is the index of 'targ' within the softc's target array. */
273 targetid = targ - sassc->targets;
276 * Allocate a CCB and schedule a rescan.
278 ccb = xpt_alloc_ccb_nowait();
280 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
284 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
285 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
286 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
291 if (targetid == CAM_TARGET_WILDCARD)
292 ccb->ccb_h.func_code = XPT_SCAN_BUS;
294 ccb->ccb_h.func_code = XPT_SCAN_TGT;
296 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style logger for a command: prefixes the message with the CAM path
 * (or a synthesized "noperiph" path when no CCB is attached), the CDB for
 * SCSI I/O, and the SMID, then emits it via mps_dprint_field() at 'level'.
 * Uses a fixed on-stack sbuf; long messages are truncated by sbuf.
 */
301 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
311 sbuf_new(&sb, str, sizeof(str), 0);
315 if (cm->cm_ccb != NULL) {
316 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
318 sbuf_cat(&sb, path_str);
319 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
320 scsi_command_string(&cm->cm_ccb->csio, &sb);
321 sbuf_printf(&sb, "length %d ",
322 cm->cm_ccb->csio.dxfer_len);
/* No CCB: fall back to sim name/unit/bus and target id (0xFFFFFFFF if none). */
326 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
327 cam_sim_name(cm->cm_sc->sassc->sim),
328 cam_sim_unit(cm->cm_sc->sassc->sim),
329 cam_sim_bus(cm->cm_sc->sassc->sim),
330 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
334 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
335 sbuf_vprintf(&sb, fmt, ap);
337 mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  For a volume no SAS_IO_UNIT_CONTROL
 * op-remove follows (see comment above that function): on success the
 * target bookkeeping is cleared here directly.
 * NOTE(review): unlike mpssas_remove_device(), IOCStatus/TerminationCount
 * are read without le16toh/le32toh here — possible endian inconsistency in
 * the original; flagged, not changed.
 */
344 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
346 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
347 struct mpssas_target *targ;
352 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* Handle was stashed as complete_data when the TM was mapped. */
353 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
357 /* XXX retry the remove after the diag reset completes? */
358 mps_dprint(sc, MPS_FAULT,
359 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
360 mpssas_free_tm(sc, tm);
364 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
365 mps_dprint(sc, MPS_FAULT,
366 "IOCStatus = 0x%x while resetting device 0x%x\n",
367 reply->IOCStatus, handle);
368 mpssas_free_tm(sc, tm);
372 mps_dprint(sc, MPS_XINFO,
373 "Reset aborted %u commands\n", reply->TerminationCount);
374 mps_free_reply(sc, tm->cm_reply_data);
375 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
377 mps_dprint(sc, MPS_XINFO,
378 "clearing target %u handle 0x%04x\n", targ->tid, handle);
381 * Don't clear target if remove fails because things will get confusing.
382 * Leave the devname and sasaddr intact so that we know to avoid reusing
383 * this target id if possible, and so we can assign the same target id
384 * to this device if it comes back in the future.
386 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
389 targ->encl_handle = 0x0;
390 targ->encl_slot = 0x0;
391 targ->exp_dev_handle = 0x0;
393 targ->linkrate = 0x0;
398 mpssas_free_tm(sc, tm);
403 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
404 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by firmware handle: mark the
 * target in-removal, issue a high-priority target-reset TM, and arrange for
 * mpssas_remove_volume() to finish the teardown on completion.  On WD
 * controllers configured to always expose the disk, this is a no-op.
 */
407 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
409 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
410 struct mps_softc *sc;
411 struct mps_command *cm;
412 struct mpssas_target *targ = NULL;
414 MPS_FUNCTRACE(sassc->sc);
419 * If this is a WD controller, determine if the disk should be exposed
420 * to the OS or not. If disk should be exposed, return from this
421 * function without doing anything.
423 if (sc->WD_available && (sc->WD_hide_expose ==
424 MPS_WD_EXPOSE_ALWAYS)) {
429 targ = mpssas_find_target_by_handle(sassc, 0, handle);
431 /* FIXME: what is the action? */
432 /* We don't know about this device? */
433 mps_dprint(sc, MPS_ERROR,
434 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
438 targ->flags |= MPSSAS_TARGET_INREMOVAL;
440 cm = mpssas_alloc_tm(sc);
442 mps_dprint(sc, MPS_ERROR,
443 "%s: command alloc failure\n", __func__);
/* Rescan so CAM notices the device going away even before the TM runs. */
447 mpssas_rescan_target(sc, targ);
449 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
/* NOTE(review): unlike mpssas_prepare_remove(), no memset(req) and no
 * htole16(DevHandle) here in the visible lines — possible inconsistency
 * in the original; flagged, not changed. */
450 req->DevHandle = targ->handle;
451 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
452 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
454 /* SAS Hard Link Reset / SATA Link Reset */
455 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
459 cm->cm_desc.HighPriority.RequestFlags =
460 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
461 cm->cm_complete = mpssas_remove_volume;
462 cm->cm_complete_data = (void *)(uintptr_t)handle;
463 mps_map_command(sc, cm);
467 * The MPT2 firmware performs debounce on the link to avoid transient link
468 * errors and false removals. When it does decide that link has been lost
469 * and a device need to go away, it expects that the host will perform a
470 * target reset and then an op remove. The reset has the side-effect of
471 * aborting any outstanding requests for the device, which is required for
472 * the op-remove to succeed. It's not clear if the host should check for
473 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device identified by firmware handle: mark the
 * target in-removal and send a high-priority target-reset TM.  The reset
 * aborts outstanding I/O (see the comment above); mpssas_remove_device()
 * then follows up with the SAS_IO_UNIT_CONTROL op-remove.
 */
476 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
478 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
479 struct mps_softc *sc;
480 struct mps_command *cm;
481 struct mpssas_target *targ = NULL;
483 MPS_FUNCTRACE(sassc->sc);
487 targ = mpssas_find_target_by_handle(sassc, 0, handle);
489 /* FIXME: what is the action? */
490 /* We don't know about this device? */
491 mps_dprint(sc, MPS_ERROR,
492 "%s : invalid handle 0x%x \n", __func__, handle);
496 targ->flags |= MPSSAS_TARGET_INREMOVAL;
498 cm = mpssas_alloc_tm(sc);
500 mps_dprint(sc, MPS_ERROR,
501 "%s: command alloc failure\n", __func__);
/* Rescan so CAM notices the device going away even before the TM runs. */
505 mpssas_rescan_target(sc, targ);
507 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
508 memset(req, 0, sizeof(*req));
509 req->DevHandle = htole16(targ->handle);
510 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
511 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
513 /* SAS Hard Link Reset / SATA Link Reset */
514 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
518 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
519 cm->cm_complete = mpssas_remove_device;
520 cm->cm_complete_data = (void *)(uintptr_t)handle;
521 mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM from mpssas_prepare_remove().
 * Validates the reply, then reuses the same command to send the
 * SAS_IO_UNIT_CONTROL REMOVE_DEVICE operation (completion:
 * mpssas_remove_complete), and fails any commands still queued on the
 * target with CAM_DEV_NOT_THERE.
 */
525 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
527 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
528 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
529 struct mpssas_target *targ;
530 struct mps_command *next_cm;
535 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
536 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
540 * Currently there should be no way we can hit this case. It only
541 * happens when we have a failure to allocate chain frames, and
542 * task management commands don't have S/G lists.
544 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
545 mps_dprint(sc, MPS_ERROR,
546 "%s: cm_flags = %#x for remove of handle %#04x! "
547 "This should not happen!\n", __func__, tm->cm_flags,
549 mpssas_free_tm(sc, tm);
554 /* XXX retry the remove after the diag reset completes? */
555 mps_dprint(sc, MPS_FAULT,
556 "%s NULL reply reseting device 0x%04x\n", __func__, handle);
557 mpssas_free_tm(sc, tm);
561 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
562 mps_dprint(sc, MPS_FAULT,
563 "IOCStatus = 0x%x while resetting device 0x%x\n",
564 le16toh(reply->IOCStatus), handle);
565 mpssas_free_tm(sc, tm);
569 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
570 le32toh(reply->TerminationCount));
571 mps_free_reply(sc, tm->cm_reply_data);
572 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
574 /* Reuse the existing command */
575 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
576 memset(req, 0, sizeof(*req));
577 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
578 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
579 req->DevHandle = htole16(handle);
581 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
582 tm->cm_complete = mpssas_remove_complete;
583 tm->cm_complete_data = (void *)(uintptr_t)handle;
585 mps_map_command(sc, tm);
587 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
/* Fail anything the firmware reset may have missed; 'tm' is reused as the
 * iterator here, so it no longer refers to the TM after this loop starts. */
589 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
592 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
593 ccb = tm->cm_complete_data;
594 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
595 mpssas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the SAS_IO_UNIT_CONTROL
 * REMOVE_DEVICE request.  On success, clears the target's handle-related
 * bookkeeping (leaving devname/sasaddr so the target id can be reused for
 * the same device later) and frees any per-LUN state.
 */
600 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
602 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
604 struct mpssas_target *targ;
605 struct mpssas_lun *lun;
609 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
610 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
613 * Currently there should be no way we can hit this case. It only
614 * happens when we have a failure to allocate chain frames, and
615 * task management commands don't have S/G lists.
617 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
618 mps_dprint(sc, MPS_XINFO,
619 "%s: cm_flags = %#x for remove of handle %#04x! "
620 "This should not happen!\n", __func__, tm->cm_flags,
622 mpssas_free_tm(sc, tm);
627 /* most likely a chip reset */
628 mps_dprint(sc, MPS_FAULT,
629 "%s NULL reply removing device 0x%04x\n", __func__, handle);
630 mpssas_free_tm(sc, tm);
634 mps_dprint(sc, MPS_XINFO,
635 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
636 handle, le16toh(reply->IOCStatus));
639 * Don't clear target if remove fails because things will get confusing.
640 * Leave the devname and sasaddr intact so that we know to avoid reusing
641 * this target id if possible, and so we can assign the same target id
642 * to this device if it comes back in the future.
644 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
647 targ->encl_handle = 0x0;
648 targ->encl_slot = 0x0;
649 targ->exp_dev_handle = 0x0;
651 targ->linkrate = 0x0;
/* Drain and free the per-LUN list (free() lines elided in this listing). */
655 while(!SLIST_EMPTY(&targ->luns)) {
656 lun = SLIST_FIRST(&targ->luns);
657 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
663 mpssas_free_tm(sc, tm);
/*
 * Build the MPI2 event mask this driver cares about (device status, SAS
 * discovery/topology, enclosure, IR/RAID, log entries) and register
 * mpssas_evt_handler() for those events with the core driver.
 */
667 mpssas_register_events(struct mps_softc *sc)
669 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
672 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
673 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
674 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
675 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
676 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
677 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
678 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
679 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
680 setbit(events, MPI2_EVENT_IR_VOLUME);
681 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
682 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
683 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
685 mps_register_events(sc, events, mpssas_evt_handler, NULL,
686 &sc->sassc->mpssas_eh);
/*
 * Attach the CAM/SAS layer: allocate the SAS softc and target array, create
 * the SIM queue and SIM, start the firmware-event taskqueue, register the
 * SCSI bus with CAM, freeze the SIM queue until discovery completes, set up
 * the async handler used for EEDP detection, and register for MPI2 events.
 * NOTE(review): error-unwind paths and several statements are elided in
 * this listing; the M_WAITOK allocations cannot return NULL, so the NULL
 * checks visible here look vestigial — confirm against the full source.
 */
692 mps_attach_sas(struct mps_softc *sc)
694 struct mpssas_softc *sassc;
700 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
702 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
706 sassc->targets = malloc(sizeof(struct mpssas_target) *
707 sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
708 if(!sassc->targets) {
709 device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
/* One SIMQ slot per firmware request credit. */
717 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
718 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
723 unit = device_get_unit(sc->mps_dev);
724 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
725 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
726 if (sassc->sim == NULL) {
727 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
732 TAILQ_INIT(&sassc->ev_queue);
734 /* Initialize taskqueue for Event Handling */
735 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
736 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
737 taskqueue_thread_enqueue, &sassc->ev_tq);
739 /* Run the task queue with lowest priority */
740 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
741 device_get_nameunit(sc->mps_dev));
746 * XXX There should be a bus for every port on the adapter, but since
747 * we're just going to fake the topology for now, we'll pretend that
748 * everything is just a target on a single bus.
750 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
751 mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
758 * Assume that discovery events will start right away.
760 * Hold off boot until discovery is complete.
762 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
763 #if (__FreeBSD_version >= 1000039) || \
764 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
767 xpt_freeze_simq(sassc->sim, 1);
769 sc->sassc->startup_refcount = 0;
771 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
772 sassc->discovery_timeouts = 0;
777 * Register for async events so we can determine the EEDP
778 * capabilities of devices.
780 status = xpt_create_path(&sassc->path, /*periph*/NULL,
781 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
783 if (status != CAM_REQ_CMP) {
784 mps_printf(sc, "Error %#x creating sim path\n", status);
/* AC_ADVINFO_CHANGED exists only on newer kernels; older ones use
 * AC_FOUND_DEVICE to trigger the EEDP probe. */
789 #if (__FreeBSD_version >= 1000006) || \
790 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
791 event = AC_ADVINFO_CHANGED;
793 event = AC_FOUND_DEVICE;
795 status = xpt_register_async(event, mpssas_async, sc,
797 if (status != CAM_REQ_CMP) {
798 mps_dprint(sc, MPS_ERROR,
799 "Error %#x registering async handler for "
800 "AC_ADVINFO_CHANGED events\n", status);
801 xpt_free_path(sassc->path);
805 if (status != CAM_REQ_CMP) {
807 * EEDP use is the exception, not the rule.
808 * Warn the user, but do not fail to attach.
810 mps_printf(sc, "EEDP capabilities disabled.\n");
815 mpssas_register_events(sc);
/*
 * Tear down the CAM/SAS layer in reverse of attach: deregister events,
 * drain/free the event taskqueue (with the lock dropped to avoid
 * deadlock), remove the async handler and path, unfreeze the SIM queue if
 * attach left it frozen, deregister and free the SIM and SIMQ, free
 * per-target LUN state, and free the target array.
 */
823 mps_detach_sas(struct mps_softc *sc)
825 struct mpssas_softc *sassc;
826 struct mpssas_lun *lun, *lun_tmp;
827 struct mpssas_target *targ;
832 if (sc->sassc == NULL)
836 mps_deregister_events(sc, sassc->mpssas_eh);
839 * Drain and free the event handling taskqueue with the lock
840 * unheld so that any parallel processing tasks drain properly
841 * without deadlocking.
843 if (sassc->ev_tq != NULL)
844 taskqueue_free(sassc->ev_tq);
846 /* Make sure CAM doesn't wedge if we had to bail out early. */
849 /* Deregister our async handler */
850 if (sassc->path != NULL) {
851 xpt_register_async(0, mpssas_async, sc, sassc->path);
852 xpt_free_path(sassc->path);
856 if (sassc->flags & MPSSAS_IN_STARTUP)
857 xpt_release_simq(sassc->sim, 1);
859 if (sassc->sim != NULL) {
860 xpt_bus_deregister(cam_sim_path(sassc->sim));
861 cam_sim_free(sassc->sim, FALSE);
864 sassc->flags |= MPSSAS_SHUTDOWN;
867 if (sassc->devq != NULL)
868 cam_simq_free(sassc->devq);
870 for(i=0; i< sc->facts->MaxTargets ;i++) {
871 targ = &sassc->targets[i];
872 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
876 free(sassc->targets, M_MPT2);
/*
 * Discovery finished: cancel the pending discovery-timeout callout, if any.
 * (Additional end-of-discovery work appears elided from this listing.)
 */
884 mpssas_discovery_end(struct mpssas_softc *sassc)
886 struct mps_softc *sc = sassc->sc;
890 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
891 callout_stop(&sassc->discovery_callout);
/*
 * Callout fired when discovery has not completed within
 * MPSSAS_DISCOVERY_TIMEOUT seconds — usually a sign interrupts are not
 * being delivered.  Polls the hardware by hand; if discovery then shows
 * complete, finish it, otherwise re-arm the callout up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS times before giving up and forcing
 * discovery to end.
 */
896 mpssas_discovery_timeout(void *data)
898 struct mpssas_softc *sassc = data;
899 struct mps_softc *sc;
905 mps_dprint(sc, MPS_INFO,
906 "Timeout waiting for discovery, interrupts may not be working!\n");
907 sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
909 /* Poll the hardware for events in case interrupts aren't working */
912 mps_dprint(sassc->sc, MPS_INFO,
913 "Finished polling after discovery timeout at %d\n", ticks);
915 if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
916 mpssas_discovery_end(sassc);
918 if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
919 sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
920 callout_reset(&sassc->discovery_callout,
921 MPSSAS_DISCOVERY_TIMEOUT * hz,
922 mpssas_discovery_timeout, sassc);
923 sassc->discovery_timeouts++;
925 mps_dprint(sassc->sc, MPS_FAULT,
926 "Discovery timed out, continuing.\n");
927 sassc->flags &= ~MPSSAS_IN_DISCOVERY;
928 mpssas_discovery_end(sassc);
/*
 * CAM action entry point for the SIM: dispatch on the CCB function code.
 * Handles XPT_PATH_INQ inline (advertise capabilities: wide SAS transport,
 * tagged queueing, no bus reset), XPT_GET_TRAN_SETTINGS (report per-target
 * negotiated link rate), geometry, device reset, SCSI I/O, and (on newer
 * kernels) SMP passthrough.  Must be called with the softc mutex held.
 * NOTE(review): several case labels, break statements, and the xpt_done()
 * call appear elided in this listing.
 */
936 mpssas_action(struct cam_sim *sim, union ccb *ccb)
938 struct mpssas_softc *sassc;
940 sassc = cam_sim_softc(sim);
942 MPS_FUNCTRACE(sassc->sc);
943 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
944 ccb->ccb_h.func_code);
945 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
947 switch (ccb->ccb_h.func_code) {
950 struct ccb_pathinq *cpi = &ccb->cpi;
952 cpi->version_num = 1;
953 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
954 cpi->target_sprt = 0;
955 #if (__FreeBSD_version >= 1000039) || \
956 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
957 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
959 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
961 cpi->hba_eng_cnt = 0;
962 cpi->max_target = sassc->sc->facts->MaxTargets - 1;
964 cpi->initiator_id = 255;
965 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
966 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
967 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
968 cpi->unit_number = cam_sim_unit(sim);
969 cpi->bus_id = cam_sim_bus(sim);
/* Base speed is 1.5Gbps SAS; actual rate reported per-target below. */
970 cpi->base_transfer_speed = 150000;
971 cpi->transport = XPORT_SAS;
972 cpi->transport_version = 0;
973 cpi->protocol = PROTO_SCSI;
974 cpi->protocol_version = SCSI_REV_SPC;
975 #if __FreeBSD_version >= 800001
977 * XXX KDM where does this number come from?
979 cpi->maxio = 256 * 1024;
981 cpi->ccb_h.status = CAM_REQ_CMP;
984 case XPT_GET_TRAN_SETTINGS:
986 struct ccb_trans_settings *cts;
987 struct ccb_trans_settings_sas *sas;
988 struct ccb_trans_settings_scsi *scsi;
989 struct mpssas_target *targ;
992 sas = &cts->xport_specific.sas;
993 scsi = &cts->proto_specific.scsi;
995 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle == 0 means no device present at this target id. */
996 if (targ->handle == 0x0) {
997 cts->ccb_h.status = CAM_SEL_TIMEOUT;
1001 cts->protocol_version = SCSI_REV_SPC2;
1002 cts->transport = XPORT_SAS;
1003 cts->transport_version = 0;
1005 sas->valid = CTS_SAS_VALID_SPEED;
1006 switch (targ->linkrate) {
1008 sas->bitrate = 150000;
1011 sas->bitrate = 300000;
1014 sas->bitrate = 600000;
1020 cts->protocol = PROTO_SCSI;
1021 scsi->valid = CTS_SCSI_VALID_TQ;
1022 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1024 cts->ccb_h.status = CAM_REQ_CMP;
1027 case XPT_CALC_GEOMETRY:
1028 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1029 ccb->ccb_h.status = CAM_REQ_CMP;
1032 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1033 mpssas_action_resetdev(sassc, ccb);
/* Abort/bus-reset requests are acknowledged without doing anything. */
1038 mps_dprint(sassc->sc, MPS_XINFO,
1039 "mpssas_action faking success for abort or reset\n");
1040 ccb->ccb_h.status = CAM_REQ_CMP;
1043 mpssas_action_scsiio(sassc, ccb);
1045 #if __FreeBSD_version >= 900026
1047 mpssas_action_smpio(sassc, ccb);
/* Unsupported function codes fall through to NOTAVAIL. */
1051 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Notify CAM of a reset event (e.g. AC_BUS_RESET, AC_SENT_BDR) on the given
 * target/LUN: build a temporary path, fire the async event, free the path.
 */
1059 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1060 target_id_t target_id, lun_id_t lun_id)
1062 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1063 struct cam_path *path;
1065 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
1066 ac_code, target_id, lun_id);
1068 if (xpt_create_path(&path, NULL,
1069 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1070 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1075 xpt_async(ac_code, path, NULL);
1076 xpt_free_path(path);
/*
 * Diag-reset cleanup: walk every command slot (SMID 0 is reserved, hence
 * starting at 1) and force completion with a NULL reply — run the
 * completion callback, mark polled commands complete, and wake sleepers.
 * Anything still not FREE afterwards is logged as leaked.  Softc mutex
 * must be held.
 */
1080 mpssas_complete_all_commands(struct mps_softc *sc)
1082 struct mps_command *cm;
1087 mtx_assert(&sc->mps_mtx, MA_OWNED);
1089 /* complete all commands with a NULL reply */
1090 for (i = 1; i < sc->num_reqs; i++) {
1091 cm = &sc->commands[i];
1092 cm->cm_reply = NULL;
1095 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1096 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1098 if (cm->cm_complete != NULL) {
1099 mpssas_log_command(cm, MPS_RECOVERY,
1100 "completing cm %p state %x ccb %p for diag reset\n",
1101 cm, cm->cm_state, cm->cm_ccb);
1103 cm->cm_complete(sc, cm);
1107 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1108 mpssas_log_command(cm, MPS_RECOVERY,
1109 "waking up cm %p state %x ccb %p for diag reset\n",
1110 cm, cm->cm_state, cm->cm_ccb);
/* Not completed and not free: should never happen; log for diagnosis. */
1115 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1116 /* this should never happen, but if it does, log */
1117 mpssas_log_command(cm, MPS_RECOVERY,
1118 "cm %p state %x flags 0x%x ccb %p during diag "
1119 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Re-initialize SAS state after a controller diag reset: re-enter startup
 * mode with the SIM queue frozen, announce a bus reset to CAM, force-
 * complete all outstanding commands, reset the startup refcount, and zero
 * every target's firmware handles (they change across reset) while marking
 * them INDIAGRESET so discovery can re-map them.
 */
1126 mpssas_handle_reinit(struct mps_softc *sc)
1130 /* Go back into startup mode and freeze the simq, so that CAM
1131 * doesn't send any commands until after we've rediscovered all
1132 * targets and found the proper device handles for them.
1134 * After the reset, portenable will trigger discovery, and after all
1135 * discovery-related activities have finished, the simq will be
1138 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1139 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1140 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1141 xpt_freeze_simq(sc->sassc->sim, 1);
1143 /* notify CAM of a bus reset */
1144 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1147 /* complete and cleanup after all outstanding commands */
1148 mpssas_complete_all_commands(sc);
1150 mps_dprint(sc, MPS_INIT,
1151 "%s startup %u tm %u after command completion\n",
1152 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1155 * The simq was explicitly frozen above, so set the refcount to 0.
1156 * The simq will be explicitly released after port enable completes.
1158 sc->sassc->startup_refcount = 0;
1160 /* zero all the target handles, since they may change after the
1161 * reset, and we have to rediscover all the targets and use the new
1164 for (i = 0; i < sc->facts->MaxTargets; i++) {
1165 if (sc->sassc->targets[i].outstanding != 0)
1166 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1167 i, sc->sassc->targets[i].outstanding);
1168 sc->sassc->targets[i].handle = 0x0;
1169 sc->sassc->targets[i].exp_dev_handle = 0x0;
1170 sc->sassc->targets[i].outstanding = 0;
1171 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command itself times out.  Logs the
 * stuck TM; the recovery action (elided from this listing — presumably a
 * diag reset) follows.  Called with the softc mutex held.
 */
1176 mpssas_tm_timeout(void *data)
1178 struct mps_command *tm = data;
1179 struct mps_softc *sc = tm->cm_sc;
1181 mtx_assert(&sc->mps_mtx, MA_OWNED);
1183 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1184 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL UNIT RESET TM.  Stops the TM timeout
 * callout, validates the reply (NULL reply during a diag reset just means
 * cleanup), then checks whether commands are still outstanding on the LUN:
 * if none, recovery for this LU is done — announce AC_SENT_BDR and move on
 * to the next timed-out command (or free the TM); if some remain, the LU
 * reset effectively failed and recovery escalates to a TARGET RESET.
 */
1189 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1191 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1192 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1193 unsigned int cm_count = 0;
1194 struct mps_command *cm;
1195 struct mpssas_target *targ;
1197 callout_stop(&tm->cm_callout);
1199 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1200 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1204 * Currently there should be no way we can hit this case. It only
1205 * happens when we have a failure to allocate chain frames, and
1206 * task management commands don't have S/G lists.
1207 * XXXSL So should it be an assertion?
1209 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1210 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1211 "This should not happen!\n", __func__, tm->cm_flags);
1212 mpssas_free_tm(sc, tm);
1216 if (reply == NULL) {
1217 mpssas_log_command(tm, MPS_RECOVERY,
1218 "NULL reset reply for tm %p\n", tm);
1219 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1220 /* this completion was due to a reset, just cleanup */
1221 targ->flags &= ~MPSSAS_TARGET_INRESET;
1223 mpssas_free_tm(sc, tm);
1226 /* we should have gotten a reply. */
1232 mpssas_log_command(tm, MPS_RECOVERY,
1233 "logical unit reset status 0x%x code 0x%x count %u\n",
1234 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1235 le32toh(reply->TerminationCount));
1237 /* See if there are any outstanding commands for this LUN.
1238 * This could be made more efficient by using a per-LU data
1239 * structure of some sort.
1241 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1242 if (cm->cm_lun == tm->cm_lun)
1246 if (cm_count == 0) {
1247 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1248 "logical unit %u finished recovery after reset\n",
1251 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1254 /* we've finished recovery for this logical unit. check and
1255 * see if some other logical unit has a timedout command
1256 * that needs to be processed.
1258 cm = TAILQ_FIRST(&targ->timedout_commands);
1260 mpssas_send_abort(sc, tm, cm);
1264 mpssas_free_tm(sc, tm);
1268 /* if we still have commands for this LUN, the reset
1269 * effectively failed, regardless of the status reported.
1270 * Escalate to a target reset.
1272 mpssas_log_command(tm, MPS_RECOVERY,
1273 "logical unit reset complete for tm %p, but still have %u command(s)\n",
1275 mpssas_send_reset(sc, tm,
1276 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET TM issued by mpssas_send_reset().
 * Clears the target's INRESET flag; if the target has no outstanding
 * commands, recovery is finished (announce the BDR, free the TM).  If
 * commands are still outstanding the reset effectively failed and
 * recovery escalates further (escalation path elided from this view).
 */
1281 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1283 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1284 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1285 struct mpssas_target *targ;
/* Disarm the mpssas_tm_timeout callout for this TM. */
1287 callout_stop(&tm->cm_callout);
1289 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1290 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1294 * Currently there should be no way we can hit this case. It only
1295 * happens when we have a failure to allocate chain frames, and
1296 * task management commands don't have S/G lists.
1298 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1299 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1300 "This should not happen!\n", __func__, tm->cm_flags);
1301 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1305 if (reply == NULL) {
1306 mpssas_log_command(tm, MPS_RECOVERY,
1307 "NULL reset reply for tm %p\n", tm);
1308 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1309 /* this completion was due to a reset, just cleanup */
/* NOTE(review): targ is used without a visible assignment (presumably
 * targ = tm->cm_targ, elided from this view) — confirm initialization. */
1310 targ->flags &= ~MPSSAS_TARGET_INRESET;
1312 mpssas_free_tm(sc, tm);
1315 /* we should have gotten a reply. */
1321 mpssas_log_command(tm, MPS_RECOVERY,
1322 "target reset status 0x%x code 0x%x count %u\n",
1323 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1324 le32toh(reply->TerminationCount));
/* The reset round-trip is done, whatever the outcome. */
1326 targ->flags &= ~MPSSAS_TARGET_INRESET;
1328 if (targ->outstanding == 0) {
1329 /* we've finished recovery for this target and all
1330 * of its logical units.
1332 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1333 "recovery finished after target reset\n");
1335 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1339 mpssas_free_tm(sc, tm);
1342 /* after a target reset, if this target still has
1343 * outstanding commands, the reset effectively failed,
1344 * regardless of the status reported. escalate.
1346 mpssas_log_command(tm, MPS_RECOVERY,
1347 "target reset complete for tm %p, but still have %u command(s)\n",
1348 tm, targ->outstanding);
1353 #define MPS_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI Task Management reset for the target attached
 * to the TM command 'tm'.  'type' selects LOGICAL_UNIT_RESET or
 * TARGET_RESET; the matching completion handler is installed and a
 * MPS_RESET_TIMEOUT-second callout guards against the TM itself hanging.
 * Bails out early if the target has no firmware device handle.
 */
1356 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1358 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1359 struct mpssas_target *target;
1362 target = tm->cm_targ;
/* A zero handle means the device is gone (or never mapped); nothing to reset. */
1363 if (target->handle == 0) {
1364 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1365 __func__, target->tid);
1369 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1370 req->DevHandle = htole16(target->handle);
1371 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1372 req->TaskType = type;
1374 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1375 /* XXX Need to handle invalid LUNs */
1376 MPS_SET_LUN(req->LUN, tm->cm_lun);
1377 tm->cm_targ->logical_unit_resets++;
1378 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1379 "sending logical unit reset\n");
1380 tm->cm_complete = mpssas_logical_unit_reset_complete;
1382 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1383 /* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1384 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1385 tm->cm_targ->target_resets++;
/* Mark the target so the I/O path knows recovery is in progress;
 * cleared again in mpssas_target_reset_complete(). */
1386 tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1387 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1388 "sending target reset\n");
1389 tm->cm_complete = mpssas_target_reset_complete;
1392 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TM requests go out on the high-priority descriptor queue. */
1397 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1398 tm->cm_complete_data = (void *)tm;
/* Arm the watchdog before handing the TM to the hardware. */
1400 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1401 mpssas_tm_timeout, tm);
/* NOTE(review): err's declaration is not visible in this view. */
1403 err = mps_map_command(sc, tm);
1405 mpssas_log_command(tm, MPS_RECOVERY,
1406 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK TM issued by mpssas_send_abort().
 * Three outcomes: the target's timed-out list is empty (recovery done,
 * free the TM); the head of the list is a different command than the one
 * just aborted (continue by aborting that one); or the aborted command
 * is still at the head, meaning the abort failed — escalate to a
 * logical unit reset.
 */
1414 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1416 struct mps_command *cm;
1417 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1418 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1419 struct mpssas_target *targ;
/* Disarm the mpssas_tm_timeout callout for this TM. */
1421 callout_stop(&tm->cm_callout);
1423 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1424 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1428 * Currently there should be no way we can hit this case. It only
1429 * happens when we have a failure to allocate chain frames, and
1430 * task management commands don't have S/G lists.
1432 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1433 mpssas_log_command(tm, MPS_RECOVERY,
1434 "cm_flags = %#x for abort %p TaskMID %u!\n",
1435 tm->cm_flags, tm, le16toh(req->TaskMID));
1436 mpssas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1440 if (reply == NULL) {
1441 mpssas_log_command(tm, MPS_RECOVERY,
1442 "NULL abort reply for tm %p TaskMID %u\n",
1443 tm, le16toh(req->TaskMID));
1444 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1445 /* this completion was due to a reset, just cleanup */
1447 mpssas_free_tm(sc, tm);
1450 /* we should have gotten a reply. */
1456 mpssas_log_command(tm, MPS_RECOVERY,
1457 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1458 le16toh(req->TaskMID),
1459 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1460 le32toh(reply->TerminationCount));
/* Peek at the next timed-out command for this target, if any. */
1462 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1464 /* if there are no more timedout commands, we're done with
1465 * error recovery for this target.
1467 mpssas_log_command(tm, MPS_RECOVERY,
1468 "finished recovery after aborting TaskMID %u\n",
1469 le16toh(req->TaskMID));
1472 mpssas_free_tm(sc, tm);
/* The aborted command left the list, but others remain: keep going. */
1474 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1475 /* abort success, but we have more timedout commands to abort */
1476 mpssas_log_command(tm, MPS_RECOVERY,
1477 "continuing recovery after aborting TaskMID %u\n",
1478 le16toh(req->TaskMID));
1480 mpssas_send_abort(sc, tm, cm);
1483 /* we didn't get a command completion, so the abort
1484 * failed as far as we're concerned. escalate.
1486 mpssas_log_command(tm, MPS_RECOVERY,
1487 "abort failed for TaskMID %u tm %p\n",
1488 le16toh(req->TaskMID), tm);
1490 mpssas_send_reset(sc, tm,
1491 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1495 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK TM for the timed-out command 'cm',
 * reusing the TM frame 'tm'.  Installs mpssas_abort_complete() as the
 * completion handler and arms an MPS_ABORT_TIMEOUT-second watchdog.
 * Bails out if the target has no firmware device handle.
 */
1498 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1500 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1501 struct mpssas_target *targ;
/* NOTE(review): targ is used without a visible assignment (presumably
 * targ = cm->cm_targ, elided from this view) — confirm initialization. */
1505 if (targ->handle == 0) {
1506 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1507 __func__, cm->cm_ccb->ccb_h.target_id);
1511 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1512 "Aborting command %p\n", cm);
1514 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1515 req->DevHandle = htole16(targ->handle);
1516 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1517 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1519 /* XXX Need to handle invalid LUNs */
1520 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the victim request by its SMID in the firmware. */
1522 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests go out on the high-priority descriptor queue. */
1525 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1526 tm->cm_complete = mpssas_abort_complete;
1527 tm->cm_complete_data = (void *)tm;
1528 tm->cm_targ = cm->cm_targ;
1529 tm->cm_lun = cm->cm_lun;
/* Arm the watchdog before handing the TM to the hardware. */
1531 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1532 mpssas_tm_timeout, tm);
1536 err = mps_map_command(sc, tm);
1538 mpssas_log_command(tm, MPS_RECOVERY,
1539 "error %d sending abort for cm %p SMID %u\n",
1540 err, cm, req->TaskMID);
/*
 * Per-command timeout handler for normal SCSI I/O (armed in
 * mpssas_action_scsiio()).  Marks the command TIMEDOUT, queues it on
 * the target's timed-out list, and kicks off recovery: either piggyback
 * on the target's in-flight TM, allocate a new TM and send an abort, or
 * (if no TM credit is available) leave it queued.
 */
1546 mpssas_scsiio_timeout(void *data)
1548 struct mps_softc *sc;
1549 struct mps_command *cm;
1550 struct mpssas_target *targ;
1552 cm = (struct mps_command *)data;
/* The callout is mtx-protected, so the softc lock must be held here. */
1556 mtx_assert(&sc->mps_mtx, MA_OWNED);
/* NOTE(review): this debug line prints 'sc' though the text says "cm" —
 * looks like it was meant to pass cm; harmless, debug-only. */
1558 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1561 * Run the interrupt handler to make sure it's not pending. This
1562 * isn't perfect because the command could have already completed
1563 * and been re-used, though this is unlikely.
1565 mps_intr_locked(sc);
/* If draining the interrupt completed the command, it was a near miss. */
1566 if (cm->cm_state == MPS_CM_STATE_FREE) {
1567 mpssas_log_command(cm, MPS_XINFO,
1568 "SCSI command %p almost timed out\n", cm);
1572 if (cm->cm_ccb == NULL) {
1573 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1577 mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1583 /* XXX first, check the firmware state, to see if it's still
1584 * operational. if not, do a diag reset.
/* Fail the CCB with a timeout and hand the command to recovery. */
1587 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1588 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1589 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1591 if (targ->tm != NULL) {
1592 /* target already in recovery, just queue up another
1593 * timedout command to be processed later.
1595 mps_dprint(sc, MPS_RECOVERY,
1596 "queued timedout cm %p for processing by tm %p\n",
1599 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1600 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1603 /* start recovery by aborting the first timedout command */
1604 mpssas_send_abort(sc, targ->tm, cm);
1607 /* XXX queue this target up for recovery once a TM becomes
1608 * available. The firmware only has a limited number of
1609 * HighPriority credits for the high priority requests used
1610 * for task management, and we ran out.
1612 * Isilon: don't worry about this for now, since we have
1613 * more credits than disks in an enclosure, and limit
1614 * ourselves to one TM per target for recovery.
1616 mps_dprint(sc, MPS_RECOVERY,
1617 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler: validate the target, translate the
 * CCB into an MPI2 SCSI_IO request (direction, tagging, LUN, CDB, and
 * optional EEDP/DIF protection fields), arm the per-command timeout,
 * and hand the command to the hardware via mps_map_command().
 * On resource shortage the SIM queue is frozen and the CCB requeued.
 */
1623 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1625 MPI2_SCSI_IO_REQUEST *req;
1626 struct ccb_scsiio *csio;
1627 struct mps_softc *sc;
1628 struct mpssas_target *targ;
1629 struct mpssas_lun *lun;
1630 struct mps_command *cm;
1631 uint8_t i, lba_byte, *ref_tag_addr;
1632 uint16_t eedp_flags;
1633 uint32_t mpi_control;
/* NOTE(review): the assignments of sc and csio (presumably sc = sassc->sc,
 * csio = &ccb->csio) are not visible in this view — confirm. */
1637 mtx_assert(&sc->mps_mtx, MA_OWNED);
1640 targ = &sassc->targets[csio->ccb_h.target_id];
1641 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* --- Validation: reject I/O to unmapped or ineligible targets. --- */
1642 if (targ->handle == 0x0) {
1643 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1644 __func__, csio->ccb_h.target_id);
1645 csio->ccb_h.status = CAM_SEL_TIMEOUT;
/* RAID component members are owned by the IR firmware; no direct I/O. */
1649 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1650 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1651 "supported %u\n", __func__, csio->ccb_h.target_id);
1652 csio->ccb_h.status = CAM_TID_INVALID;
1657 * Sometimes, it is possible to get a command that is not "In
1658 * Progress" and was actually aborted by the upper layer. Check for
1659 * this here and complete the command without error.
1661 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1662 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1663 "target %u\n", __func__, csio->ccb_h.target_id);
1668 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1669 * that the volume has timed out. We want volumes to be enumerated
1670 * until they are deleted/removed, not just failed.
1672 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1673 if (targ->devinfo == 0)
1674 csio->ccb_h.status = CAM_REQ_CMP;
1676 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1681 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1682 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1683 csio->ccb_h.status = CAM_TID_INVALID;
/* --- Allocate a command frame; on shortage freeze the SIM and requeue. --- */
1688 cm = mps_alloc_command(sc);
1690 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1691 xpt_freeze_simq(sassc->sim, 1);
1692 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1694 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1695 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* --- Fill in the MPI2 SCSI_IO request (little-endian wire format). --- */
1700 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1701 bzero(req, sizeof(*req));
1702 req->DevHandle = htole16(targ->handle);
1703 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1705 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1706 req->SenseBufferLength = MPS_SENSE_LEN;
1708 req->ChainOffset = 0;
1709 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1714 req->DataLength = htole32(csio->dxfer_len);
1715 req->BidirectionalDataLength = 0;
1716 req->IoFlags = htole16(csio->cdb_len);
1719 /* Note: BiDirectional transfers are not supported */
1720 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1722 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1723 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1726 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1727 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1731 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field (units of 4 bytes). */
1735 if (csio->cdb_len == 32)
1736 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1738 * It looks like the hardware doesn't require an explicit tag
1739 * number for each transaction. SAM Task Management not supported
/* --- Map the CAM tag action onto the MPI2 queueing control bits. --- */
1742 switch (csio->tag_action) {
1743 case MSG_HEAD_OF_Q_TAG:
1744 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1746 case MSG_ORDERED_Q_TAG:
1747 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1750 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1752 case CAM_TAG_ACTION_NONE:
1753 case MSG_SIMPLE_Q_TAG:
1755 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Apply the per-target Transport Layer Retries setting. */
1758 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1759 req->Control = htole32(mpi_control);
1760 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1761 mps_free_command(sc, cm);
1762 ccb->ccb_h.status = CAM_LUN_INVALID;
/* Copy the CDB from wherever CAM put it (pointer or inline bytes). */
1767 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1768 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1770 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1771 req->IoFlags = htole16(csio->cdb_len);
1774 * Check if EEDP is supported and enabled. If it is then check if the
1775 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1776 * is formatted for EEDP support. If all of this is true, set CDB up
1777 * for EEDP transfer.
1779 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1780 if (sc->eedp_enabled && eedp_flags) {
1781 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1782 if (lun->lun_id == csio->ccb_h.target_lun) {
1787 if ((lun != NULL) && (lun->eedp_formatted)) {
1788 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1789 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1790 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1791 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1792 req->EEDPFlags = htole16(eedp_flags);
1795 * If CDB less than 32, fill in Primary Ref Tag with
1796 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1797 * already there. Also, set protection bit. FreeBSD
1798 * currently does not support CDBs bigger than 16, but
1799 * the code doesn't hurt, and will be here for the
1802 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 in 16-byte CDBs, byte 2 otherwise. */
1803 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1804 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1805 PrimaryReferenceTag;
1806 for (i = 0; i < 4; i++) {
1808 req->CDB.CDB32[lba_byte + i];
1811 req->CDB.EEDP32.PrimaryReferenceTag =
1812 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1813 req->CDB.EEDP32.PrimaryApplicationTagMask =
1815 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1819 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1820 req->EEDPFlags = htole16(eedp_flags);
1821 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* --- Attach data buffer, completion, and descriptor, then submit. --- */
1827 cm->cm_length = csio->dxfer_len;
1828 if (cm->cm_length != 0) {
1830 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1834 cm->cm_sge = &req->SGL;
1835 cm->cm_sglsize = (32 - 24) * 4;
1836 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1837 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1838 cm->cm_complete = mpssas_scsiio_complete;
1839 cm->cm_complete_data = ccb;
1841 cm->cm_lun = csio->ccb_h.target_lun;
1845 * If HBA is a WD and the command is not for a retry, try to build a
1846 * direct I/O message. If failed, or the command is for a retry, send
1847 * the I/O to the IR volume itself.
1849 if (sc->WD_valid_config) {
1850 if (ccb->ccb_h.status != MPS_WD_RETRY) {
1851 mpssas_direct_drive_io(sassc, cm, ccb);
1853 ccb->ccb_h.status = CAM_REQ_INPROG;
/* CCB timeout is in milliseconds; convert to ticks for the callout. */
1857 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1858 mpssas_scsiio_timeout, cm);
1861 targ->outstanding++;
1862 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1863 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1865 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1866 __func__, cm, ccb, targ->outstanding);
1868 mps_map_command(sc, cm);
/*
 * Log a human-readable description of an MPI2 task-management response
 * code.  Debug aid only: the translated string is emitted via
 * mps_dprint() at MPS_XINFO level; nothing is returned to the caller.
 */
1873 mps_response_code(struct mps_softc *sc, u8 response_code)
1877 switch (response_code) {
1878 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1879 desc = "task management request completed";
1881 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1882 desc = "invalid frame";
1884 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1885 desc = "task management request not supported";
1887 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1888 desc = "task management request failed";
1890 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1891 desc = "task management request succeeded";
1893 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1894 desc = "invalid lun";
1897 desc = "overlapped tag attempted";
1899 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1900 desc = "task queued, however not sent to target";
1906 mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1907 response_code, desc);
1910 * mps_sc_failed_io_info - translate a non-successful SCSI_IO reply
/*
 * Decode the IOCStatus, SCSIStatus, and SCSIState fields of a failed
 * SCSI_IO reply into human-readable strings and log them at MPS_XINFO.
 * Also prints the sense buffer when autosense data is valid, and
 * decodes the response code when response info is present.
 */
1913 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1914 Mpi2SCSIIOReply_t *mpi_reply)
1918 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1919 MPI2_IOCSTATUS_MASK;
1920 u8 scsi_state = mpi_reply->SCSIState;
1921 u8 scsi_status = mpi_reply->SCSIStatus;
1922 char *desc_ioc_state = NULL;
1923 char *desc_scsi_status = NULL;
/* scsi_state flags are strcat'd into the softc scratch buffer below;
 * assumes sc->tmp_string is large enough for all flag strings combined. */
1924 char *desc_scsi_state = sc->tmp_string;
1925 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is a firmware log-info code that is filtered out here
 * (the action taken is elided from this view). */
1927 if (log_info == 0x31170000)
/* --- Translate the IOC status field. --- */
1930 switch (ioc_status) {
1931 case MPI2_IOCSTATUS_SUCCESS:
1932 desc_ioc_state = "success";
1934 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1935 desc_ioc_state = "invalid function";
1937 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1938 desc_ioc_state = "scsi recovered error";
1940 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1941 desc_ioc_state = "scsi invalid dev handle";
1943 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1944 desc_ioc_state = "scsi device not there";
1946 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1947 desc_ioc_state = "scsi data overrun";
1949 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1950 desc_ioc_state = "scsi data underrun";
1952 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1953 desc_ioc_state = "scsi io data error";
1955 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1956 desc_ioc_state = "scsi protocol error";
1958 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1959 desc_ioc_state = "scsi task terminated";
1961 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1962 desc_ioc_state = "scsi residual mismatch";
1964 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1965 desc_ioc_state = "scsi task mgmt failed";
1967 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1968 desc_ioc_state = "scsi ioc terminated";
1970 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1971 desc_ioc_state = "scsi ext terminated";
1973 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1974 desc_ioc_state = "eedp guard error";
1976 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1977 desc_ioc_state = "eedp ref tag error";
1979 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1980 desc_ioc_state = "eedp app tag error";
1983 desc_ioc_state = "unknown";
/* --- Translate the SCSI status byte. --- */
1987 switch (scsi_status) {
1988 case MPI2_SCSI_STATUS_GOOD:
1989 desc_scsi_status = "good";
1991 case MPI2_SCSI_STATUS_CHECK_CONDITION:
1992 desc_scsi_status = "check condition";
1994 case MPI2_SCSI_STATUS_CONDITION_MET:
1995 desc_scsi_status = "condition met";
1997 case MPI2_SCSI_STATUS_BUSY:
1998 desc_scsi_status = "busy";
2000 case MPI2_SCSI_STATUS_INTERMEDIATE:
2001 desc_scsi_status = "intermediate";
2003 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2004 desc_scsi_status = "intermediate condmet";
2006 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2007 desc_scsi_status = "reservation conflict";
2009 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2010 desc_scsi_status = "command terminated";
2012 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2013 desc_scsi_status = "task set full";
2015 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2016 desc_scsi_status = "aca active";
2018 case MPI2_SCSI_STATUS_TASK_ABORTED:
2019 desc_scsi_status = "task aborted";
2022 desc_scsi_status = "unknown";
/* --- Build the SCSI-state flag string in the scratch buffer. --- */
2026 desc_scsi_state[0] = '\0';
/* NOTE(review): this reassigns the pointer to a string literal; the
 * guarding condition (presumably "if (!scsi_state)", so no strcat can
 * follow) is elided from this view — confirm, since strcat into a
 * literal would be undefined behavior. */
2028 desc_scsi_state = " ";
2029 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2030 strcat(desc_scsi_state, "response info ");
2031 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2032 strcat(desc_scsi_state, "state terminated ");
2033 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2034 strcat(desc_scsi_state, "no status ");
2035 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2036 strcat(desc_scsi_state, "autosense failed ");
2037 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2038 strcat(desc_scsi_state, "autosense valid ");
2040 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2041 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2042 /* We can add more detail about underflow data here
2045 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2046 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2047 desc_scsi_state, scsi_state);
/* Dump the sense buffer only when debug output is on and sense is valid. */
2049 if (sc->mps_debug & MPS_XINFO &&
2050 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2051 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2052 scsi_sense_print(csio);
2053 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2056 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2057 response_info = le32toh(mpi_reply->ResponseInfo);
2058 response_bytes = (u8 *)&response_info;
/* The response code lives in the low-order byte of ResponseInfo. */
2059 mps_response_code(sc,response_bytes[0]);
2064 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2066 MPI2_SCSI_IO_REPLY *rep;
2068 struct ccb_scsiio *csio;
2069 struct mpssas_softc *sassc;
2070 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2071 u8 *TLR_bits, TLR_on;
2076 mps_dprint(sc, MPS_TRACE,
2077 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2078 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2079 cm->cm_targ->outstanding);
2081 callout_stop(&cm->cm_callout);
2082 mtx_assert(&sc->mps_mtx, MA_OWNED);
2085 ccb = cm->cm_complete_data;
2087 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2089 * XXX KDM if the chain allocation fails, does it matter if we do
2090 * the sync and unload here? It is simpler to do it in every case,
2091 * assuming it doesn't cause problems.
2093 if (cm->cm_data != NULL) {
2094 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2095 dir = BUS_DMASYNC_POSTREAD;
2096 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2097 dir = BUS_DMASYNC_POSTWRITE;
2098 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2099 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2102 cm->cm_targ->completed++;
2103 cm->cm_targ->outstanding--;
2104 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2105 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2107 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2108 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2109 if (cm->cm_reply != NULL)
2110 mpssas_log_command(cm, MPS_RECOVERY,
2111 "completed timedout cm %p ccb %p during recovery "
2112 "ioc %x scsi %x state %x xfer %u\n",
2114 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2115 le32toh(rep->TransferCount));
2117 mpssas_log_command(cm, MPS_RECOVERY,
2118 "completed timedout cm %p ccb %p during recovery\n",
2120 } else if (cm->cm_targ->tm != NULL) {
2121 if (cm->cm_reply != NULL)
2122 mpssas_log_command(cm, MPS_RECOVERY,
2123 "completed cm %p ccb %p during recovery "
2124 "ioc %x scsi %x state %x xfer %u\n",
2126 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2127 le32toh(rep->TransferCount));
2129 mpssas_log_command(cm, MPS_RECOVERY,
2130 "completed cm %p ccb %p during recovery\n",
2132 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2133 mpssas_log_command(cm, MPS_RECOVERY,
2134 "reset completed cm %p ccb %p\n",
2138 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2140 * We ran into an error after we tried to map the command,
2141 * so we're getting a callback without queueing the command
2142 * to the hardware. So we set the status here, and it will
2143 * be retained below. We'll go through the "fast path",
2144 * because there can be no reply when we haven't actually
2145 * gone out to the hardware.
2147 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2150 * Currently the only error included in the mask is
2151 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2152 * chain frames. We need to freeze the queue until we get
2153 * a command that completed without this error, which will
2154 * hopefully have some chain frames attached that we can
2155 * use. If we wanted to get smarter about it, we would
2156 * only unfreeze the queue in this condition when we're
2157 * sure that we're getting some chain frames back. That's
2158 * probably unnecessary.
2160 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2161 xpt_freeze_simq(sassc->sim, 1);
2162 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2163 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2164 "freezing SIM queue\n");
2168 /* Take the fast path to completion */
2169 if (cm->cm_reply == NULL) {
2170 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2171 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2172 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2174 ccb->ccb_h.status = CAM_REQ_CMP;
2175 ccb->csio.scsi_status = SCSI_STATUS_OK;
2177 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2178 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2179 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2180 mps_dprint(sc, MPS_XINFO,
2181 "Unfreezing SIM queue\n");
2186 * There are two scenarios where the status won't be
2187 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2188 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2190 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2192 * Freeze the dev queue so that commands are
2193 * executed in the correct order with after error
2196 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2197 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2199 mps_free_command(sc, cm);
2204 mpssas_log_command(cm, MPS_XINFO,
2205 "ioc %x scsi %x state %x xfer %u\n",
2206 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2207 le32toh(rep->TransferCount));
2210 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2211 * Volume if an error occurred (normal I/O retry). Use the original
2212 * CCB, but set a flag that this will be a retry so that it's sent to
2213 * the original volume. Free the command but reuse the CCB.
2215 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2216 mps_free_command(sc, cm);
2217 ccb->ccb_h.status = MPS_WD_RETRY;
2218 mpssas_action_scsiio(sassc, ccb);
2222 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2223 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2224 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2226 case MPI2_IOCSTATUS_SUCCESS:
2227 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2229 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2230 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2231 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2233 /* Completion failed at the transport level. */
2234 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2235 MPI2_SCSI_STATE_TERMINATED)) {
2236 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2240 /* In a modern packetized environment, an autosense failure
2241 * implies that there's not much else that can be done to
2242 * recover the command.
2244 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2245 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2250 * CAM doesn't care about SAS Response Info data, but if this is
2251 * the state check if TLR should be done. If not, clear the
2252 * TLR_bits for the target.
2254 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2255 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2256 MPS_SCSI_RI_INVALID_FRAME)) {
2257 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2258 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2262 * Intentionally override the normal SCSI status reporting
2263 * for these two cases. These are likely to happen in a
2264 * multi-initiator environment, and we want to make sure that
2265 * CAM retries these commands rather than fail them.
2267 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2268 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2269 ccb->ccb_h.status = CAM_REQ_ABORTED;
2273 /* Handle normal status and sense */
2274 csio->scsi_status = rep->SCSIStatus;
2275 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2276 ccb->ccb_h.status = CAM_REQ_CMP;
2278 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2280 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2281 int sense_len, returned_sense_len;
2283 returned_sense_len = min(le32toh(rep->SenseCount),
2284 sizeof(struct scsi_sense_data));
2285 if (returned_sense_len < ccb->csio.sense_len)
2286 ccb->csio.sense_resid = ccb->csio.sense_len -
2289 ccb->csio.sense_resid = 0;
2291 sense_len = min(returned_sense_len,
2292 ccb->csio.sense_len - ccb->csio.sense_resid);
2293 bzero(&ccb->csio.sense_data,
2294 sizeof(ccb->csio.sense_data));
2295 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2296 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2300 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2301 * and it's page code 0 (Supported Page List), and there is
2302 * inquiry data, and this is for a sequential access device, and
2303 * the device is an SSP target, and TLR is supported by the
2304 * controller, turn the TLR_bits value ON if page 0x90 is
2307 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2308 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2309 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2310 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2311 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2312 T_SEQUENTIAL) && (sc->control_TLR) &&
2313 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2314 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2315 vpd_list = (struct scsi_vpd_supported_page_list *)
2317 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2319 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2320 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2321 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2322 csio->cdb_io.cdb_bytes[4];
2323 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2324 if (vpd_list->list[i] == 0x90) {
2331 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2332 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2334 * If devinfo is 0 this will be a volume. In that case don't
2335 * tell CAM that the volume is not there. We want volumes to
2336 * be enumerated until they are deleted/removed, not just
2339 if (cm->cm_targ->devinfo == 0)
2340 ccb->ccb_h.status = CAM_REQ_CMP;
2342 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2344 case MPI2_IOCSTATUS_INVALID_SGL:
2345 mps_print_scsiio_cmd(sc, cm);
2346 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2348 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2350 * This is one of the responses that comes back when an I/O
2351 * has been aborted. If it is because of a timeout that we
2352 * initiated, just set the status to CAM_CMD_TIMEOUT.
2353 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2354 * command is the same (it gets retried, subject to the
2355 * retry counter), the only difference is what gets printed
2358 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2359 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2361 ccb->ccb_h.status = CAM_REQ_ABORTED;
2363 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2364 /* resid is ignored for this condition */
2366 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2368 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2369 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2371 * Since these are generally external (i.e. hopefully
2372 * transient transport-related) errors, retry these without
2373 * decrementing the retry count.
2375 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2376 mpssas_log_command(cm, MPS_INFO,
2377 "terminated ioc %x scsi %x state %x xfer %u\n",
2378 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2379 le32toh(rep->TransferCount));
2381 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2382 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2383 case MPI2_IOCSTATUS_INVALID_VPID:
2384 case MPI2_IOCSTATUS_INVALID_FIELD:
2385 case MPI2_IOCSTATUS_INVALID_STATE:
2386 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2387 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2388 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2389 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2390 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2392 mpssas_log_command(cm, MPS_XINFO,
2393 "completed ioc %x scsi %x state %x xfer %u\n",
2394 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2395 le32toh(rep->TransferCount));
2396 csio->resid = cm->cm_length;
2397 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2401 mps_sc_failed_io_info(sc,csio,rep);
2403 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2404 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2405 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2406 mps_dprint(sc, MPS_XINFO, "Command completed, "
2407 "unfreezing SIM queue\n");
2410 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2411 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2412 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2415 mps_free_command(sc, cm);
/*
 * mpssas_direct_drive_io: for WarpDrive (DD_*) volumes, attempt to convert a
 * READ/WRITE (6/10/12/16-byte CDB) aimed at the IR volume into a direct I/O
 * against the underlying physical disk, provided the transfer does not cross
 * a stripe boundary.  On success the request's DevHandle and CDB LBA bytes
 * are rewritten in place and MPS_CM_FLAGS_DD_IO is set; otherwise the I/O is
 * left untouched and goes to the volume.
 *
 * FIX (review): the 16-byte-CDB >2TB path assembled the 64-bit LBA with
 * "CDB[2] << 54".  Per SBC, bytes 2-9 of a READ(16)/WRITE(16) CDB hold a
 * big-endian 64-bit LBA, so byte 2 is the most-significant byte and must be
 * shifted by 56 (the following bytes use 48, 40, 32, ... as this code does).
 * Shifting by 54 corrupts the decoded LBA for any access beyond 2 TB.
 */
2419 /* All Request reached here are Endian safe */
2421 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2423 pMpi2SCSIIORequest_t pIO_req;
2424 struct mps_softc *sc = sassc->sc;
2426 uint32_t physLBA, stripe_offset, stripe_unit;
2427 uint32_t io_size, column;
2428 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2431 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2432 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2433 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2434 * bit different than the 10/16 CDBs, handle them separately.
2436 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2437 CDB = pIO_req->CDB.CDB32;
2440 * Handle 6 byte CDBs.
2442 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2443 (CDB[0] == WRITE_6))) {
2445 * Get the transfer size in blocks.
2447 io_size = (cm->cm_length >> sc->DD_block_exponent);
2450 * Get virtual LBA given in the CDB.
2452 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2453 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2456 * Check that LBA range for I/O does not exceed volume's
2459 if ((virtLBA + (uint64_t)io_size - 1) <=
2462 * Check if the I/O crosses a stripe boundary. If not,
2463 * translate the virtual LBA to a physical LBA and set
2464 * the DevHandle for the PhysDisk to be used. If it
2465 * does cross a boundry, do normal I/O. To get the
2466 * right DevHandle to use, get the map number for the
2467 * column, then use that map number to look up the
2468 * DevHandle of the PhysDisk.
2470 stripe_offset = (uint32_t)virtLBA &
2471 (sc->DD_stripe_size - 1);
2472 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2473 physLBA = (uint32_t)virtLBA >>
2474 sc->DD_stripe_exponent;
2475 stripe_unit = physLBA / sc->DD_num_phys_disks;
2476 column = physLBA % sc->DD_num_phys_disks;
2477 pIO_req->DevHandle =
2478 htole16(sc->DD_column_map[column].dev_handle);
2479 /* ???? Is this endian safe*/
2480 cm->cm_desc.SCSIIO.DevHandle =
2483 physLBA = (stripe_unit <<
2484 sc->DD_stripe_exponent) + stripe_offset;
/* Rewrite the 21-bit LBA field of the 6-byte CDB (bytes 1-3) in place. */
2485 ptrLBA = &pIO_req->CDB.CDB32[1];
2486 physLBA_byte = (uint8_t)(physLBA >> 16);
2487 *ptrLBA = physLBA_byte;
2488 ptrLBA = &pIO_req->CDB.CDB32[2];
2489 physLBA_byte = (uint8_t)(physLBA >> 8);
2490 *ptrLBA = physLBA_byte;
2491 ptrLBA = &pIO_req->CDB.CDB32[3];
2492 physLBA_byte = (uint8_t)physLBA;
2493 *ptrLBA = physLBA_byte;
2496 * Set flag that Direct Drive I/O is
2499 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2506 * Handle 10, 12 or 16 byte CDBs.
2508 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2509 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2510 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2511 (CDB[0] == WRITE_12))) {
2513 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2514 * are 0. If not, this is accessing beyond 2TB so handle it in
2515 * the else section. 10-byte and 12-byte CDB's are OK.
2516 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2517 * ready to accept 12byte CDB for Direct IOs.
2519 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2520 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2521 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2523 * Get the transfer size in blocks.
2525 io_size = (cm->cm_length >> sc->DD_block_exponent);
2528 * Get virtual LBA. Point to correct lower 4 bytes of
2529 * LBA in the CDB depending on command.
2531 lba_idx = ((CDB[0] == READ_12) ||
2532 (CDB[0] == WRITE_12) ||
2533 (CDB[0] == READ_10) ||
2534 (CDB[0] == WRITE_10))? 2 : 6;
2535 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2536 ((uint64_t)CDB[lba_idx + 1] << 16) |
2537 ((uint64_t)CDB[lba_idx + 2] << 8) |
2538 (uint64_t)CDB[lba_idx + 3];
2541 * Check that LBA range for I/O does not exceed volume's
2544 if ((virtLBA + (uint64_t)io_size - 1) <=
2547 * Check if the I/O crosses a stripe boundary.
2548 * If not, translate the virtual LBA to a
2549 * physical LBA and set the DevHandle for the
2550 * PhysDisk to be used. If it does cross a
2551 * boundry, do normal I/O. To get the right
2552 * DevHandle to use, get the map number for the
2553 * column, then use that map number to look up
2554 * the DevHandle of the PhysDisk.
2556 stripe_offset = (uint32_t)virtLBA &
2557 (sc->DD_stripe_size - 1);
2558 if ((stripe_offset + io_size) <=
2559 sc->DD_stripe_size) {
2560 physLBA = (uint32_t)virtLBA >>
2561 sc->DD_stripe_exponent;
2562 stripe_unit = physLBA /
2563 sc->DD_num_phys_disks;
2565 sc->DD_num_phys_disks;
2566 pIO_req->DevHandle =
2567 htole16(sc->DD_column_map[column].
2569 cm->cm_desc.SCSIIO.DevHandle =
2572 physLBA = (stripe_unit <<
2573 sc->DD_stripe_exponent) +
/* Rewrite the 32-bit LBA field of the CDB (at lba_idx) in place. */
2576 &pIO_req->CDB.CDB32[lba_idx];
2577 physLBA_byte = (uint8_t)(physLBA >> 24);
2578 *ptrLBA = physLBA_byte;
2580 &pIO_req->CDB.CDB32[lba_idx + 1];
2581 physLBA_byte = (uint8_t)(physLBA >> 16);
2582 *ptrLBA = physLBA_byte;
2584 &pIO_req->CDB.CDB32[lba_idx + 2];
2585 physLBA_byte = (uint8_t)(physLBA >> 8);
2586 *ptrLBA = physLBA_byte;
2588 &pIO_req->CDB.CDB32[lba_idx + 3];
2589 physLBA_byte = (uint8_t)physLBA;
2590 *ptrLBA = physLBA_byte;
2593 * Set flag that Direct Drive I/O is
2596 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2601 * 16-byte CDB and the upper 4 bytes of the CDB are not
2602 * 0. Get the transfer size in blocks.
2604 io_size = (cm->cm_length >> sc->DD_block_exponent);
/*
 * Assemble the full 64-bit big-endian LBA from CDB bytes 2-9.
 * FIX: byte 2 is the MSB and must be shifted by 56, not 54.
 */
2609 virtLBA = ((uint64_t)CDB[2] << 56) |
2610 ((uint64_t)CDB[3] << 48) |
2611 ((uint64_t)CDB[4] << 40) |
2612 ((uint64_t)CDB[5] << 32) |
2613 ((uint64_t)CDB[6] << 24) |
2614 ((uint64_t)CDB[7] << 16) |
2615 ((uint64_t)CDB[8] << 8) |
2619 * Check that LBA range for I/O does not exceed volume's
2622 if ((virtLBA + (uint64_t)io_size - 1) <=
2625 * Check if the I/O crosses a stripe boundary.
2626 * If not, translate the virtual LBA to a
2627 * physical LBA and set the DevHandle for the
2628 * PhysDisk to be used. If it does cross a
2629 * boundry, do normal I/O. To get the right
2630 * DevHandle to use, get the map number for the
2631 * column, then use that map number to look up
2632 * the DevHandle of the PhysDisk.
2634 stripe_offset = (uint32_t)virtLBA &
2635 (sc->DD_stripe_size - 1);
2636 if ((stripe_offset + io_size) <=
2637 sc->DD_stripe_size) {
2638 physLBA = (uint32_t)(virtLBA >>
2639 sc->DD_stripe_exponent);
2640 stripe_unit = physLBA /
2641 sc->DD_num_phys_disks;
2643 sc->DD_num_phys_disks;
2644 pIO_req->DevHandle =
2645 htole16(sc->DD_column_map[column].
2647 cm->cm_desc.SCSIIO.DevHandle =
2650 physLBA = (stripe_unit <<
2651 sc->DD_stripe_exponent) +
2655 * Set upper 4 bytes of LBA to 0. We
2656 * assume that the phys disks are less
2657 * than 2 TB's in size. Then, set the
2660 pIO_req->CDB.CDB32[2] = 0;
2661 pIO_req->CDB.CDB32[3] = 0;
2662 pIO_req->CDB.CDB32[4] = 0;
2663 pIO_req->CDB.CDB32[5] = 0;
2664 ptrLBA = &pIO_req->CDB.CDB32[6];
2665 physLBA_byte = (uint8_t)(physLBA >> 24);
2666 *ptrLBA = physLBA_byte;
2667 ptrLBA = &pIO_req->CDB.CDB32[7];
2668 physLBA_byte = (uint8_t)(physLBA >> 16);
2669 *ptrLBA = physLBA_byte;
2670 ptrLBA = &pIO_req->CDB.CDB32[8];
2671 physLBA_byte = (uint8_t)(physLBA >> 8);
2672 *ptrLBA = physLBA_byte;
2673 ptrLBA = &pIO_req->CDB.CDB32[9];
2674 physLBA_byte = (uint8_t)physLBA;
2675 *ptrLBA = physLBA_byte;
2678 * Set flag that Direct Drive I/O is
2681 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2688 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough request issued by
 * mpssas_send_smpcmd().  Validates the MPI reply, copies status back into
 * the CAM CCB, syncs/unloads the DMA map, and frees the command.
 */
2690 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2692 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2693 MPI2_SMP_PASSTHROUGH_REQUEST *req;
/* The CCB was stashed in cm_complete_data by mpssas_send_smpcmd(). */
2697 ccb = cm->cm_complete_data;
2700 * Currently there should be no way we can hit this case. It only
2701 * happens when we have a failure to allocate chain frames, and SMP
2702 * commands require two S/G elements only. That should be handled
2703 * in the standard request size.
2705 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2706 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2707 __func__, cm->cm_flags);
2708 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* A NULL reply frame means the firmware never completed the request. */
2712 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2714 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2715 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Reassemble the 64-bit SAS address (for logging) from the request frame. */
2719 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2720 sasaddr = le32toh(req->SASAddress.Low);
2721 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
/* Byte-swap IOCStatus before masking: reply frames are little-endian. */
2723 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2724 MPI2_IOCSTATUS_SUCCESS ||
2725 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2726 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2727 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2728 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2732 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2733 "%#jx completed successfully\n", __func__,
2734 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2736 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2737 ccb->ccb_h.status = CAM_REQ_CMP;
2739 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2743 * We sync in both directions because we had DMAs in the S/G list
2744 * in both directions.
2746 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2747 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2748 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2749 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for an XPT_SMP_IO CCB to the
 * expander at 'sasaddr'.  Uses a two-element uio (request buffer, response
 * buffer) so a single mps_map_command() call maps both DMA directions.
 * Completion is handled by mpssas_smpio_complete().
 */
2754 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2756 struct mps_command *cm;
2757 uint8_t *request, *response;
2758 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2759 struct mps_softc *sc;
2768 * XXX We don't yet support physical addresses here.
2770 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2771 case CAM_DATA_PADDR:
2772 case CAM_DATA_SG_PADDR:
2773 mps_dprint(sc, MPS_ERROR,
2774 "%s: physical addresses not supported\n", __func__);
2775 ccb->ccb_h.status = CAM_REQ_INVALID;
2780 * The chip does not support more than one buffer for the
2781 * request or response.
2783 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2784 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2785 mps_dprint(sc, MPS_ERROR,
2786 "%s: multiple request or response "
2787 "buffer segments not supported for SMP\n",
2789 ccb->ccb_h.status = CAM_REQ_INVALID;
2795 * The CAM_SCATTER_VALID flag was originally implemented
2796 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2797 * We have two. So, just take that flag to mean that we
2798 * might have S/G lists, and look at the S/G segment count
2799 * to figure out whether that is the case for each individual
2802 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2803 bus_dma_segment_t *req_sg;
2805 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2806 request = (uint8_t *)req_sg[0].ds_addr;
2808 request = ccb->smpio.smp_request;
2810 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2811 bus_dma_segment_t *rsp_sg;
2813 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2814 response = (uint8_t *)rsp_sg[0].ds_addr;
2816 response = ccb->smpio.smp_response;
/* Plain virtual addresses: use the CCB buffers directly. */
2818 case CAM_DATA_VADDR:
2819 request = ccb->smpio.smp_request;
2820 response = ccb->smpio.smp_response;
2823 ccb->ccb_h.status = CAM_REQ_INVALID;
2828 cm = mps_alloc_command(sc);
2830 mps_dprint(sc, MPS_ERROR,
2831 "%s: cannot allocate command\n", __func__);
2832 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Fill in the MPI2 SMP passthrough request frame. */
2837 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2838 bzero(req, sizeof(*req));
2839 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2841 /* Allow the chip to use any route to this SAS address. */
2842 req->PhysicalPort = 0xff;
2844 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2846 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2848 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2849 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2851 mpi_init_sge(cm, req, &req->SGL);
2854 * Set up a uio to pass into mps_map_command(). This allows us to
2855 * do one map command, and one busdma call in there.
2857 cm->cm_uio.uio_iov = cm->cm_iovec;
2858 cm->cm_uio.uio_iovcnt = 2;
2859 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2862 * The read/write flag isn't used by busdma, but set it just in
2863 * case. This isn't exactly accurate, either, since we're going in
2866 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = outbound SMP request, iovec[1] = inbound SMP response. */
2868 cm->cm_iovec[0].iov_base = request;
2869 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2870 cm->cm_iovec[1].iov_base = response;
2871 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2873 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2874 cm->cm_iovec[1].iov_len;
2877 * Trigger a warning message in mps_data_cb() for the user if we
2878 * wind up exceeding two S/G segments. The chip expects one
2879 * segment for the request and another for the response.
2881 cm->cm_max_segs = 2;
2883 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2884 cm->cm_complete = mpssas_smpio_complete;
2885 cm->cm_complete_data = ccb;
2888 * Tell the mapping code that we're using a uio, and that this is
2889 * an SMP passthrough request. There is a little special-case
2890 * logic there (in mps_data_cb()) to handle the bidirectional
2893 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2894 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2896 /* The chip data format is little endian. */
2897 req->SASAddress.High = htole32(sasaddr >> 32);
2898 req->SASAddress.Low = htole32(sasaddr);
2901 * XXX Note that we don't have a timeout/abort mechanism here.
2902 * From the manual, it looks like task management requests only
2903 * work for SCSI IO and SATA passthrough requests. We may need to
2904 * have a mechanism to retry requests in the event of a chip reset
2905 * at least. Hopefully the chip will insure that any errors short
2906 * of that are relayed back to the driver.
2908 error = mps_map_command(sc, cm);
2909 if ((error != 0) && (error != EINPROGRESS)) {
2910 mps_dprint(sc, MPS_ERROR,
2911 "%s: error %d returned from mps_map_command()\n",
/* Mapping failed outright: release the command and fail the CCB. */
2919 mps_free_command(sc, cm);
2920 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (either the device itself, or its parent expander via the cached parent
 * handle/address) and hand off to mpssas_send_smpcmd().
 */
2927 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2929 struct mps_softc *sc;
2930 struct mpssas_target *targ;
2931 uint64_t sasaddr = 0;
2936 * Make sure the target exists.
2938 targ = &sassc->targets[ccb->ccb_h.target_id];
2939 if (targ->handle == 0x0) {
2940 mps_dprint(sc, MPS_ERROR,
2941 "%s: target %d does not exist!\n", __func__,
2942 ccb->ccb_h.target_id);
2943 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2949 * If this device has an embedded SMP target, we'll talk to it
2951 * figure out what the expander's address is.
2953 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2954 sasaddr = targ->sasaddr;
2957 * If we don't have a SAS address for the expander yet, try
2958 * grabbing it from the page 0x83 information cached in the
2959 * transport layer for this target. LSI expanders report the
2960 * expander SAS address as the port-associated SAS address in
2961 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2964 * XXX KDM disable this for now, but leave it commented out so that
2965 * it is obvious that this is another possible way to get the SAS
2968 * The parent handle method below is a little more reliable, and
2969 * the other benefit is that it works for devices other than SES
2970 * devices. So you can send a SMP request to a da(4) device and it
2971 * will get routed to the expander that device is attached to.
2972 * (Assuming the da(4) device doesn't contain an SMP target...)
2976 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2980 * If we still don't have a SAS address for the expander, look for
2981 * the parent device of this device, which is probably the expander.
2984 #ifdef OLD_MPS_PROBE
2985 struct mpssas_target *parent_target;
2988 if (targ->parent_handle == 0x0) {
2989 mps_dprint(sc, MPS_ERROR,
2990 "%s: handle %d does not have a valid "
2991 "parent handle!\n", __func__, targ->handle);
2992 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Legacy probe path: look the parent target up by handle. */
2995 #ifdef OLD_MPS_PROBE
2996 parent_target = mpssas_find_target_by_handle(sassc, 0,
2997 targ->parent_handle);
2999 if (parent_target == NULL) {
3000 mps_dprint(sc, MPS_ERROR,
3001 "%s: handle %d does not have a valid "
3002 "parent target!\n", __func__, targ->handle);
3003 ccb->ccb_h.status = CAM_REQ_INVALID;
3007 if ((parent_target->devinfo &
3008 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3009 mps_dprint(sc, MPS_ERROR,
3010 "%s: handle %d parent %d does not "
3011 "have an SMP target!\n", __func__,
3012 targ->handle, parent_target->handle);
3013 ccb->ccb_h.status = CAM_REQ_INVALID;
3018 sasaddr = parent_target->sasaddr;
3019 #else /* OLD_MPS_PROBE */
/* Current path: parent devinfo/address are cached on the target itself. */
3020 if ((targ->parent_devinfo &
3021 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3022 mps_dprint(sc, MPS_ERROR,
3023 "%s: handle %d parent %d does not "
3024 "have an SMP target!\n", __func__,
3025 targ->handle, targ->parent_handle);
3026 ccb->ccb_h.status = CAM_REQ_INVALID;
3030 if (targ->parent_sasaddr == 0x0) {
3031 mps_dprint(sc, MPS_ERROR,
3032 "%s: handle %d parent handle %d does "
3033 "not have a valid SAS address!\n",
3034 __func__, targ->handle, targ->parent_handle);
3035 ccb->ccb_h.status = CAM_REQ_INVALID;
3039 sasaddr = targ->parent_sasaddr;
3040 #endif /* OLD_MPS_PROBE */
/* No usable SMP target address was found by any method. */
3045 mps_dprint(sc, MPS_INFO,
3046 "%s: unable to find SAS address for handle %d\n",
3047 __func__, targ->handle);
3048 ccb->ccb_h.status = CAM_REQ_INVALID;
3051 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3059 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command, build a
 * Target Reset (with link-reset message flags) for the target's device
 * handle, and submit it.  mpssas_resetdev_complete() finishes the CCB.
 *
 * FIX (review): corrected the misspelled diagnostic "comand alloc failure"
 * -> "command alloc failure".
 */
3062 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3064 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3065 struct mps_softc *sc;
3066 struct mps_command *tm;
3067 struct mpssas_target *targ;
3069 MPS_FUNCTRACE(sassc->sc);
3070 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3073 tm = mps_alloc_command(sc)
3075 mps_dprint(sc, MPS_ERROR,
3076 "command alloc failure in mpssas_action_resetdev\n");
3077 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Build the SCSI task management request frame for a target reset. */
3082 targ = &sassc->targets[ccb->ccb_h.target_id];
3083 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3084 req->DevHandle = htole16(targ->handle);
3085 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3086 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3088 /* SAS Hard Link Reset / SATA Link Reset */
3089 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go on the high-priority queue. */
3092 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3093 tm->cm_complete = mpssas_resetdev_complete;
3094 tm->cm_complete_data = ccb;
3096 mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset task management command issued by
 * mpssas_action_resetdev().  Translates the TM response code into a CAM
 * status, announces the bus-device-reset to CAM, and frees the TM command.
 */
3100 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3102 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3106 mtx_assert(&sc->mps_mtx, MA_OWNED);
3108 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3109 ccb = tm->cm_complete_data;
3112 * Currently there should be no way we can hit this case. It only
3113 * happens when we have a failure to allocate chain frames, and
3114 * task management commands don't have S/G lists.
3116 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3117 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3119 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3121 mps_dprint(sc, MPS_ERROR,
3122 "%s: cm_flags = %#x for reset of handle %#04x! "
3123 "This should not happen!\n", __func__, tm->cm_flags,
3125 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/*
 * NOTE(review): le32toh() is applied to ResponseCode here, but the sibling
 * IOCStatus check elsewhere in this file swaps a 16-bit field with le16toh()
 * before masking; presumably ResponseCode is a narrow field and this swap is
 * a no-op on little-endian hosts only — verify against the MPI2 reply layout.
 */
3129 mps_dprint(sc, MPS_XINFO,
3130 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3131 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3133 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3134 ccb->ccb_h.status = CAM_REQ_CMP;
/* Tell CAM a bus device reset was delivered to this target. */
3135 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3139 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3143 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point: service the controller without interrupts
 * (used e.g. while dumping/panicking).  Tracing is disabled first so the
 * poll loop is not drowned in debug output.
 */
3148 mpssas_poll(struct cam_sim *sim)
3150 struct mpssas_softc *sassc;
3152 sassc = cam_sim_softc(sim);
3154 if (sassc->sc->mps_debug & MPS_TRACE) {
3155 /* frequent debug messages during a panic just slow
3156 * everything down too much.
3158 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3159 sassc->sc->mps_debug &= ~MPS_TRACE;
/* Run the interrupt handler body directly (lock already held by caller). */
3162 mps_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  On AC_ADVINFO_CHANGED (newer CAM versions),
 * refresh the cached EEDP (protection information) state for the LUN from
 * the long READ CAPACITY advinfo data.  On AC_FOUND_DEVICE, kick off the
 * EEDP probe via mpssas_check_eedp().
 */
3166 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3169 struct mps_softc *sc;
3171 sc = (struct mps_softc *)callback_arg;
3174 #if (__FreeBSD_version >= 1000006) || \
3175 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3176 case AC_ADVINFO_CHANGED: {
3177 struct mpssas_target *target;
3178 struct mpssas_softc *sassc;
3179 struct scsi_read_capacity_data_long rcap_buf;
3180 struct ccb_dev_advinfo cdai;
3181 struct mpssas_lun *lun;
3186 buftype = (uintptr_t)arg;
3192 * We're only interested in read capacity data changes.
3194 if (buftype != CDAI_TYPE_RCAPLONG)
3198 * We should have a handle for this, but check to make sure.
3200 target = &sassc->targets[xpt_path_target_id(path)];
3201 if (target->handle == 0)
/* Find (or create) the per-LUN record this advinfo update applies to. */
3204 lunid = xpt_path_lun_id(path);
3206 SLIST_FOREACH(lun, &target->luns, lun_link) {
3207 if (lun->lun_id == lunid) {
3213 if (found_lun == 0) {
3214 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3217 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3218 "LUN for EEDP support.\n");
3221 lun->lun_id = lunid;
3222 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via an XPT_DEV_ADVINFO CCB. */
3225 bzero(&rcap_buf, sizeof(rcap_buf));
3226 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3227 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3228 cdai.ccb_h.flags = CAM_DIR_IN;
3229 cdai.buftype = CDAI_TYPE_RCAPLONG;
3231 cdai.bufsiz = sizeof(rcap_buf);
3232 cdai.buf = (uint8_t *)&rcap_buf;
3233 xpt_action((union ccb *)&cdai);
3234 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3235 cam_release_devq(cdai.ccb_h.path,
/* Record whether the LUN is formatted with protection info enabled. */
3238 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3239 && (rcap_buf.prot & SRC16_PROT_EN)) {
3240 lun->eedp_formatted = TRUE;
3241 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3243 lun->eedp_formatted = FALSE;
3244 lun->eedp_block_size = 0;
3249 case AC_FOUND_DEVICE: {
3250 struct ccb_getdev *cgd;
3253 mpssas_check_eedp(sc, path, cgd);
3262 #if (__FreeBSD_version < 901503) || \
3263 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Older-CAM EEDP probe (compiled when XPT_DEV_ADVINFO is unavailable):
 * if the newly found device advertises protection support in its inquiry
 * data, issue an internal READ CAPACITY(16) to the LUN; the result is
 * consumed by mpssas_read_cap_done(), which records per-LUN EEDP state.
 */
3265 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3266 struct ccb_getdev *cgd)
3268 struct mpssas_softc *sassc = sc->sassc;
3269 struct ccb_scsiio *csio;
3270 struct scsi_read_capacity_16 *scsi_cmd;
3271 struct scsi_read_capacity_eedp *rcap_buf;
3273 target_id_t targetid;
3276 struct cam_path *local_path;
3277 struct mpssas_target *target;
3278 struct mpssas_lun *lun;
3283 pathid = cam_sim_path(sassc->sim);
3284 targetid = xpt_path_target_id(path);
3285 lunid = xpt_path_lun_id(path);
/* A zero handle means we are not tracking this target; nothing to do. */
3287 target = &sassc->targets[targetid];
3288 if (target->handle == 0x0)
3292 * Determine if the device is EEDP capable.
3294 * If this flag is set in the inquiry data,
3295 * the device supports protection information,
3296 * and must support the 16 byte read
3297 * capacity command, otherwise continue without
3298 * sending read cap 16
3300 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3304 * Issue a READ CAPACITY 16 command. This info
3305 * is used to determine if the LUN is formatted
3308 ccb = xpt_alloc_ccb_nowait();
3310 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3311 "for EEDP support.\n");
3315 if (xpt_create_path(&local_path, xpt_periph,
3316 pathid, targetid, lunid) != CAM_REQ_CMP) {
3317 mps_dprint(sc, MPS_ERROR, "Unable to create "
3318 "path for EEDP support\n");
3324 * If LUN is already in list, don't create a new
3328 SLIST_FOREACH(lun, &target->luns, lun_link) {
3329 if (lun->lun_id == lunid) {
3335 lun = malloc(sizeof(struct mpssas_lun),  M_MPT2,
3338 mps_dprint(sc, MPS_ERROR,
3339 "Unable to alloc LUN for EEDP support.\n");
3340 xpt_free_path(local_path);
3344 lun->lun_id = lunid;
3345 SLIST_INSERT_HEAD(&target->luns, lun,
3349 xpt_path_string(local_path, path_str, sizeof(path_str));
3350 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3351 path_str, target->handle);
3354 * Issue a READ CAPACITY 16 command for the LUN.
3355 * The mpssas_read_cap_done function will load
3356 * the read cap info into the LUN struct.
3358 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3359 M_MPT2, M_NOWAIT | M_ZERO);
3360 if (rcap_buf == NULL) {
3361 mps_dprint(sc, MPS_FAULT,
3362 "Unable to alloc read capacity buffer for EEDP support.\n");
3363 xpt_free_path(ccb->ccb_h.path);
/* Build the internal XPT_SCSI_IO: data-in, 4 retries, 60s timeout. */
3367 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3369 csio->ccb_h.func_code = XPT_SCSI_IO;
3370 csio->ccb_h.flags = CAM_DIR_IN;
3371 csio->ccb_h.retry_count = 4;
3372 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3373 csio->ccb_h.timeout = 60000;
3374 csio->data_ptr = (uint8_t *)rcap_buf;
3375 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3376 csio->sense_len = MPS_SENSE_LEN;
3377 csio->cdb_len = sizeof(*scsi_cmd);
3378 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E / SERVICE ACTION IN(16): READ CAPACITY(16); byte 13 = alloc length. */
3380 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3381 bzero(scsi_cmd, sizeof(*scsi_cmd));
3382 scsi_cmd->opcode = 0x9E;
3383 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3384 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion callback can find its state. */
3386 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internal READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Records whether the LUN is EEDP-formatted (and its
 * block size) in the per-LUN record, then frees the buffer, path, and CCB.
 */
3391 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3393 struct mpssas_softc *sassc;
3394 struct mpssas_target *target;
3395 struct mpssas_lun *lun;
3396 struct scsi_read_capacity_eedp *rcap_buf;
3398 if (done_ccb == NULL)
3401 /* Driver need to release devq, it Scsi command is
3402 * generated by driver internally.
3403 * Currently there is a single place where driver
3404 * calls scsi command internally. In future if driver
3405 * calls more scsi command internally, it needs to release
3406 * devq internally, since those command will not go back to
3409 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3410 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3411 xpt_release_devq(done_ccb->ccb_h.path,
3412 /*count*/ 1, /*run_queue*/TRUE);
3415 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3418 * Get the LUN ID for the path and look it up in the LUN list for the
3421 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3422 target = &sassc->targets[done_ccb->ccb_h.target_id];
3423 SLIST_FOREACH(lun, &target->luns, lun_link) {
3424 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3428 * Got the LUN in the target's LUN list. Fill it in
3429 * with EEDP info. If the READ CAP 16 command had some
3430 * SCSI error (common if command is not supported), mark
3431 * the lun as not supporting EEDP and set the block size
3434 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3435 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3436 lun->eedp_formatted = FALSE;
3437 lun->eedp_block_size = 0;
/* Bit 0 of the PROT byte = protection enabled (EEDP formatted). */
3441 if (rcap_buf->protect & 0x01) {
3442 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3443 "target ID %d is formatted for EEDP "
3444 "support.\n", done_ccb->ccb_h.target_lun,
3445 done_ccb->ccb_h.target_id);
3446 lun->eedp_formatted = TRUE;
3447 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3452 // Finished with this CCB and path.
3453 free(rcap_buf, M_MPT2);
3454 xpt_free_path(done_ccb->ccb_h.path);
3455 xpt_free_ccb(done_ccb);
3457 #endif /* (__FreeBSD_version < 901503) || \
3458 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Begin SAS topology discovery: bump the startup refcount (keeps the simq
 * frozen until discovery finishes), set wait_for_port_enable, and send the
 * port enable request to the controller.
 */
3461 mpssas_startup(struct mps_softc *sc)
3463 struct mpssas_softc *sassc;
3466 * Send the port enable message and set the wait_for_port_enable flag.
3467 * This flag helps to keep the simq frozen until all discovery events
3471 mpssas_startup_increment(sassc);
3472 sc->wait_for_port_enable = 1;
3473 mpssas_send_portenable(sc);
/*
 * Allocate a command and submit an MPI2 Port Enable request; completion is
 * handled by mpssas_portenable_complete().  Fails (non-zero path implied by
 * the NULL check) only if no command can be allocated.
 */
3478 mpssas_send_portenable(struct mps_softc *sc)
3480 MPI2_PORT_ENABLE_REQUEST *request;
3481 struct mps_command *cm;
3485 if ((cm = mps_alloc_command(sc)) == NULL)
3487 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3488 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3489 request->MsgFlags = 0;
3491 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3492 cm->cm_complete = mpssas_portenable_complete;
3496 mps_map_command(sc, cm);
3497 mps_dprint(sc, MPS_XINFO,
3498 "mps_send_portenable finished cm %p req %p complete %p\n",
3499 cm, cm->cm_req, cm->cm_complete);
3504 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3506 MPI2_PORT_ENABLE_REPLY *reply;
3507 struct mpssas_softc *sassc;
3513 * Currently there should be no way we can hit this case. It only
3514 * happens when we have a failure to allocate chain frames, and
3515 * port enable commands don't have S/G lists.
3517 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3518 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3519 "This should not happen!\n", __func__, cm->cm_flags);
3522 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3524 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3525 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3526 MPI2_IOCSTATUS_SUCCESS)
3527 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3529 mps_free_command(sc, cm);
3530 if (sc->mps_ich.ich_arg != NULL) {
3531 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3532 config_intrhook_disestablish(&sc->mps_ich);
3533 sc->mps_ich.ich_arg = NULL;
3537 * Get WarpDrive info after discovery is complete but before the scan
3538 * starts. At this point, all devices are ready to be exposed to the
3539 * OS. If devices should be hidden instead, take them out of the
3540 * 'targets' array before the scan. The devinfo for a disk will have
3541 * some info and a volume's will be 0. Use that to remove disks.
3543 mps_wd_config_pages(sc);
3546 * Done waiting for port enable to complete. Decrement the refcount.
3547 * If refcount is 0, discovery is complete and a rescan of the bus can
3548 * take place. Since the simq was explicitly frozen before port
3549 * enable, it must be explicitly released here to keep the
3550 * freeze/release count in sync.
3552 sc->wait_for_port_enable = 0;
3553 sc->port_enable_complete = 1;
3554 wakeup(&sc->port_enable_complete);
3555 mpssas_startup_decrement(sassc);
3556 xpt_release_simq(sassc->sim, 1);