2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011, 2012 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * LSI MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for LSI MPT2 */
37 /* TODO Move headers to mpsvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_xpt.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/mps/mpi/mpi2_type.h>
77 #include <dev/mps/mpi/mpi2.h>
78 #include <dev/mps/mpi/mpi2_ioc.h>
79 #include <dev/mps/mpi/mpi2_sas.h>
80 #include <dev/mps/mpi/mpi2_cnfg.h>
81 #include <dev/mps/mpi/mpi2_init.h>
82 #include <dev/mps/mpi/mpi2_tool.h>
83 #include <dev/mps/mps_ioctl.h>
84 #include <dev/mps/mpsvar.h>
85 #include <dev/mps/mps_table.h>
86 #include <dev/mps/mps_sas.h>
88 #define MPSSAS_DISCOVERY_TIMEOUT 20
89 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
92 * static array to check SCSI OpCode for EEDP protection bits
94 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
95 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
96 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP (End-to-End Data Protection) flag table, indexed by SCSI CDB opcode.
 * A nonzero entry gives the MPI2 SCSIIO EEDPFlags operation to use for that
 * opcode: PRO_R (check/remove on read), PRO_W (insert on write), PRO_V
 * (insert, for verify-class opcodes) -- see the #defines above.
 * NOTE(review): the populated rows presumably correspond to the 6/10/12/16
 * byte READ/WRITE/VERIFY opcode groups (0x28/0x2A, 0x88/0x8A, 0xA8/0xAA,
 * etc.) -- confirm against the SBC opcode assignments before relying on it.
 */
97 static uint8_t op_code_prot[256] = {
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
116 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
118 static void mpssas_discovery_timeout(void *data);
119 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
120 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
121 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mpssas_poll(struct cam_sim *sim);
123 static void mpssas_scsiio_timeout(void *data);
124 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
125 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
126 struct mps_command *cm, union ccb *ccb);
127 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
128 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
129 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
130 #if __FreeBSD_version >= 900026
131 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
132 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
134 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
135 #endif //FreeBSD_version >= 900026
136 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
137 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
138 static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
139 static void mpssas_async(void *callback_arg, uint32_t code,
140 struct cam_path *path, void *arg);
141 #if (__FreeBSD_version < 901503) || \
142 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
143 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
144 struct ccb_getdev *cgd);
145 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
147 static int mpssas_send_portenable(struct mps_softc *sc);
148 static void mpssas_portenable_complete(struct mps_softc *sc,
149 struct mps_command *cm);
/*
 * Look up a target by its firmware device handle, scanning the flat target
 * array from index 'start' up to the controller's MaxTargets.
 * NOTE(review): source lines are elided here; the match-return and
 * not-found paths are not visible in this view.
 */
151 struct mpssas_target *
152 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
154 	struct mpssas_target *target;
157 	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
158 		target = &sassc->targets[i];
159 		if (target->handle == handle)
166 /* we need to freeze the simq during attach and diag reset, to avoid failing
167 * commands before device handles have been found by discovery. Since
168 * discovery involves reading config pages and possibly sending commands,
169 * discovery actions may continue even after we receive the end of discovery
170 * event, so refcount discovery actions instead of assuming we can unfreeze
171 * the simq when we get the event.
/*
 * Record the start of one discovery-related action.  While the softc is in
 * MPSSAS_IN_STARTUP, the first increment (refcount 0 -> 1) freezes the CAM
 * simq so no commands are issued before device handles are discovered; the
 * matching decrement path releases it.
 */
174 mpssas_startup_increment(struct mpssas_softc *sassc)
176 	MPS_FUNCTRACE(sassc->sc);
178 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
179 		if (sassc->startup_refcount++ == 0) {
180 			/* just starting, freeze the simq */
181 			mps_dprint(sassc->sc, MPS_INIT,
182 			    "%s freezing simq\n", __func__);
183 #if (__FreeBSD_version >= 1000039) || \
184     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
187 		xpt_freeze_simq(sassc->sim, 1);
189 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
190 		    sassc->startup_refcount);
/*
 * Record completion of one discovery-related action.  When the last one
 * finishes (refcount reaches 0) during startup, leave startup mode, release
 * the simq frozen by mpssas_startup_increment(), and rescan the whole bus
 * so CAM sees the freshly discovered topology.
 */
195 mpssas_startup_decrement(struct mpssas_softc *sassc)
197 	MPS_FUNCTRACE(sassc->sc);
199 	if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
200 		if (--sassc->startup_refcount == 0) {
201 			/* finished all discovery-related actions, release
202 			 * the simq and rescan for the latest topology.
204 			mps_dprint(sassc->sc, MPS_INIT,
205 			    "%s releasing simq\n", __func__);
206 			sassc->flags &= ~MPSSAS_IN_STARTUP;
207 			xpt_release_simq(sassc->sim, 1);
208 #if (__FreeBSD_version >= 1000039) || \
209     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
212 			mpssas_rescan_target(sassc->sc, NULL);
215 		mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
216 		    sassc->startup_refcount);
220 /* LSI's firmware requires us to stop sending commands when we're doing task
221 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management.  The firmware
 * requires normal I/O to stop during TM, so the first outstanding TM
 * (tm_count 0 -> 1) freezes the simq; mpssas_free_tm() releases it when the
 * last TM completes.
 */
225 mpssas_alloc_tm(struct mps_softc *sc)
227 	struct mps_command *tm;
230 	tm = mps_alloc_high_priority_command(sc);
232 	if (sc->sassc->tm_count++ == 0) {
233 		mps_dprint(sc, MPS_RECOVERY,
234 		    "%s freezing simq\n", __func__);
235 		xpt_freeze_simq(sc->sassc->sim, 1);
237 	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
238 	    sc->sassc->tm_count);
/*
 * Return a task-management command allocated by mpssas_alloc_tm().  When
 * the TM refcount drops to zero, release the simq that was frozen when the
 * first TM was issued, then free the high-priority command itself.
 */
244 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
246 	mps_dprint(sc, MPS_TRACE, "%s", __func__);
250 	/* if there are no TMs in use, we can release the simq.  We use our
251 	 * own refcount so that it's easier for a diag reset to cleanup and
254 	if (--sc->sassc->tm_count == 0) {
255 		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
256 		xpt_release_simq(sc->sassc->sim, 1);
258 	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
259 	    sc->sassc->tm_count);
261 	mps_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of a single target, or of the whole bus when
 * 'targ' is NULL (target id becomes CAM_TARGET_WILDCARD and the CCB is
 * issued as XPT_SCAN_BUS instead of XPT_SCAN_TGT).  The target id is
 * derived from the target's index in the sassc->targets array.
 * Failure to allocate the CCB or path is logged and the rescan is dropped.
 */
265 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
267 	struct mpssas_softc *sassc = sc->sassc;
269 	target_id_t targetid;
273 	pathid = cam_sim_path(sassc->sim);
275 		targetid = CAM_TARGET_WILDCARD;
277 		targetid = targ - sassc->targets;
280 	 * Allocate a CCB and schedule a rescan.
282 	ccb = xpt_alloc_ccb_nowait();
284 		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
288 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
289 		           targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
290 		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
295 	if (targetid == CAM_TARGET_WILDCARD)
296 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
298 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
300 	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style logging helper for a command.  Builds one line in an sbuf:
 * the CAM path (or a "noperiph" tuple when no CCB is attached), the SCSI
 * CDB and transfer length for XPT_SCSI_IO CCBs, the SMID, and finally the
 * caller's formatted message.  Returns immediately when the requested
 * debug 'level' is not enabled in mps_debug, so callers may use it freely
 * on hot paths.
 */
305 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
315 	/* No need to be in here if debugging isn't enabled */
316 	if ((cm->cm_sc->mps_debug & level) == 0)
319 	sbuf_new(&sb, str, sizeof(str), 0);
323 	if (cm->cm_ccb != NULL) {
324 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
326 		sbuf_cat(&sb, path_str);
327 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
328 			scsi_command_string(&cm->cm_ccb->csio, &sb);
329 			sbuf_printf(&sb, "length %d ",
330 			    cm->cm_ccb->csio.dxfer_len);
334 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
335 		    cam_sim_name(cm->cm_sc->sassc->sim),
336 		    cam_sim_unit(cm->cm_sc->sassc->sim),
337 		    cam_sim_bus(cm->cm_sc->sassc->sim),
338 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
342 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
343 	sbuf_vprintf(&sb, fmt, ap);
345 	mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_volume_remove().  Unlike bare-drive removal, a volume
 * needs no follow-up MPI2_SAS_OP_REMOVE_DEVICE, so on success we clear the
 * target's volatile fields directly and free the TM.
 *
 * Fix: reply fields arrive little-endian from the IOC; convert IOCStatus
 * with le16toh() and TerminationCount with le32toh() before use, matching
 * the sibling handlers mpssas_remove_device()/mpssas_remove_complete().
 * Without this, the status compare is wrong on big-endian hosts.
 */
352 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
354 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
355 	struct mpssas_target *targ;
360 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
361 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
365 		/* XXX retry the remove after the diag reset completes? */
366 		mps_dprint(sc, MPS_FAULT,
367 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
368 		mpssas_free_tm(sc, tm);
372 	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
373 		mps_dprint(sc, MPS_FAULT,
374 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
375 		   le16toh(reply->IOCStatus), handle);
376 		mpssas_free_tm(sc, tm);
380 	mps_dprint(sc, MPS_XINFO,
381 	    "Reset aborted %u commands\n", le32toh(reply->TerminationCount));
382 	mps_free_reply(sc, tm->cm_reply_data);
383 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
385 	mps_dprint(sc, MPS_XINFO,
386 	    "clearing target %u handle 0x%04x\n", targ->tid, handle);
389 	 * Don't clear target if remove fails because things will get confusing.
390 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
391 	 * this target id if possible, and so we can assign the same target id
392 	 * to this device if it comes back in the future.
394 	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
397 		targ->encl_handle = 0x0;
398 		targ->encl_slot = 0x0;
399 		targ->exp_dev_handle = 0x0;
401 		targ->linkrate = 0x0;
406 	mpssas_free_tm(sc, tm);
411 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
412 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by firmware handle: mark the
 * target as in-removal, allocate a TM, trigger a rescan so CAM notices the
 * device going away, and send a target reset whose completion handler
 * (mpssas_remove_volume) finishes the teardown.  On WD controllers with
 * WD_hide_expose == MPS_WD_EXPOSE_ALWAYS the volume stays visible to the
 * OS and nothing is done.
 * NOTE(review): unlike mpssas_prepare_remove(), the visible lines here
 * neither memset() the request nor htole16() DevHandle -- the elided lines
 * may cover this; worth confirming against the non-volume path.
 */
415 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
417 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
418 	struct mps_softc *sc;
419 	struct mps_command *cm;
420 	struct mpssas_target *targ = NULL;
422 	MPS_FUNCTRACE(sassc->sc);
427 	 * If this is a WD controller, determine if the disk should be exposed
428 	 * to the OS or not.  If disk should be exposed, return from this
429 	 * function without doing anything.
431 	if (sc->WD_available && (sc->WD_hide_expose ==
432 	    MPS_WD_EXPOSE_ALWAYS)) {
437 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
439 		/* FIXME: what is the action? */
440 		/* We don't know about this device? */
441 		mps_dprint(sc, MPS_ERROR,
442 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
448 	cm = mpssas_alloc_tm(sc);
450 		mps_dprint(sc, MPS_ERROR,
451 		    "%s: command alloc failure\n", __func__);
455 	mpssas_rescan_target(sc, targ);
457 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
458 	req->DevHandle = targ->handle;
459 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
460 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
462 	/* SAS Hard Link Reset / SATA Link Reset */
463 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 	cm->cm_desc.HighPriority.RequestFlags =
468 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
469 	cm->cm_complete = mpssas_remove_volume;
470 	cm->cm_complete_data = (void *)(uintptr_t)handle;
471 	mps_map_command(sc, cm);
475 * The MPT2 firmware performs debounce on the link to avoid transient link
476 * errors and false removals. When it does decide that link has been lost
477 * and a device need to go away, it expects that the host will perform a
478 * target reset and then an op remove. The reset has the side-effect of
479 * aborting any outstanding requests for the device, which is required for
480 * the op-remove to succeed. It's not clear if the host should check for
481 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device identified by firmware handle.  The
 * firmware expects a target reset (which aborts outstanding I/O) followed
 * by an op-remove; this function sends the reset, and the completion
 * handler mpssas_remove_device() issues the SAS_OP_REMOVE_DEVICE step.
 * The target is flagged MPSSAS_TARGET_INREMOVAL and a rescan is queued so
 * CAM learns of the departure.
 */
484 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
486 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
487 	struct mps_softc *sc;
488 	struct mps_command *cm;
489 	struct mpssas_target *targ = NULL;
491 	MPS_FUNCTRACE(sassc->sc);
495 	targ = mpssas_find_target_by_handle(sassc, 0, handle);
497 		/* FIXME: what is the action? */
498 		/* We don't know about this device? */
499 		mps_dprint(sc, MPS_ERROR,
500 		    "%s : invalid handle 0x%x \n", __func__, handle);
504 	targ->flags |= MPSSAS_TARGET_INREMOVAL;
506 	cm = mpssas_alloc_tm(sc);
508 		mps_dprint(sc, MPS_ERROR,
509 		    "%s: command alloc failure\n", __func__);
513 	mpssas_rescan_target(sc, targ);
515 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
516 	memset(req, 0, sizeof(*req));
517 	req->DevHandle = htole16(targ->handle);
518 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
519 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
521 	/* SAS Hard Link Reset / SATA Link Reset */
522 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
526 	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
527 	cm->cm_complete = mpssas_remove_device;
528 	cm->cm_complete_data = (void *)(uintptr_t)handle;
529 	mps_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM sent by
 * mpssas_prepare_remove().  On success it reuses the same command to send
 * the second stage, a SAS IO UNIT CONTROL request with operation
 * MPI2_SAS_OP_REMOVE_DEVICE, completing in mpssas_remove_complete().  Any
 * commands still queued on the target are completed back to CAM with
 * CAM_DEV_NOT_THERE since the reset has aborted them in the firmware.
 * NULL reply or non-success IOCStatus aborts the sequence and frees the TM.
 */
533 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
535 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
536 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
537 	struct mpssas_target *targ;
538 	struct mps_command *next_cm;
543 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
544 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
548 	 * Currently there should be no way we can hit this case.  It only
549 	 * happens when we have a failure to allocate chain frames, and
550 	 * task management commands don't have S/G lists.
552 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
553 		mps_dprint(sc, MPS_ERROR,
554 		    "%s: cm_flags = %#x for remove of handle %#04x! "
555 		    "This should not happen!\n", __func__, tm->cm_flags,
557 		mpssas_free_tm(sc, tm);
562 		/* XXX retry the remove after the diag reset completes? */
563 		mps_dprint(sc, MPS_FAULT,
564 		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
565 		mpssas_free_tm(sc, tm);
569 	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
570 		mps_dprint(sc, MPS_FAULT,
571 		   "IOCStatus = 0x%x while resetting device 0x%x\n",
572 		   le16toh(reply->IOCStatus), handle);
573 		mpssas_free_tm(sc, tm);
577 	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
578 	    le32toh(reply->TerminationCount));
579 	mps_free_reply(sc, tm->cm_reply_data);
580 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
582 	/* Reuse the existing command */
583 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
584 	memset(req, 0, sizeof(*req));
585 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
586 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
587 	req->DevHandle = htole16(handle);
589 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
590 	tm->cm_complete = mpssas_remove_complete;
591 	tm->cm_complete_data = (void *)(uintptr_t)handle;
593 	mps_map_command(sc, tm);
595 	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
597 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
600 		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
601 		ccb = tm->cm_complete_data;
602 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
603 		mpssas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the SAS_OP_REMOVE_DEVICE
 * issued by mpssas_remove_device().  On success, clear the target's
 * volatile identity fields (handle, enclosure info, linkrate) while
 * deliberately preserving devname/sasaddr so the same target id can be
 * avoided or re-assigned if the device returns, and free all per-LUN
 * bookkeeping.  The TM is always freed on every path.
 */
608 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
610 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
612 	struct mpssas_target *targ;
613 	struct mpssas_lun *lun;
617 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
618 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
621 	 * Currently there should be no way we can hit this case.  It only
622 	 * happens when we have a failure to allocate chain frames, and
623 	 * task management commands don't have S/G lists.
625 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
626 		mps_dprint(sc, MPS_XINFO,
627 			   "%s: cm_flags = %#x for remove of handle %#04x! "
628 			   "This should not happen!\n", __func__, tm->cm_flags,
630 		mpssas_free_tm(sc, tm);
635 		/* most likely a chip reset */
636 		mps_dprint(sc, MPS_FAULT,
637 		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
638 		mpssas_free_tm(sc, tm);
642 	mps_dprint(sc, MPS_XINFO,
643 	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
644 	    handle, le16toh(reply->IOCStatus));
647 	 * Don't clear target if remove fails because things will get confusing.
648 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
649 	 * this target id if possible, and so we can assign the same target id
650 	 * to this device if it comes back in the future.
652 	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
655 		targ->encl_handle = 0x0;
656 		targ->encl_slot = 0x0;
657 		targ->exp_dev_handle = 0x0;
659 		targ->linkrate = 0x0;
663 		while(!SLIST_EMPTY(&targ->luns)) {
664 			lun = SLIST_FIRST(&targ->luns);
665 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
671 	mpssas_free_tm(sc, tm);
/*
 * Build the firmware event mask (device status changes, discovery,
 * broadcast primitives, topology changes, enclosure and IR/RAID events,
 * log entries) and register mpssas_evt_handler as the callback for them.
 */
675 mpssas_register_events(struct mps_softc *sc)
677 	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
680 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
681 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
682 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
683 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
684 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
685 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
686 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
687 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
688 	setbit(events, MPI2_EVENT_IR_VOLUME);
689 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
690 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
691 	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
693 	mps_register_events(sc, events, mpssas_evt_handler, NULL,
694 	    &sc->sassc->mpssas_eh);
/*
 * Attach the SAS/CAM layer: allocate the sassc and target array, create
 * the SIM queue and SIM, start the firmware-event taskqueue, register the
 * (single, faked) bus with CAM, enter startup mode with the simq frozen
 * until discovery completes, and register async callbacks used for EEDP
 * capability detection (AC_ADVINFO_CHANGED on new enough FreeBSD,
 * AC_FOUND_DEVICE otherwise).  Failure of the async registration only
 * disables EEDP; it does not fail the attach.
 */
700 mps_attach_sas(struct mps_softc *sc)
702 	struct mpssas_softc *sassc;
708 	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
710 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
714 	sassc->targets = malloc(sizeof(struct mpssas_target) *
715 	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
716 	if(!sassc->targets) {
717 		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
725 	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
726 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
731 	unit = device_get_unit(sc->mps_dev);
732 	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
733 	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
734 	if (sassc->sim == NULL) {
735 		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
740 	TAILQ_INIT(&sassc->ev_queue);
742 	/* Initialize taskqueue for Event Handling */
743 	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
744 	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
745 	    taskqueue_thread_enqueue, &sassc->ev_tq);
747 	/* Run the task queue with lowest priority */
748 	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
749 	    device_get_nameunit(sc->mps_dev));
754 	 * XXX There should be a bus for every port on the adapter, but since
755 	 * we're just going to fake the topology for now, we'll pretend that
756 	 * everything is just a target on a single bus.
758 	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
759 		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
766 	 * Assume that discovery events will start right away.
768 	 * Hold off boot until discovery is complete.
770 	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
771 	sc->sassc->startup_refcount = 0;
772 	mpssas_startup_increment(sassc);
774 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
775 	sassc->discovery_timeouts = 0;
780 	 * Register for async events so we can determine the EEDP
781 	 * capabilities of devices.
783 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
784 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
786 	if (status != CAM_REQ_CMP) {
787 		mps_printf(sc, "Error %#x creating sim path\n", status);
792 #if (__FreeBSD_version >= 1000006) || \
793     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
794 		event = AC_ADVINFO_CHANGED;
796 		event = AC_FOUND_DEVICE;
798 		status = xpt_register_async(event, mpssas_async, sc,
800 		if (status != CAM_REQ_CMP) {
801 			mps_dprint(sc, MPS_ERROR,
802 			    "Error %#x registering async handler for "
803 			    "AC_ADVINFO_CHANGED events\n", status);
804 			xpt_free_path(sassc->path);
808 	if (status != CAM_REQ_CMP) {
810 		 * EEDP use is the exception, not the rule.
811 		 * Warn the user, but do not fail to attach.
813 		mps_printf(sc, "EEDP capabilities disabled.\n");
818 	mpssas_register_events(sc);
/*
 * Tear down the SAS/CAM layer in reverse order of mps_attach_sas():
 * deregister firmware events, drain and free the event taskqueue (with the
 * lock dropped to avoid deadlocking against running tasks), undo the async
 * registration and path, release the simq if attach bailed out while still
 * in startup, deregister and free the SIM and its queue, and free per-LUN
 * and target allocations.  Safe no-op when sc->sassc is NULL.
 */
826 mps_detach_sas(struct mps_softc *sc)
828 	struct mpssas_softc *sassc;
829 	struct mpssas_lun *lun, *lun_tmp;
830 	struct mpssas_target *targ;
835 	if (sc->sassc == NULL)
839 	mps_deregister_events(sc, sassc->mpssas_eh);
842 	 * Drain and free the event handling taskqueue with the lock
843 	 * unheld so that any parallel processing tasks drain properly
844 	 * without deadlocking.
846 	if (sassc->ev_tq != NULL)
847 		taskqueue_free(sassc->ev_tq);
849 	/* Make sure CAM doesn't wedge if we had to bail out early. */
852 	/* Deregister our async handler */
853 	if (sassc->path != NULL) {
854 		xpt_register_async(0, mpssas_async, sc, sassc->path);
855 		xpt_free_path(sassc->path);
859 	if (sassc->flags & MPSSAS_IN_STARTUP)
860 		xpt_release_simq(sassc->sim, 1);
862 	if (sassc->sim != NULL) {
863 		xpt_bus_deregister(cam_sim_path(sassc->sim));
864 		cam_sim_free(sassc->sim, FALSE);
867 	sassc->flags |= MPSSAS_SHUTDOWN;
870 	if (sassc->devq != NULL)
871 		cam_simq_free(sassc->devq);
873 	for(i=0; i< sc->facts->MaxTargets ;i++) {
874 		targ = &sassc->targets[i];
875 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
879 	free(sassc->targets, M_MPT2);
/*
 * Discovery has finished: cancel the pending discovery watchdog callout,
 * if one was armed (MPSSAS_DISCOVERY_TIMEOUT_PENDING).
 */
887 mpssas_discovery_end(struct mpssas_softc *sassc)
889 	struct mps_softc *sc = sassc->sc;
893 	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
894 		callout_stop(&sassc->discovery_callout);
/*
 * Watchdog callout fired while waiting for discovery to finish -- usually
 * a sign that interrupts are not working.  Poll the hardware manually; if
 * discovery then completes, finish it, otherwise re-arm the callout for
 * another MPSSAS_DISCOVERY_TIMEOUT seconds, up to
 * MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts before giving up and forcing
 * discovery to end.
 */
899 mpssas_discovery_timeout(void *data)
901 	struct mpssas_softc *sassc = data;
902 	struct mps_softc *sc;
908 	mps_dprint(sc, MPS_INFO,
909 	    "Timeout waiting for discovery, interrupts may not be working!\n");
910 	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;
912 	/* Poll the hardware for events in case interrupts aren't working */
915 	mps_dprint(sassc->sc, MPS_INFO,
916 	    "Finished polling after discovery timeout at %d\n", ticks);
918 	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
919 		mpssas_discovery_end(sassc);
921 		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
922 			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
923 			callout_reset(&sassc->discovery_callout,
924 			    MPSSAS_DISCOVERY_TIMEOUT * hz,
925 			    mpssas_discovery_timeout, sassc);
926 			sassc->discovery_timeouts++;
928 			mps_dprint(sassc->sc, MPS_FAULT,
929 			    "Discovery timed out, continuing.\n");
930 			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
931 			mpssas_discovery_end(sassc);
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB
 * function code: answers XPT_PATH_INQ with static controller capabilities,
 * reports per-target link speed for XPT_GET_TRAN_SETTINGS, delegates
 * geometry, device reset, SCSI I/O and (on new enough FreeBSD) SMP I/O to
 * helpers, fakes success for abort/bus-reset requests (the controller
 * handles resets internally), and rejects everything else with
 * CAM_FUNC_NOTAVAIL.  Must be called with the mps mutex held.
 */
939 mpssas_action(struct cam_sim *sim, union ccb *ccb)
941 	struct mpssas_softc *sassc;
943 	sassc = cam_sim_softc(sim);
945 	MPS_FUNCTRACE(sassc->sc);
946 	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
947 	    ccb->ccb_h.func_code);
948 	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
950 	switch (ccb->ccb_h.func_code) {
953 		struct ccb_pathinq *cpi = &ccb->cpi;
955 		cpi->version_num = 1;
956 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
957 		cpi->target_sprt = 0;
958 #if (__FreeBSD_version >= 1000039) || \
959     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
960 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
962 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
964 		cpi->hba_eng_cnt = 0;
965 		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		/* initiator claims the highest target id for itself */
967 		cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
968 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
969 		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
970 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
971 		cpi->unit_number = cam_sim_unit(sim);
972 		cpi->bus_id = cam_sim_bus(sim);
973 		cpi->base_transfer_speed = 150000;
974 		cpi->transport = XPORT_SAS;
975 		cpi->transport_version = 0;
976 		cpi->protocol = PROTO_SCSI;
977 		cpi->protocol_version = SCSI_REV_SPC;
978 #if __FreeBSD_version >= 800001
980 		 * XXX KDM where does this number come from?
982 		cpi->maxio = 256 * 1024;
984 		cpi->ccb_h.status = CAM_REQ_CMP;
987 	case XPT_GET_TRAN_SETTINGS:
989 		struct ccb_trans_settings	*cts;
990 		struct ccb_trans_settings_sas	*sas;
991 		struct ccb_trans_settings_scsi	*scsi;
992 		struct mpssas_target *targ;
995 		sas = &cts->xport_specific.sas;
996 		scsi = &cts->proto_specific.scsi;
		/* handle 0 means no device is present at this target id */
998 		targ = &sassc->targets[cts->ccb_h.target_id];
999 		if (targ->handle == 0x0) {
1000 			cts->ccb_h.status = CAM_SEL_TIMEOUT;
1004 		cts->protocol_version = SCSI_REV_SPC2;
1005 		cts->transport = XPORT_SAS;
1006 		cts->transport_version = 0;
1008 		sas->valid = CTS_SAS_VALID_SPEED;
1009 		switch (targ->linkrate) {
1011 			sas->bitrate = 150000;
1014 			sas->bitrate = 300000;
1017 			sas->bitrate = 600000;
1023 		cts->protocol = PROTO_SCSI;
1024 		scsi->valid = CTS_SCSI_VALID_TQ;
1025 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1027 		cts->ccb_h.status = CAM_REQ_CMP;
1030 	case XPT_CALC_GEOMETRY:
1031 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1032 		ccb->ccb_h.status = CAM_REQ_CMP;
1035 		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1036 		mpssas_action_resetdev(sassc, ccb);
1041 		mps_dprint(sassc->sc, MPS_XINFO,
1042 		    "mpssas_action faking success for abort or reset\n");
1043 		ccb->ccb_h.status = CAM_REQ_CMP;
1046 		mpssas_action_scsiio(sassc, ccb);
1048 #if __FreeBSD_version >= 900026
1050 		mpssas_action_smpio(sassc, ccb);
1054 		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Broadcast an async event (e.g. AC_BUS_RESET, AC_SENT_BDR) to CAM for the
 * given target/lun on this SIM's bus, creating and freeing a temporary
 * path for the notification.  Failure to create the path is logged and the
 * event is dropped.
 */
1062 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1063     target_id_t target_id, lun_id_t lun_id)
1065 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1066 	struct cam_path *path;
1068 	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
1069 	    ac_code, target_id, lun_id);
1071 	if (xpt_create_path(&path, NULL, 
1072 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1073 		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1078 	xpt_async(ac_code, path, NULL);
1079 	xpt_free_path(path);
/*
 * Called during diag reset: force-complete every outstanding command with
 * a NULL reply.  Polled commands are marked COMPLETE, commands with a
 * completion callback get it invoked, and sleeping waiters (WAKEUP flag)
 * are woken.  A command that matched none of these but is not FREE is
 * logged, since that state should be impossible.  mps mutex must be held.
 */
1083 mpssas_complete_all_commands(struct mps_softc *sc)
1085 	struct mps_command *cm;
1090 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1092 	/* complete all commands with a NULL reply */
1093 	for (i = 1; i < sc->num_reqs; i++) {
1094 		cm = &sc->commands[i];
1095 		cm->cm_reply = NULL;
1098 		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1099 			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1101 		if (cm->cm_complete != NULL) {
1102 			mpssas_log_command(cm, MPS_RECOVERY,
1103 			    "completing cm %p state %x ccb %p for diag reset\n", 
1104 			    cm, cm->cm_state, cm->cm_ccb);
1106 			cm->cm_complete(sc, cm);
1110 		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1111 			mpssas_log_command(cm, MPS_RECOVERY,
1112 			    "waking up cm %p state %x ccb %p for diag reset\n", 
1113 			    cm, cm->cm_state, cm->cm_ccb);
1118 		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1119 			/* this should never happen, but if it does, log */
1120 			mpssas_log_command(cm, MPS_RECOVERY,
1121 			    "cm %p state %x flags 0x%x ccb %p during diag "
1122 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset recovery: re-enter startup mode (freezing the simq via
 * mpssas_startup_increment), tell CAM a bus reset happened, force-complete
 * all outstanding commands, then invalidate every target's firmware
 * handles and mark them MPSSAS_TARGET_INDIAGRESET -- handles may change
 * after the reset and will be relearned by the discovery that port enable
 * triggers.
 */
1129 mpssas_handle_reinit(struct mps_softc *sc)
1133 	/* Go back into startup mode and freeze the simq, so that CAM
1134 	 * doesn't send any commands until after we've rediscovered all
1135 	 * targets and found the proper device handles for them.
1137 	 * After the reset, portenable will trigger discovery, and after all
1138 	 * discovery-related activities have finished, the simq will be
1141 	mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1142 	sc->sassc->flags |= MPSSAS_IN_STARTUP;
1143 	sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1144 	mpssas_startup_increment(sc->sassc);
1146 	/* notify CAM of a bus reset */
1147 	mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 
1150 	/* complete and cleanup after all outstanding commands */
1151 	mpssas_complete_all_commands(sc);
1153 	mps_dprint(sc, MPS_INIT,
1154 	    "%s startup %u tm %u after command completion\n",
1155 	    __func__, sc->sassc->startup_refcount, sc->sassc->tm_count);
1157 	/* zero all the target handles, since they may change after the
1158 	 * reset, and we have to rediscover all the targets and use the new
1161 	for (i = 0; i < sc->facts->MaxTargets; i++) {
1162 		if (sc->sassc->targets[i].outstanding != 0)
1163 			mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1164 			    i, sc->sassc->targets[i].outstanding);
1165 		sc->sassc->targets[i].handle = 0x0;
1166 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1167 		sc->sassc->targets[i].outstanding = 0;
1168 		sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout handler for a task-management command that never completed --
 * log it at recovery priority.  The escalation (elided here) follows;
 * must run with the mps mutex held, as asserted.
 */
1173 mpssas_tm_timeout(void *data)
1175 	struct mps_command *tm = data;
1176 	struct mps_softc *sc = tm->cm_sc;
1178 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1180 	mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1181 	    "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL UNIT RESET task-management command.
 * Stops the TM timeout callout, handles the NULL-reply (diag reset in
 * progress) and chain-frame-error cases, then counts commands still
 * outstanding on the same LUN.  If none remain, recovery for this LU is
 * done: announce AC_SENT_BDR and either start an abort for the next
 * timed-out command on the target or free the TM.  If commands remain,
 * the LU reset effectively failed and recovery escalates to a full
 * target reset via mpssas_send_reset().
 */
1186 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1188 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1189 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1190 	unsigned int cm_count = 0;
1191 	struct mps_command *cm;
1192 	struct mpssas_target *targ;
1194 	callout_stop(&tm->cm_callout);
1196 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1197 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1201 	 * Currently there should be no way we can hit this case.  It only
1202 	 * happens when we have a failure to allocate chain frames, and
1203 	 * task management commands don't have S/G lists.
1204 	 * XXXSL So should it be an assertion?
1206 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1207 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! "
1208 			   "This should not happen!\n", __func__, tm->cm_flags);
1209 		mpssas_free_tm(sc, tm);
1213 	if (reply == NULL) {
1214 		mpssas_log_command(tm, MPS_RECOVERY,
1215 		    "NULL reset reply for tm %p\n", tm);
1216 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1217 			/* this completion was due to a reset, just cleanup */
1218 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1220 			mpssas_free_tm(sc, tm);
1223 			/* we should have gotten a reply. */
1229 	mpssas_log_command(tm, MPS_RECOVERY,
1230 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1231 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1232 	    le32toh(reply->TerminationCount));
1234 	/* See if there are any outstanding commands for this LUN.
1235 	 * This could be made more efficient by using a per-LU data
1236 	 * structure of some sort.
1238 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1239 		if (cm->cm_lun == tm->cm_lun)
1243 	if (cm_count == 0) {
1244 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1245 		    "logical unit %u finished recovery after reset\n",
1248 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1251 		/* we've finished recovery for this logical unit.  check and
1252 		 * see if some other logical unit has a timedout command
1253 		 * that needs to be processed.
1255 		cm = TAILQ_FIRST(&targ->timedout_commands);
1257 			mpssas_send_abort(sc, tm, cm);
1261 			mpssas_free_tm(sc, tm);
1265 		/* if we still have commands for this LUN, the reset
1266 		 * effectively failed, regardless of the status reported.
1267 		 * Escalate to a target reset.
1269 		mpssas_log_command(tm, MPS_RECOVERY,
1270 		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
1272 		mpssas_send_reset(sc, tm,
1273 		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task management request.
 * Cancels the TM timeout, validates the reply, clears the target's
 * INRESET flag, and either declares recovery complete (no outstanding
 * commands remain on the target) or escalates further.
 * NOTE(review): 'targ' is dereferenced below but its assignment is not
 * visible in this chunk (presumably targ = tm->cm_targ) -- confirm.
 */
1278 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1280 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1281 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1282 	struct mpssas_target *targ;
/* Disarm the TM timeout now that a completion arrived. */
1284 	callout_stop(&tm->cm_callout);
1286 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1287 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1291 	 * Currently there should be no way we can hit this case.  It only
1292 	 * happens when we have a failure to allocate chain frames, and
1293 	 * task management commands don't have S/G lists.
1295 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1296 		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1297 		    "This should not happen!\n", __func__, tm->cm_flags);
1298 		mpssas_free_tm(sc, tm);
/* A NULL reply frame normally only happens across a diag reset. */
1302 	if (reply == NULL) {
1303 		mpssas_log_command(tm, MPS_RECOVERY,
1304 		    "NULL reset reply for tm %p\n", tm);
1305 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1306 			/* this completion was due to a reset, just cleanup */
1307 			targ->flags &= ~MPSSAS_TARGET_INRESET;
1309 			mpssas_free_tm(sc, tm);
1312 		/* we should have gotten a reply. */
/* Reply fields are little-endian on the wire; byteswap for logging. */
1318 	mpssas_log_command(tm, MPS_RECOVERY,
1319 	    "target reset status 0x%x code 0x%x count %u\n",
1320 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1321 	    le32toh(reply->TerminationCount));
/* The reset attempt is over either way; clear the in-reset marker. */
1323 	targ->flags &= ~MPSSAS_TARGET_INRESET;
1325 	if (targ->outstanding == 0) {
1326 		/* we've finished recovery for this target and all
1327 		 * of its logical units.
1329 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1330 		    "recovery finished after target reset\n");
/* Tell CAM a bus-device-reset was delivered so it can requeue/retry. */
1332 		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1336 		mpssas_free_tm(sc, tm);
1339 	/* after a target reset, if this target still has
1340 	 * outstanding commands, the reset effectively failed,
1341 	 * regardless of the status reported.  escalate.
1343 	mpssas_log_command(tm, MPS_RECOVERY,
1344 	    "target reset complete for tm %p, but still have %u command(s)\n",
1345 	    tm, targ->outstanding);
/* Seconds allowed for a reset-type TM before mpssas_tm_timeout() fires. */
1350 #define MPS_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI TASK MANAGEMENT request of the given 'type'
 * (logical unit reset or target reset) for the target associated with
 * TM command 'tm'.  Sets the matching completion handler, arms the
 * MPS_RESET_TIMEOUT callout, and maps/sends the command.
 */
1353 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1355 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1356 	struct mpssas_target *target;
1359 	target = tm->cm_targ;
/* Handle 0 means the firmware no longer knows this device; bail out. */
1360 	if (target->handle == 0) {
1361 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1362 		    __func__, target->tid);
/* Fill in the TM request frame; DevHandle is little-endian on the wire. */
1366 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1367 	req->DevHandle = htole16(target->handle);
1368 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1369 	req->TaskType = type;
1371 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1372 		/* XXX Need to handle invalid LUNs */
1373 		MPS_SET_LUN(req->LUN, tm->cm_lun);
1374 		tm->cm_targ->logical_unit_resets++;
1375 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1376 		    "sending logical unit reset\n");
1377 		tm->cm_complete = mpssas_logical_unit_reset_complete;
1379 	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1380 		/* Target reset method = SAS Hard Link Reset / SATA Link Reset */
1381 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1382 		tm->cm_targ->target_resets++;
1383 		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
1384 		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1385 		    "sending target reset\n");
1386 		tm->cm_complete = mpssas_target_reset_complete;
/* Unknown reset type: log and give up (error path continues in gap). */
1389 		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TM requests go through the firmware's high-priority request path. */
1394 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1395 	tm->cm_complete_data = (void *)tm;
/* Arm the timeout watchdog before handing the command to hardware. */
1397 	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1398 	    mpssas_tm_timeout, tm);
1400 	err = mps_map_command(sc, tm);
1402 		mpssas_log_command(tm, MPS_RECOVERY,
1403 		    "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task management request.
 * Cancels the TM timeout, validates the reply, then inspects the
 * target's timedout_commands list to decide whether recovery is done,
 * another abort should be issued, or escalation to a LUN reset is
 * required (the aborted command never came back).
 */
1411 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1413 	struct mps_command *cm;
1414 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1415 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1416 	struct mpssas_target *targ;
/* Disarm the TM timeout now that a completion arrived. */
1418 	callout_stop(&tm->cm_callout);
1420 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1421 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1425 	 * Currently there should be no way we can hit this case.  It only
1426 	 * happens when we have a failure to allocate chain frames, and
1427 	 * task management commands don't have S/G lists.
1429 	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1430 		mpssas_log_command(tm, MPS_RECOVERY,
1431 		    "cm_flags = %#x for abort %p TaskMID %u!\n",
1432 		    tm->cm_flags, tm, le16toh(req->TaskMID));
1433 		mpssas_free_tm(sc, tm);
/* A NULL reply frame normally only happens across a diag reset. */
1437 	if (reply == NULL) {
1438 		mpssas_log_command(tm, MPS_RECOVERY,
1439 		    "NULL abort reply for tm %p TaskMID %u\n",
1440 		    tm, le16toh(req->TaskMID));
1441 		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1442 			/* this completion was due to a reset, just cleanup */
1444 			mpssas_free_tm(sc, tm);
1447 		/* we should have gotten a reply. */
/* Reply fields are little-endian on the wire; byteswap for logging. */
1453 	mpssas_log_command(tm, MPS_RECOVERY,
1454 	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1455 	    le16toh(req->TaskMID),
1456 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1457 	    le32toh(reply->TerminationCount));
/* Peek at the next timed-out command for this target, if any. */
1459 	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1461 	/* if there are no more timedout commands, we're done with
1462 	 * error recovery for this target.
1464 		mpssas_log_command(tm, MPS_RECOVERY,
1465 		    "finished recovery after aborting TaskMID %u\n",
1466 		    le16toh(req->TaskMID));
1469 		mpssas_free_tm(sc, tm);
/* Head of the list is a different SMID: the previous abort worked. */
1471 	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1472 		/* abort success, but we have more timedout commands to abort */
1473 		mpssas_log_command(tm, MPS_RECOVERY,
1474 		    "continuing recovery after aborting TaskMID %u\n",
1475 		    le16toh(req->TaskMID));
/* Reuse this TM frame for the next abort. */
1477 		mpssas_send_abort(sc, tm, cm);
1480 		/* we didn't get a command completion, so the abort
1481 		 * failed as far as we're concerned.  escalate.
1483 		mpssas_log_command(tm, MPS_RECOVERY,
1484 		    "abort failed for TaskMID %u tm %p\n",
1485 		    le16toh(req->TaskMID), tm);
/* Escalate: abort failed, try a logical unit reset instead. */
1487 		mpssas_send_reset(sc, tm,
1488 		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds allowed for an abort TM before mpssas_tm_timeout() fires. */
1492 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK task management request using TM frame
 * 'tm' to abort the timed-out SCSI command 'cm'.  Copies the victim's
 * target/LUN/SMID into the TM request, arms the MPS_ABORT_TIMEOUT
 * callout, and maps/sends the command.
 * NOTE(review): 'targ' is dereferenced below but its assignment
 * (presumably targ = cm->cm_targ) is not visible in this chunk -- confirm.
 */
1495 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1497 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1498 	struct mpssas_target *targ;
/* Handle 0 means the firmware no longer knows this device; bail out. */
1502 	if (targ->handle == 0) {
1503 		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1504 		    __func__, cm->cm_ccb->ccb_h.target_id);
1508 	mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
1509 	    "Aborting command %p\n", cm);
/* Fill in the TM request frame; multi-byte fields are little-endian. */
1511 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1512 	req->DevHandle = htole16(targ->handle);
1513 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1514 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1516 	/* XXX Need to handle invalid LUNs */
1517 	MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* SMID of the command being aborted identifies the task to the IOC. */
1519 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests go through the firmware's high-priority request path. */
1522 	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1523 	tm->cm_complete = mpssas_abort_complete;
1524 	tm->cm_complete_data = (void *)tm;
1525 	tm->cm_targ = cm->cm_targ;
1526 	tm->cm_lun = cm->cm_lun;
/* Arm the timeout watchdog before handing the command to hardware. */
1528 	callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1529 	    mpssas_tm_timeout, tm);
1533 	err = mps_map_command(sc, tm);
1535 		mpssas_log_command(tm, MPS_RECOVERY,
1536 		    "error %d sending abort for cm %p SMID %u\n",
/* NOTE(review): req->TaskMID was stored with htole16(); printing it
 * without le16toh() would show a byte-swapped SMID on big-endian
 * hosts -- confirm whether this log should byteswap. */
1537 		    err, cm, req->TaskMID);
/*
 * Callout handler fired when a normal SCSI I/O command times out.
 * Re-runs the interrupt handler in case the completion is merely
 * pending, and otherwise marks the command TIMEDOUT, queues it on the
 * target's timedout_commands list, and starts (or joins) task
 * management recovery for the target.
 * NOTE(review): 'targ' is dereferenced below but its assignment is not
 * visible in this chunk (presumably targ = cm->cm_targ) -- confirm.
 */
1543 mpssas_scsiio_timeout(void *data)
1545 	struct mps_softc *sc;
1546 	struct mps_command *cm;
1547 	struct mpssas_target *targ;
1549 	cm = (struct mps_command *)data;
1553 	mtx_assert(&sc->mps_mtx, MA_OWNED);
/* NOTE(review): format says "cm %p" but 'sc' is passed -- this debug
 * line prints the softc pointer, not the command; looks like a typo. */
1555 	mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc);
1558 	 * Run the interrupt handler to make sure it's not pending.  This
1559 	 * isn't perfect because the command could have already completed
1560 	 * and been re-used, though this is unlikely.
1562 	mps_intr_locked(sc);
/* If the intr pass completed/freed it, the timeout was a near miss. */
1563 	if (cm->cm_state == MPS_CM_STATE_FREE) {
1564 		mpssas_log_command(cm, MPS_XINFO,
1565 		    "SCSI command %p almost timed out\n", cm);
1569 	if (cm->cm_ccb == NULL) {
1570 		mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1574 	mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n",
1580 	/* XXX first, check the firmware state, to see if it's still
1581 	 * operational.  if not, do a diag reset.
/* Mark the command timed out and park it for TM-based recovery. */
1584 	cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1585 	cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1586 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1588 	if (targ->tm != NULL) {
1589 		/* target already in recovery, just queue up another
1590 		 * timedout command to be processed later.
1592 		mps_dprint(sc, MPS_RECOVERY,
1593 		    "queued timedout cm %p for processing by tm %p\n",
1596 	else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1597 		mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1600 		/* start recovery by aborting the first timedout command */
1601 		mpssas_send_abort(sc, targ->tm, cm);
1604 	/* XXX queue this target up for recovery once a TM becomes
1605 	 * available.  The firmware only has a limited number of
1606 	 * HighPriority credits for the high priority requests used
1607 	 * for task management, and we ran out.
1609 	 * Isilon: don't worry about this for now, since we have
1610 	 * more credits than disks in an enclosure, and limit
1611 	 * ourselves to one TM per target for recovery.
1613 	mps_dprint(sc, MPS_RECOVERY,
1614 	    "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI I/O CCB into an
 * MPI2 SCSI_IO request and submit it to the controller.
 *
 * Flow visible here: validate the target (handle, RAID-component,
 * in-removal, shutdown states), allocate an mps_command (freezing the
 * SIM queue and requeueing the CCB on exhaustion), fill in the MPI2
 * request (direction, tags, TLR, LUN, CDB), optionally set up EEDP
 * (protection information) fields, handle WarpDrive direct-drive I/O,
 * arm the per-command timeout, and map/send the command.
 */
1620 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1622 	MPI2_SCSI_IO_REQUEST *req;
1623 	struct ccb_scsiio *csio;
1624 	struct mps_softc *sc;
1625 	struct mpssas_target *targ;
1626 	struct mpssas_lun *lun;
1627 	struct mps_command *cm;
1628 	uint8_t i, lba_byte, *ref_tag_addr;
1629 	uint16_t eedp_flags;
1630 	uint32_t mpi_control;
1634 	mtx_assert(&sc->mps_mtx, MA_OWNED);
1637 	targ = &sassc->targets[csio->ccb_h.target_id];
1638 	mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0: firmware has no device here; fail as selection timeout. */
1639 	if (targ->handle == 0x0) {
1640 		mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1641 		    __func__, csio->ccb_h.target_id);
1642 		csio->ccb_h.status = CAM_SEL_TIMEOUT;
/* RAID component members must not receive direct SCSI I/O. */
1646 	if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1647 		mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1648 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1649 		csio->ccb_h.status = CAM_TID_INVALID;
1654 	 * Sometimes, it is possible to get a command that is not "In
1655 	 * Progress" and was actually aborted by the upper layer.  Check for
1656 	 * this here and complete the command without error.
1658 	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1659 		mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1660 		    "target %u\n", __func__, csio->ccb_h.target_id);
1665 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1666 	 * that the volume has timed out.  We want volumes to be enumerated
1667 	 * until they are deleted/removed, not just failed.
1669 	if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1670 		if (targ->devinfo == 0)
1671 			csio->ccb_h.status = CAM_REQ_CMP;
1673 			csio->ccb_h.status = CAM_SEL_TIMEOUT;
1678 	if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1679 		mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1680 		csio->ccb_h.status = CAM_TID_INVALID;
/* Out of command frames: freeze the SIM queue and ask CAM to requeue. */
1685 	cm = mps_alloc_command(sc);
1687 		if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1688 			xpt_freeze_simq(sassc->sim, 1);
1689 			sassc->flags |= MPSSAS_QUEUE_FROZEN;
1691 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1692 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI_IO request; wire fields are little-endian. */
1697 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1698 	bzero(req, sizeof(*req));
1699 	req->DevHandle = htole16(targ->handle);
1700 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1702 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1703 	req->SenseBufferLength = MPS_SENSE_LEN;
1705 	req->ChainOffset = 0;
1706 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
1711 	req->DataLength = htole32(csio->dxfer_len);
1712 	req->BidirectionalDataLength = 0;
1713 	req->IoFlags = htole16(csio->cdb_len);
1716 	/* Note: BiDirectional transfers are not supported */
1717 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1719 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
1720 		cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1723 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1724 		cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1728 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field set. */
1732 	if (csio->cdb_len == 32)
1733 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1735 	 * It looks like the hardware doesn't require an explicit tag
1736 	 * number for each transaction.  SAM Task Management not supported
1739 	switch (csio->tag_action) {
1740 	case MSG_HEAD_OF_Q_TAG:
1741 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1743 	case MSG_ORDERED_Q_TAG:
1744 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1747 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1749 	case CAM_TAG_ACTION_NONE:
1750 	case MSG_SIMPLE_Q_TAG:
1752 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Merge in the per-target Transport Layer Retries setting. */
1755 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1756 	req->Control = htole32(mpi_control);
1757 	if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1758 		mps_free_command(sc, cm);
1759 		ccb->ccb_h.status = CAM_LUN_INVALID;
/* CDB may be passed by pointer or inline in the CCB. */
1764 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
1765 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1767 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1768 	req->IoFlags = htole16(csio->cdb_len);
1771 	 * Check if EEDP is supported and enabled.  If it is then check if the
1772 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
1773 	 * is formatted for EEDP support.  If all of this is true, set CDB up
1774 	 * for EEDP transfer.
1776 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1777 	if (sc->eedp_enabled && eedp_flags) {
1778 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
1779 			if (lun->lun_id == csio->ccb_h.target_lun) {
1784 		if ((lun != NULL) && (lun->eedp_formatted)) {
1785 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
1786 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1787 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1788 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1789 			req->EEDPFlags = htole16(eedp_flags);
1792 			 * If CDB less than 32, fill in Primary Ref Tag with
1793 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
1794 			 * already there.  Also, set protection bit.  FreeBSD
1795 			 * currently does not support CDBs bigger than 16, but
1796 			 * the code doesn't hurt, and will be here for the
1799 			if (csio->cdb_len != 32) {
/* LBA starts at byte 6 in 16-byte CDBs, byte 2 otherwise. */
1800 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1801 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1802 				    PrimaryReferenceTag;
1803 				for (i = 0; i < 4; i++) {
1805 					    req->CDB.CDB32[lba_byte + i];
1808 				req->CDB.EEDP32.PrimaryReferenceTag =
1809 					htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1810 				req->CDB.EEDP32.PrimaryApplicationTagMask =
1812 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1816 			    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1817 			req->EEDPFlags = htole16(eedp_flags);
1818 			req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Data phase setup: let busdma walk the CCB for S/G generation. */
1824 	cm->cm_length = csio->dxfer_len;
1825 	if (cm->cm_length != 0) {
1827 		cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1831 	cm->cm_sge = &req->SGL;
1832 	cm->cm_sglsize = (32 - 24) * 4;
1833 	cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1834 	cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1835 	cm->cm_complete = mpssas_scsiio_complete;
1836 	cm->cm_complete_data = ccb;
1838 	cm->cm_lun = csio->ccb_h.target_lun;
1842 	 * If HBA is a WD and the command is not for a retry, try to build a
1843 	 * direct I/O message.  If failed, or the command is for a retry, send
1844 	 * the I/O to the IR volume itself.
1846 	if (sc->WD_valid_config) {
1847 		if (ccb->ccb_h.status != MPS_WD_RETRY) {
1848 			mpssas_direct_drive_io(sassc, cm, ccb);
1850 			ccb->ccb_h.status = CAM_REQ_INPROG;
/* Per-command timeout; CCB timeout is in milliseconds. */
1854 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1855 	    mpssas_scsiio_timeout, cm);
1858 	targ->outstanding++;
1859 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1860 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1862 	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1863 	    __func__, cm, ccb, targ->outstanding);
1865 	mps_map_command(sc, cm);
/*
 * Translate an MPI2 task-management ResponseCode into a human-readable
 * string and emit it via mps_dprint() at MPS_XINFO level.
 * Used for the low byte of a SCSI_IO reply's ResponseInfo field.
 */
1870 mps_response_code(struct mps_softc *sc, u8 response_code)
1874 	switch (response_code) {
1875 	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1876 		desc = "task management request completed";
1878 	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1879 		desc = "invalid frame";
1881 	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1882 		desc = "task management request not supported";
1884 	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1885 		desc = "task management request failed";
1887 	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1888 		desc = "task management request succeeded";
1890 	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1891 		desc = "invalid lun";
/* default-ish case: overlapped tag (constant name not visible here) */
1894 		desc = "overlapped tag attempted";
1896 	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1897 		desc = "task queued, however not sent to target";
1903 	mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
1904 	    response_code, desc);
1907  * mps_sc_failed_io_info - translates a non-successful SCSI_IO request
/*
 * Decode and log the IOCStatus, SCSIStatus, and SCSIState fields of a
 * failed SCSI_IO reply as human-readable strings (MPS_XINFO level),
 * including sense data and response-info decoding when present.
 * Purely diagnostic; does not alter driver state.
 */
1910 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1911     Mpi2SCSIIOReply_t *mpi_reply)
/* Reply fields are little-endian on the wire; byteswap before use. */
1915 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1916 	    MPI2_IOCSTATUS_MASK;
1917 	u8 scsi_state = mpi_reply->SCSIState;
1918 	u8 scsi_status = mpi_reply->SCSIStatus;
1919 	char *desc_ioc_state = NULL;
1920 	char *desc_scsi_status = NULL;
/* scsi_state string is built incrementally in the softc scratch buffer. */
1921 	char *desc_scsi_state = sc->tmp_string;
1922 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is special-cased (handling not visible in this chunk). */
1924 	if (log_info == 0x31170000)
/* Map IOCStatus to a descriptive string. */
1927 	switch (ioc_status) {
1928 	case MPI2_IOCSTATUS_SUCCESS:
1929 		desc_ioc_state = "success";
1931 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1932 		desc_ioc_state = "invalid function";
1934 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1935 		desc_ioc_state = "scsi recovered error";
1937 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1938 		desc_ioc_state = "scsi invalid dev handle";
1940 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1941 		desc_ioc_state = "scsi device not there";
1943 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1944 		desc_ioc_state = "scsi data overrun";
1946 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1947 		desc_ioc_state = "scsi data underrun";
1949 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1950 		desc_ioc_state = "scsi io data error";
1952 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1953 		desc_ioc_state = "scsi protocol error";
1955 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1956 		desc_ioc_state = "scsi task terminated";
1958 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1959 		desc_ioc_state = "scsi residual mismatch";
1961 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1962 		desc_ioc_state = "scsi task mgmt failed";
1964 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1965 		desc_ioc_state = "scsi ioc terminated";
1967 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1968 		desc_ioc_state = "scsi ext terminated";
1970 	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1971 		desc_ioc_state = "eedp guard error";
1973 	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1974 		desc_ioc_state = "eedp ref tag error";
1976 	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1977 		desc_ioc_state = "eedp app tag error";
1980 		desc_ioc_state = "unknown";
/* Map SCSIStatus to a descriptive string. */
1984 	switch (scsi_status) {
1985 	case MPI2_SCSI_STATUS_GOOD:
1986 		desc_scsi_status = "good";
1988 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
1989 		desc_scsi_status = "check condition";
1991 	case MPI2_SCSI_STATUS_CONDITION_MET:
1992 		desc_scsi_status = "condition met";
1994 	case MPI2_SCSI_STATUS_BUSY:
1995 		desc_scsi_status = "busy";
1997 	case MPI2_SCSI_STATUS_INTERMEDIATE:
1998 		desc_scsi_status = "intermediate";
2000 	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2001 		desc_scsi_status = "intermediate condmet";
2003 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2004 		desc_scsi_status = "reservation conflict";
2006 	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2007 		desc_scsi_status = "command terminated";
2009 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
2010 		desc_scsi_status = "task set full";
2012 	case MPI2_SCSI_STATUS_ACA_ACTIVE:
2013 		desc_scsi_status = "aca active";
2015 	case MPI2_SCSI_STATUS_TASK_ABORTED:
2016 		desc_scsi_status = "task aborted";
2019 		desc_scsi_status = "unknown";
/* Build the SCSIState flag string by concatenating set-bit names. */
2023 	desc_scsi_state[0] = '\0';
2025 		desc_scsi_state = " ";
2026 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2027 		strcat(desc_scsi_state, "response info ");
2028 	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2029 		strcat(desc_scsi_state, "state terminated ");
2030 	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2031 		strcat(desc_scsi_state, "no status ");
2032 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2033 		strcat(desc_scsi_state, "autosense failed ");
2034 	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2035 		strcat(desc_scsi_state, "autosense valid ");
2037 	mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2038 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2039 	/* We can add more detail about underflow data here
2042 	mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
2043 	    "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2044 	    desc_scsi_state, scsi_state);
/* Dump sense data only when XINFO debugging is on and sense is valid. */
2046 	if (sc->mps_debug & MPS_XINFO &&
2047 		scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2048 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
2049 		scsi_sense_print(csio);
2050 		mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
2053 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2054 		response_info = le32toh(mpi_reply->ResponseInfo);
2055 		response_bytes = (u8 *)&response_info;
/* Low byte of ResponseInfo carries the TM response code; decode it. */
2056 		mps_response_code(sc,response_bytes[0]);
2061 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
2063 MPI2_SCSI_IO_REPLY *rep;
2065 struct ccb_scsiio *csio;
2066 struct mpssas_softc *sassc;
2067 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2068 u8 *TLR_bits, TLR_on;
2073 mps_dprint(sc, MPS_TRACE,
2074 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2075 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2076 cm->cm_targ->outstanding);
2078 callout_stop(&cm->cm_callout);
2079 mtx_assert(&sc->mps_mtx, MA_OWNED);
2082 ccb = cm->cm_complete_data;
2084 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2086 * XXX KDM if the chain allocation fails, does it matter if we do
2087 * the sync and unload here? It is simpler to do it in every case,
2088 * assuming it doesn't cause problems.
2090 if (cm->cm_data != NULL) {
2091 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2092 dir = BUS_DMASYNC_POSTREAD;
2093 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2094 dir = BUS_DMASYNC_POSTWRITE;
2095 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2096 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2099 cm->cm_targ->completed++;
2100 cm->cm_targ->outstanding--;
2101 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2102 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2104 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2105 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2106 if (cm->cm_reply != NULL)
2107 mpssas_log_command(cm, MPS_RECOVERY,
2108 "completed timedout cm %p ccb %p during recovery "
2109 "ioc %x scsi %x state %x xfer %u\n",
2111 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2112 le32toh(rep->TransferCount));
2114 mpssas_log_command(cm, MPS_RECOVERY,
2115 "completed timedout cm %p ccb %p during recovery\n",
2117 } else if (cm->cm_targ->tm != NULL) {
2118 if (cm->cm_reply != NULL)
2119 mpssas_log_command(cm, MPS_RECOVERY,
2120 "completed cm %p ccb %p during recovery "
2121 "ioc %x scsi %x state %x xfer %u\n",
2123 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2124 le32toh(rep->TransferCount));
2126 mpssas_log_command(cm, MPS_RECOVERY,
2127 "completed cm %p ccb %p during recovery\n",
2129 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2130 mpssas_log_command(cm, MPS_RECOVERY,
2131 "reset completed cm %p ccb %p\n",
2135 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2137 * We ran into an error after we tried to map the command,
2138 * so we're getting a callback without queueing the command
2139 * to the hardware. So we set the status here, and it will
2140 * be retained below. We'll go through the "fast path",
2141 * because there can be no reply when we haven't actually
2142 * gone out to the hardware.
2144 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2147 * Currently the only error included in the mask is
2148 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2149 * chain frames. We need to freeze the queue until we get
2150 * a command that completed without this error, which will
2151 * hopefully have some chain frames attached that we can
2152 * use. If we wanted to get smarter about it, we would
2153 * only unfreeze the queue in this condition when we're
2154 * sure that we're getting some chain frames back. That's
2155 * probably unnecessary.
2157 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2158 xpt_freeze_simq(sassc->sim, 1);
2159 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2160 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2161 "freezing SIM queue\n");
2165 /* Take the fast path to completion */
2166 if (cm->cm_reply == NULL) {
2167 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2168 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2169 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2171 ccb->ccb_h.status = CAM_REQ_CMP;
2172 ccb->csio.scsi_status = SCSI_STATUS_OK;
2174 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2175 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2176 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2177 mps_dprint(sc, MPS_XINFO,
2178 "Unfreezing SIM queue\n");
2183 * There are two scenarios where the status won't be
2184 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2185 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2187 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2189 * Freeze the dev queue so that commands are
2190 * executed in the correct order with after error
2193 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2194 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2196 mps_free_command(sc, cm);
2201 mpssas_log_command(cm, MPS_XINFO,
2202 "ioc %x scsi %x state %x xfer %u\n",
2203 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2204 le32toh(rep->TransferCount));
2207 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2208 * Volume if an error occurred (normal I/O retry). Use the original
2209 * CCB, but set a flag that this will be a retry so that it's sent to
2210 * the original volume. Free the command but reuse the CCB.
2212 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2213 mps_free_command(sc, cm);
2214 ccb->ccb_h.status = MPS_WD_RETRY;
2215 mpssas_action_scsiio(sassc, ccb);
2219 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2220 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2221 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2223 case MPI2_IOCSTATUS_SUCCESS:
2224 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2226 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2227 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2228 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2230 /* Completion failed at the transport level. */
2231 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2232 MPI2_SCSI_STATE_TERMINATED)) {
2233 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2237 /* In a modern packetized environment, an autosense failure
2238 * implies that there's not much else that can be done to
2239 * recover the command.
2241 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2242 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2247 * CAM doesn't care about SAS Response Info data, but if this is
2248 * the state check if TLR should be done. If not, clear the
2249 * TLR_bits for the target.
2251 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2252 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) ==
2253 MPS_SCSI_RI_INVALID_FRAME)) {
2254 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2255 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2259 * Intentionally override the normal SCSI status reporting
2260 * for these two cases. These are likely to happen in a
2261 * multi-initiator environment, and we want to make sure that
2262 * CAM retries these commands rather than fail them.
2264 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2265 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2266 ccb->ccb_h.status = CAM_REQ_ABORTED;
2270 /* Handle normal status and sense */
2271 csio->scsi_status = rep->SCSIStatus;
2272 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2273 ccb->ccb_h.status = CAM_REQ_CMP;
2275 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2277 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2278 int sense_len, returned_sense_len;
2280 returned_sense_len = min(le32toh(rep->SenseCount),
2281 sizeof(struct scsi_sense_data));
2282 if (returned_sense_len < ccb->csio.sense_len)
2283 ccb->csio.sense_resid = ccb->csio.sense_len -
2286 ccb->csio.sense_resid = 0;
2288 sense_len = min(returned_sense_len,
2289 ccb->csio.sense_len - ccb->csio.sense_resid);
2290 bzero(&ccb->csio.sense_data,
2291 sizeof(ccb->csio.sense_data));
2292 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2293 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2297 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2298 * and it's page code 0 (Supported Page List), and there is
2299 * inquiry data, and this is for a sequential access device, and
2300 * the device is an SSP target, and TLR is supported by the
2301 * controller, turn the TLR_bits value ON if page 0x90 is
2304 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2305 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2306 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2307 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2308 (csio->data_ptr != NULL) &&
2309 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2310 (sc->control_TLR) &&
2311 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2312 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2313 vpd_list = (struct scsi_vpd_supported_page_list *)
2315 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2317 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2318 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2319 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2320 csio->cdb_io.cdb_bytes[4];
2321 alloc_len -= csio->resid;
2322 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2323 if (vpd_list->list[i] == 0x90) {
2330 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2331 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2333 * If devinfo is 0 this will be a volume. In that case don't
2334 * tell CAM that the volume is not there. We want volumes to
2335 * be enumerated until they are deleted/removed, not just
2338 if (cm->cm_targ->devinfo == 0)
2339 ccb->ccb_h.status = CAM_REQ_CMP;
2341 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2343 case MPI2_IOCSTATUS_INVALID_SGL:
2344 mps_print_scsiio_cmd(sc, cm);
2345 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2347 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2349 * This is one of the responses that comes back when an I/O
2350 * has been aborted. If it is because of a timeout that we
2351 * initiated, just set the status to CAM_CMD_TIMEOUT.
2352 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2353 * command is the same (it gets retried, subject to the
2354 * retry counter), the only difference is what gets printed
2357 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2358 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2360 ccb->ccb_h.status = CAM_REQ_ABORTED;
2362 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2363 /* resid is ignored for this condition */
2365 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2367 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2368 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2370 * Since these are generally external (i.e. hopefully
2371 * transient transport-related) errors, retry these without
2372 * decrementing the retry count.
2374 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2375 mpssas_log_command(cm, MPS_INFO,
2376 "terminated ioc %x scsi %x state %x xfer %u\n",
2377 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2378 le32toh(rep->TransferCount));
2380 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2381 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2382 case MPI2_IOCSTATUS_INVALID_VPID:
2383 case MPI2_IOCSTATUS_INVALID_FIELD:
2384 case MPI2_IOCSTATUS_INVALID_STATE:
2385 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2386 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2387 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2388 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2389 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2391 mpssas_log_command(cm, MPS_XINFO,
2392 "completed ioc %x scsi %x state %x xfer %u\n",
2393 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2394 le32toh(rep->TransferCount));
2395 csio->resid = cm->cm_length;
2396 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2400 mps_sc_failed_io_info(sc,csio,rep);
2402 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2403 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2404 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2405 mps_dprint(sc, MPS_XINFO, "Command completed, "
2406 "unfreezing SIM queue\n");
2409 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2410 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2411 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2414 mps_free_command(sc, cm);
2418 /* All Request reached here are Endian safe */
2420 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2422 pMpi2SCSIIORequest_t pIO_req;
2423 struct mps_softc *sc = sassc->sc;
2425 uint32_t physLBA, stripe_offset, stripe_unit;
2426 uint32_t io_size, column;
2427 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2430 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2431 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2432 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2433 * bit different than the 10/16 CDBs, handle them separately.
2435 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2436 CDB = pIO_req->CDB.CDB32;
2439 * Handle 6 byte CDBs.
2441 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2442 (CDB[0] == WRITE_6))) {
2444 * Get the transfer size in blocks.
2446 io_size = (cm->cm_length >> sc->DD_block_exponent);
2449 * Get virtual LBA given in the CDB.
2451 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2452 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2455 * Check that LBA range for I/O does not exceed volume's
2458 if ((virtLBA + (uint64_t)io_size - 1) <=
2461 * Check if the I/O crosses a stripe boundary. If not,
2462 * translate the virtual LBA to a physical LBA and set
2463 * the DevHandle for the PhysDisk to be used. If it
2464 * does cross a boundry, do normal I/O. To get the
2465 * right DevHandle to use, get the map number for the
2466 * column, then use that map number to look up the
2467 * DevHandle of the PhysDisk.
2469 stripe_offset = (uint32_t)virtLBA &
2470 (sc->DD_stripe_size - 1);
2471 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2472 physLBA = (uint32_t)virtLBA >>
2473 sc->DD_stripe_exponent;
2474 stripe_unit = physLBA / sc->DD_num_phys_disks;
2475 column = physLBA % sc->DD_num_phys_disks;
2476 pIO_req->DevHandle =
2477 htole16(sc->DD_column_map[column].dev_handle);
2478 /* ???? Is this endian safe*/
2479 cm->cm_desc.SCSIIO.DevHandle =
2482 physLBA = (stripe_unit <<
2483 sc->DD_stripe_exponent) + stripe_offset;
2484 ptrLBA = &pIO_req->CDB.CDB32[1];
2485 physLBA_byte = (uint8_t)(physLBA >> 16);
2486 *ptrLBA = physLBA_byte;
2487 ptrLBA = &pIO_req->CDB.CDB32[2];
2488 physLBA_byte = (uint8_t)(physLBA >> 8);
2489 *ptrLBA = physLBA_byte;
2490 ptrLBA = &pIO_req->CDB.CDB32[3];
2491 physLBA_byte = (uint8_t)physLBA;
2492 *ptrLBA = physLBA_byte;
2495 * Set flag that Direct Drive I/O is
2498 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2505 * Handle 10, 12 or 16 byte CDBs.
2507 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2508 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2509 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2510 (CDB[0] == WRITE_12))) {
2512 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2513 * are 0. If not, this is accessing beyond 2TB so handle it in
2514 * the else section. 10-byte and 12-byte CDB's are OK.
2515 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2516 * ready to accept 12byte CDB for Direct IOs.
2518 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2519 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2520 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2522 * Get the transfer size in blocks.
2524 io_size = (cm->cm_length >> sc->DD_block_exponent);
2527 * Get virtual LBA. Point to correct lower 4 bytes of
2528 * LBA in the CDB depending on command.
2530 lba_idx = ((CDB[0] == READ_12) ||
2531 (CDB[0] == WRITE_12) ||
2532 (CDB[0] == READ_10) ||
2533 (CDB[0] == WRITE_10))? 2 : 6;
2534 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2535 ((uint64_t)CDB[lba_idx + 1] << 16) |
2536 ((uint64_t)CDB[lba_idx + 2] << 8) |
2537 (uint64_t)CDB[lba_idx + 3];
2540 * Check that LBA range for I/O does not exceed volume's
2543 if ((virtLBA + (uint64_t)io_size - 1) <=
2546 * Check if the I/O crosses a stripe boundary.
2547 * If not, translate the virtual LBA to a
2548 * physical LBA and set the DevHandle for the
2549 * PhysDisk to be used. If it does cross a
2550 * boundry, do normal I/O. To get the right
2551 * DevHandle to use, get the map number for the
2552 * column, then use that map number to look up
2553 * the DevHandle of the PhysDisk.
2555 stripe_offset = (uint32_t)virtLBA &
2556 (sc->DD_stripe_size - 1);
2557 if ((stripe_offset + io_size) <=
2558 sc->DD_stripe_size) {
2559 physLBA = (uint32_t)virtLBA >>
2560 sc->DD_stripe_exponent;
2561 stripe_unit = physLBA /
2562 sc->DD_num_phys_disks;
2564 sc->DD_num_phys_disks;
2565 pIO_req->DevHandle =
2566 htole16(sc->DD_column_map[column].
2568 cm->cm_desc.SCSIIO.DevHandle =
2571 physLBA = (stripe_unit <<
2572 sc->DD_stripe_exponent) +
2575 &pIO_req->CDB.CDB32[lba_idx];
2576 physLBA_byte = (uint8_t)(physLBA >> 24);
2577 *ptrLBA = physLBA_byte;
2579 &pIO_req->CDB.CDB32[lba_idx + 1];
2580 physLBA_byte = (uint8_t)(physLBA >> 16);
2581 *ptrLBA = physLBA_byte;
2583 &pIO_req->CDB.CDB32[lba_idx + 2];
2584 physLBA_byte = (uint8_t)(physLBA >> 8);
2585 *ptrLBA = physLBA_byte;
2587 &pIO_req->CDB.CDB32[lba_idx + 3];
2588 physLBA_byte = (uint8_t)physLBA;
2589 *ptrLBA = physLBA_byte;
2592 * Set flag that Direct Drive I/O is
2595 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2600 * 16-byte CDB and the upper 4 bytes of the CDB are not
2601 * 0. Get the transfer size in blocks.
2603 io_size = (cm->cm_length >> sc->DD_block_exponent);
2608 virtLBA = ((uint64_t)CDB[2] << 54) |
2609 ((uint64_t)CDB[3] << 48) |
2610 ((uint64_t)CDB[4] << 40) |
2611 ((uint64_t)CDB[5] << 32) |
2612 ((uint64_t)CDB[6] << 24) |
2613 ((uint64_t)CDB[7] << 16) |
2614 ((uint64_t)CDB[8] << 8) |
2618 * Check that LBA range for I/O does not exceed volume's
2621 if ((virtLBA + (uint64_t)io_size - 1) <=
2624 * Check if the I/O crosses a stripe boundary.
2625 * If not, translate the virtual LBA to a
2626 * physical LBA and set the DevHandle for the
2627 * PhysDisk to be used. If it does cross a
2628 * boundry, do normal I/O. To get the right
2629 * DevHandle to use, get the map number for the
2630 * column, then use that map number to look up
2631 * the DevHandle of the PhysDisk.
2633 stripe_offset = (uint32_t)virtLBA &
2634 (sc->DD_stripe_size - 1);
2635 if ((stripe_offset + io_size) <=
2636 sc->DD_stripe_size) {
2637 physLBA = (uint32_t)(virtLBA >>
2638 sc->DD_stripe_exponent);
2639 stripe_unit = physLBA /
2640 sc->DD_num_phys_disks;
2642 sc->DD_num_phys_disks;
2643 pIO_req->DevHandle =
2644 htole16(sc->DD_column_map[column].
2646 cm->cm_desc.SCSIIO.DevHandle =
2649 physLBA = (stripe_unit <<
2650 sc->DD_stripe_exponent) +
2654 * Set upper 4 bytes of LBA to 0. We
2655 * assume that the phys disks are less
2656 * than 2 TB's in size. Then, set the
2659 pIO_req->CDB.CDB32[2] = 0;
2660 pIO_req->CDB.CDB32[3] = 0;
2661 pIO_req->CDB.CDB32[4] = 0;
2662 pIO_req->CDB.CDB32[5] = 0;
2663 ptrLBA = &pIO_req->CDB.CDB32[6];
2664 physLBA_byte = (uint8_t)(physLBA >> 24);
2665 *ptrLBA = physLBA_byte;
2666 ptrLBA = &pIO_req->CDB.CDB32[7];
2667 physLBA_byte = (uint8_t)(physLBA >> 16);
2668 *ptrLBA = physLBA_byte;
2669 ptrLBA = &pIO_req->CDB.CDB32[8];
2670 physLBA_byte = (uint8_t)(physLBA >> 8);
2671 *ptrLBA = physLBA_byte;
2672 ptrLBA = &pIO_req->CDB.CDB32[9];
2673 physLBA_byte = (uint8_t)physLBA;
2674 *ptrLBA = physLBA_byte;
2677 * Set flag that Direct Drive I/O is
2680 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2687 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command queued by
 * mpssas_send_smpcmd().  Translates the firmware reply into a CAM status
 * on the user's CCB, then syncs/unloads the DMA map and frees the command.
 *
 * NOTE(review): several lines (local declarations, 'if'/'else' heads and
 * closing braces) are missing from this copy of the file; the comments
 * below describe only the visible logic.
 */
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;

	/* The CCB to complete was stashed on the command at submit time. */
	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case. It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only. That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
		    __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	/* NOTE(review): the NULL-reply check guarding the next two lines is
	 * not visible in this copy of the file. */
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

	/* Reassemble the 64-bit SAS address from the little-endian request. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	/* Fail the CCB if either the IOC or the SAS layer reported an error. */
	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
	    "%#jx completed successfully\n", __func__,
	    (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the SMP function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR; /* else-arm; 'else' line missing in this copy */

	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the given SAS address on
 * behalf of an XPT_SMP_IO CCB.  Validates the CCB's data-addressing mode,
 * builds an MPI2 SMP_PASSTHROUGH request, and maps it with a two-element
 * uio (one iovec for the request frame, one for the response frame).
 * Completion is handled by mpssas_smpio_complete().
 *
 * NOTE(review): some case labels, braces and early-return lines are
 * missing from this copy of the file; comments describe the visible logic.
 */
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
		    "%s: physical addresses not supported\n", __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;

	/* NOTE(review): the S/G virtual-address case label is not visible
	 * in this copy; the following lines handle S/G-list CCBs. */
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		|| (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: multiple request or response "
			    "buffer segments not supported for SMP\n",
			ccb->ccb_h.status = CAM_REQ_INVALID;

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two. So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			/* Single-element S/G list: use its virtual address. */
			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
			request = ccb->smpio.smp_request; /* else-arm; 'else' line missing */
		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
			response = ccb->smpio.smp_response; /* else-arm; 'else' line missing */
	case CAM_DATA_VADDR:
		/* Plain virtual buffers: use the CCB pointers directly. */
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		/* NOTE(review): default-case label is missing in this copy. */
		ccb->ccb_h.status = CAM_REQ_INVALID;

	cm = mps_alloc_command(sc);
	/* NOTE(review): the NULL check for 'cm' is missing in this copy. */
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	/* NOTE(review): the 'req->SGLFlags =' head of this assignment is
	 * missing in this copy. */
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command(). This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case. This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 carries the request frame, iovec 1 the response frame. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments. The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request. There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
	    MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests. We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least. Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: error %d returned from mps_map_command()\n",

	/* Error-exit path: release the command and fail the CCB.
	 * NOTE(review): the label/brace introducing this path is missing
	 * in this copy. */
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * to talk to (the device itself if it embeds an SMP target, otherwise its
 * parent expander), then hand off to mpssas_send_smpcmd().
 *
 * NOTE(review): this copy is missing some braces/early-exit lines and the
 * boundaries of a deliberately disabled code region; comments describe
 * the visible logic only.
 */
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: target %d does not exist!\n", __func__,
		    ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we have to go through the expander, so
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target. LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices. So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
	/* NOTE(review): per the comment above, the following line is part of
	 * a disabled alternative in the full file; the disabling construct
	 * is not visible in this copy. */
	sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
#ifdef OLD_MPS_PROBE
	struct mpssas_target *parent_target;

	if (targ->parent_handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: handle %d does not have a valid "
		    "parent handle!\n", __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
#ifdef OLD_MPS_PROBE
	/* Old probe method: look the parent target up by handle. */
	parent_target = mpssas_find_target_by_handle(sassc, 0,
	    targ->parent_handle);

	if (parent_target == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: handle %d does not have a valid "
		    "parent target!\n", __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;

	if ((parent_target->devinfo &
	    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: handle %d parent %d does not "
		    "have an SMP target!\n", __func__,
		    targ->handle, parent_target->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;

	sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
	/* Current method: use the parent devinfo cached on the target. */
	if ((targ->parent_devinfo &
	    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: handle %d parent %d does not "
		    "have an SMP target!\n", __func__,
		    targ->handle, targ->parent_handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;

	if (targ->parent_sasaddr == 0x0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: handle %d parent handle %d does "
		    "not have a valid SAS address!\n",
		    __func__, targ->handle, targ->parent_handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;

	sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	/* No usable SAS address found by any method: fail the CCB. */
	mps_dprint(sc, MPS_INFO,
	    "%s: unable to find SAS address for handle %d\n",
	    __func__, targ->handle);
	ccb->ccb_h.status = CAM_REQ_INVALID;

	mpssas_send_smpcmd(sassc, ccb, sasaddr);
3058 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command and
 * issue a Target Reset (with SAS hard link reset / SATA link reset
 * semantics) to the target addressed by the CCB.  Completion is handled
 * by mpssas_resetdev_complete().
 */
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	/* NOTE(review): the assignment of 'sc' (presumably sc = sassc->sc)
	 * and the NULL check for 'tm' are missing from this copy. */
	tm = mps_alloc_command(sc);
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	/* Task management requests go through the high-priority queue. */
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;

	mps_map_command(sc, tm);
/*
 * Completion handler for the Target Reset issued by
 * mpssas_action_resetdev().  Maps the task-management reply onto the
 * user's CCB status, announces the reset to CAM on success, and frees the
 * task-management command.
 */
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;

	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case. It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for reset of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	/* TM_COMPLETE means the reset succeeded; tell CAM a BDR was sent. */
	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,

		ccb->ccb_h.status = CAM_REQ_CMP_ERR; /* else-arm; 'else' line missing in this copy */

	mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll routine, called with interrupts disabled (e.g. during
 * kernel dumps or panic).  Disables MPS_TRACE debugging first, then
 * services the hardware by calling the interrupt handler directly.
 */
mpssas_poll(struct cam_sim *sim)
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);
	if (sassc->sc->mps_debug & MPS_TRACE) {
		/* frequent debug messages during a panic just slow
		 * everything down too much.
		 */
		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
		sassc->sc->mps_debug &= ~MPS_TRACE;

	mps_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  Two events are of interest:
 *  - AC_ADVINFO_CHANGED (newer FreeBSD versions): re-read the long read
 *    capacity data to refresh the per-LUN EEDP (protection) state.
 *  - AC_FOUND_DEVICE (older versions): probe the new device for EEDP
 *    support via mpssas_check_eedp().
 *
 * NOTE(review): a number of lines (local declarations, 'break's and
 * braces) are missing from this copy; comments describe the visible
 * logic only.
 */
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;

		/* The async arg carries the advinfo buffer type. */
		buftype = (uintptr_t)arg;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)

		/* Find (or, below, create) the LUN record for this path. */
		lunid = xpt_path_lun_id(path);

		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);

		/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
		bzero(&rcap_buf, sizeof(rcap_buf));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,

		/* PROT_EN in the read-cap data means the LUN is EEDP-formatted. */
		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
			lun->eedp_formatted = FALSE; /* else-arm; 'else' line missing in this copy */
			lun->eedp_block_size = 0;

	case AC_FOUND_DEVICE: {
		struct ccb_getdev *cgd;

		mpssas_check_eedp(sc, path, cgd);
3261 #if (__FreeBSD_version < 901503) || \
3262 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Probe a newly-found device for EEDP (protection information) support.
 * If the inquiry data advertises SPC3_SID_PROTECT, issue an internal
 * READ CAPACITY(16) to the LUN; mpssas_read_cap_done() parses the result
 * and records the EEDP state on the LUN entry.  Used on FreeBSD versions
 * that lack the AC_ADVINFO_CHANGED async event (see the surrounding
 * version guards).
 *
 * NOTE(review): several error-path lines and braces are missing from this
 * copy; comments describe the visible logic only.
 */
mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd)
	struct mpssas_softc *sassc = sc->sassc;
	struct ccb_scsiio *csio;
	struct scsi_read_capacity_16 *scsi_cmd;
	struct scsi_read_capacity_eedp *rcap_buf;
	target_id_t targetid;
	struct cam_path *local_path;
	struct mpssas_target *target;
	struct mpssas_lun *lun;

	/* Identify the device this path refers to. */
	pathid = cam_sim_path(sassc->sim);
	targetid = xpt_path_target_id(path);
	lunid = xpt_path_lun_id(path);

	target = &sassc->targets[targetid];
	if (target->handle == 0x0)

	/*
	 * Determine if the device is EEDP capable.
	 *
	 * If this flag is set in the inquiry data,
	 * the device supports protection information,
	 * and must support the 16 byte read
	 * capacity command, otherwise continue without
	 * sending read cap 16
	 */
	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)

	/*
	 * Issue a READ CAPACITY 16 command. This info
	 * is used to determine if the LUN is formatted
	 * for EEDP support.
	 */
	ccb = xpt_alloc_ccb_nowait();
		mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
		    "for EEDP support.\n");

	if (xpt_create_path(&local_path, xpt_periph,
	    pathid, targetid, lunid) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "Unable to create "
		    "path for EEDP support\n");

	/*
	 * If LUN is already in list, don't create a new
	 * one.
	 */
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id == lunid) {

	/* Not found: allocate and insert a new LUN record. */
	lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
		mps_dprint(sc, MPS_ERROR,
		    "Unable to alloc LUN for EEDP support.\n");
		xpt_free_path(local_path);

	lun->lun_id = lunid;
	SLIST_INSERT_HEAD(&target->luns, lun,

	xpt_path_string(local_path, path_str, sizeof(path_str));
	mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
	    path_str, target->handle);

	/*
	 * Issue a READ CAPACITY 16 command for the LUN.
	 * The mpssas_read_cap_done function will load
	 * the read cap info into the LUN struct.
	 */
	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
	    M_MPT2, M_NOWAIT | M_ZERO);
	if (rcap_buf == NULL) {
		mps_dprint(sc, MPS_FAULT,
		    "Unable to alloc read capacity buffer for EEDP support.\n");
		xpt_free_path(ccb->ccb_h.path);

	/* Build the internal READ CAPACITY(16) CCB. */
	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
	csio->ccb_h.func_code = XPT_SCSI_IO;
	csio->ccb_h.flags = CAM_DIR_IN;
	csio->ccb_h.retry_count = 4;
	csio->ccb_h.cbfcnp = mpssas_read_cap_done;
	csio->ccb_h.timeout = 60000;
	csio->data_ptr = (uint8_t *)rcap_buf;
	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
	csio->sense_len = MPS_SENSE_LEN;
	csio->cdb_len = sizeof(*scsi_cmd);
	csio->tag_action = MSG_SIMPLE_Q_TAG;

	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	/* 0x9E = SERVICE ACTION IN(16); service action selects READ CAP 16. */
	scsi_cmd->opcode = 0x9E;
	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);

	/* Stash the softc so the completion callback can find it. */
	ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internal READ CAPACITY(16) issued by
 * mpssas_check_eedp().  Releases the device queue if frozen, finds the
 * matching LUN record on the target and records whether the LUN is
 * EEDP-formatted (PROT_EN bit) and its block size, then frees the data
 * buffer, the path and the CCB.
 */
mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
	struct mpssas_softc *sassc;
	struct mpssas_target *target;
	struct mpssas_lun *lun;
	struct scsi_read_capacity_eedp *rcap_buf;

	if (done_ccb == NULL)

	/* Driver need to release devq, it Scsi command is
	 * generated by driver internally.
	 * Currently there is a single place where driver
	 * calls scsi command internally. In future if driver
	 * calls more scsi command internally, it needs to release
	 * devq internally, since those command will not go back to
	 * cam_periph.
	 */
	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		xpt_release_devq(done_ccb->ccb_h.path,
		    /*count*/ 1, /*run_queue*/TRUE);

	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;

	/*
	 * Get the LUN ID for the path and look it up in the LUN list for the
	 * target.
	 */
	sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
	target = &sassc->targets[done_ccb->ccb_h.target_id];
	SLIST_FOREACH(lun, &target->luns, lun_link) {
		if (lun->lun_id != done_ccb->ccb_h.target_lun)

		/*
		 * Got the LUN in the target's LUN list. Fill it in
		 * with EEDP info. If the READ CAP 16 command had some
		 * SCSI error (common if command is not supported), mark
		 * the lun as not supporting EEDP and set the block size
		 * to 0.
		 */
		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
			lun->eedp_formatted = FALSE;
			lun->eedp_block_size = 0;

		/* Bit 0 of the protect byte is PROT_EN. */
		if (rcap_buf->protect & 0x01) {
			mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
			    "target ID %d is formatted for EEDP "
			    "support.\n", done_ccb->ccb_h.target_lun,
			    done_ccb->ccb_h.target_id);
			lun->eedp_formatted = TRUE;
			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);

	// Finished with this CCB and path.
	free(rcap_buf, M_MPT2);
	xpt_free_path(done_ccb->ccb_h.path);
	xpt_free_ccb(done_ccb);
3456 #endif /* (__FreeBSD_version < 901503) || \
3457 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Kick off SAS discovery: raise wait_for_port_enable and send the port
 * enable request to the IOC.  mpssas_portenable_complete() clears the
 * flag when discovery is done.
 */
mpssas_startup(struct mps_softc *sc)
	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mpssas_send_portenable(sc);
/*
 * Allocate a command and send an MPI2 PORT_ENABLE request to the IOC,
 * starting firmware device discovery.  Completion is handled by
 * mpssas_portenable_complete().
 *
 * NOTE(review): the early-return statement for the allocation-failure
 * branch is missing from this copy of the file.
 */
mpssas_send_portenable(struct mps_softc *sc)
	MPI2_PORT_ENABLE_REQUEST *request;
	struct mps_command *cm;

	if ((cm = mps_alloc_command(sc)) == NULL)

	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
	request->Function = MPI2_FUNCTION_PORT_ENABLE;
	request->MsgFlags = 0;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_portenable_complete;

	mps_map_command(sc, cm);
	mps_dprint(sc, MPS_XINFO,
	    "mps_send_portenable finished cm %p req %p complete %p\n",
	    cm, cm->cm_req, cm->cm_complete);
3500 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3502 MPI2_PORT_ENABLE_REPLY *reply;
3503 struct mpssas_softc *sassc;
3509 * Currently there should be no way we can hit this case. It only
3510 * happens when we have a failure to allocate chain frames, and
3511 * port enable commands don't have S/G lists.
3513 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3514 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3515 "This should not happen!\n", __func__, cm->cm_flags);
3518 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3520 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3521 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3522 MPI2_IOCSTATUS_SUCCESS)
3523 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3525 mps_free_command(sc, cm);
3526 if (sc->mps_ich.ich_arg != NULL) {
3527 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n");
3528 config_intrhook_disestablish(&sc->mps_ich);
3529 sc->mps_ich.ich_arg = NULL;
3533 * Get WarpDrive info after discovery is complete but before the scan
3534 * starts. At this point, all devices are ready to be exposed to the
3535 * OS. If devices should be hidden instead, take them out of the
3536 * 'targets' array before the scan. The devinfo for a disk will have
3537 * some info and a volume's will be 0. Use that to remove disks.
3539 mps_wd_config_pages(sc);
3542 * Done waiting for port enable to complete. Decrement the refcount.
3543 * If refcount is 0, discovery is complete and a rescan of the bus can
3544 * take place. Since the simq was explicitly frozen before port
3545 * enable, it must be explicitly released here to keep the
3546 * freeze/release count in sync.
3548 sc->wait_for_port_enable = 0;
3549 sc->port_enable_complete = 1;
3550 wakeup(&sc->port_enable_complete);
3551 mpssas_startup_decrement(sassc);