2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for Avago Technologies (LSI) MPT3 */
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
75 #include <dev/mpr/mpi/mpi2_type.h>
76 #include <dev/mpr/mpi/mpi2.h>
77 #include <dev/mpr/mpi/mpi2_ioc.h>
78 #include <dev/mpr/mpi/mpi2_sas.h>
79 #include <dev/mpr/mpi/mpi2_cnfg.h>
80 #include <dev/mpr/mpi/mpi2_init.h>
81 #include <dev/mpr/mpi/mpi2_tool.h>
82 #include <dev/mpr/mpr_ioctl.h>
83 #include <dev/mpr/mprvar.h>
84 #include <dev/mpr/mpr_table.h>
85 #include <dev/mpr/mpr_sas.h>
/* Discovery timeout tunables: one timeout interval (seconds) and how many
 * consecutive timeouts to tolerate before giving up on discovery. */
87 #define MPRSAS_DISCOVERY_TIMEOUT 20
88 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
91 * static array to check SCSI OpCode for EEDP protection bits
/* PRO_R: EEDP check/remove on reads; PRO_W: EEDP insert on writes.
 * NOTE(review): PRO_V is defined identically to PRO_W (INSERT_OP) — verify
 * against the MPI2 EEDP flag definitions that this is intentional. */
93 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
94 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
95 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/* Lookup table indexed by SCSI CDB opcode (0x00-0xFF); nonzero entries give
 * the EEDP flags to apply for that opcode (READ/WRITE/VERIFY 6/10/12/16
 * families).  NOTE(review): the closing brace of this initializer is not
 * visible in this listing (gap in the embedded numbering). */
96 static uint8_t op_code_prot[256] = {
97 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
106 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Driver-private malloc type for all mprsas allocations. */
115 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
117 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
120 static void mprsas_poll(struct cam_sim *sim);
121 static void mprsas_scsiio_timeout(void *data);
122 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
123 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
124 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
125 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
126 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
127 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
128 struct mpr_command *cm);
129 static void mprsas_async(void *callback_arg, uint32_t code,
130 struct cam_path *path, void *arg);
131 #if (__FreeBSD_version < 901503) || \
132 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
133 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
134 struct ccb_getdev *cgd);
135 static void mprsas_read_cap_done(struct cam_periph *periph,
136 union ccb *done_ccb);
138 static int mprsas_send_portenable(struct mpr_softc *sc);
139 static void mprsas_portenable_complete(struct mpr_softc *sc,
140 struct mpr_command *cm);
142 #if __FreeBSD_version >= 900026
143 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
144 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
146 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
147 #endif //FreeBSD_version >= 900026
/*
 * Linear search of sassc->targets[] for the entry whose firmware DevHandle
 * matches 'handle', beginning at index 'start'.  Returns the matching
 * target.  NOTE(review): this listing is missing lines (gaps in the
 * embedded numbering) — the parameter list tail, the return statements,
 * and the closing braces are not visible here.
 */
149 struct mprsas_target *
150 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
153 struct mprsas_target *target;
156 for (i = start; i < sassc->maxtargets; i++) {
157 target = &sassc->targets[i];
158 if (target->handle == handle)
165 /* we need to freeze the simq during attach and diag reset, to avoid failing
166 * commands before device handles have been found by discovery. Since
167 * discovery involves reading config pages and possibly sending commands,
168 * discovery actions may continue even after we receive the end of discovery
169 * event, so refcount discovery actions instead of assuming we can unfreeze
170 * the simq when we get the event.
/*
 * Take a reference on discovery-related activity.  On the 0 -> 1
 * transition during startup, freeze the SIMQ so CAM holds off commands
 * until discovery has populated device handles (see the comment above).
 * NOTE(review): lines are missing from this listing; the #else/#endif arms
 * of the version conditional are not visible.
 */
173 mprsas_startup_increment(struct mprsas_softc *sassc)
175 MPR_FUNCTRACE(sassc->sc);
177 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
178 if (sassc->startup_refcount++ == 0) {
179 /* just starting, freeze the simq */
180 mpr_dprint(sassc->sc, MPR_INIT,
181 "%s freezing simq\n", __func__);
182 #if (__FreeBSD_version >= 1000039) || \
183 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
186 xpt_freeze_simq(sassc->sim, 1);
188 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
189 sassc->startup_refcount);
/*
 * If the SIM queue was frozen (MPRSAS_QUEUE_FROZEN), clear the flag and
 * release the queue so CAM can resume dispatching I/O.  Used on the
 * reinit path.
 */
194 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
196 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
197 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
198 xpt_release_simq(sassc->sim, 1);
199 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop a discovery reference.  On the 1 -> 0 transition, startup is over:
 * clear MPRSAS_IN_STARTUP, release the SIMQ frozen by
 * mprsas_startup_increment(), and (on sufficiently new CAM) rescan for
 * the final topology.  NOTE(review): lines are missing from this listing;
 * the #else/#endif arms of the version conditional are not visible.
 */
204 mprsas_startup_decrement(struct mprsas_softc *sassc)
206 MPR_FUNCTRACE(sassc->sc);
208 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
209 if (--sassc->startup_refcount == 0) {
210 /* finished all discovery-related actions, release
211 * the simq and rescan for the latest topology.
213 mpr_dprint(sassc->sc, MPR_INIT,
214 "%s releasing simq\n", __func__);
215 sassc->flags &= ~MPRSAS_IN_STARTUP;
216 xpt_release_simq(sassc->sim, 1);
217 #if (__FreeBSD_version >= 1000039) || \
218 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
/* NULL target => wildcard rescan of the whole bus */
221 mprsas_rescan_target(sassc->sc, NULL);
224 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
225 sassc->startup_refcount);
229 /* The firmware requires us to stop sending commands when we're doing task
230 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command frame for a task-management request.
 * NOTE(review): the return statement and any TM refcount bookkeeping are
 * not visible in this listing (missing lines).
 */
234 mprsas_alloc_tm(struct mpr_softc *sc)
236 struct mpr_command *tm;
239 tm = mpr_alloc_high_priority_command(sc);
/*
 * Release a task-management command.  If the TM was tied to a target,
 * clear its INRESET flag (otherwise SCSI I/O to it stays blocked); if it
 * carried a CCB, unfreeze the per-device queue and free the path/CCB
 * resources used for the freeze.  Finally return the frame to the
 * high-priority pool.
 */
244 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
246 int target_id = 0xFFFFFFFF;
253 * For TM's the devq is frozen for the device. Unfreeze it here and
254 * free the resources used for freezing the devq. Must clear the
255 * INRESET flag as well or scsi I/O will not work.
257 if (tm->cm_targ != NULL) {
258 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
259 target_id = tm->cm_targ->tid;
262 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
264 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
265 xpt_free_path(tm->cm_ccb->ccb_h.path);
266 xpt_free_ccb(tm->cm_ccb);
269 mpr_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when 'targ' is
 * NULL (target id becomes CAM_TARGET_WILDCARD).  Allocates a CCB, builds
 * a wildcard-LUN path, and selects XPT_SCAN_BUS vs XPT_SCAN_TGT based on
 * the target id.  NOTE(review): the error-return lines and the final
 * xpt_rescan()/xpt_action() call are not visible in this listing.
 */
273 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
275 struct mprsas_softc *sassc = sc->sassc;
277 target_id_t targetid;
281 pathid = cam_sim_path(sassc->sim);
283 targetid = CAM_TARGET_WILDCARD;
/* target id is the index of 'targ' within the targets[] array */
285 targetid = targ - sassc->targets;
288 * Allocate a CCB and schedule a rescan.
290 ccb = xpt_alloc_ccb_nowait();
292 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
296 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
297 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
298 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
303 if (targetid == CAM_TARGET_WILDCARD)
304 ccb->ccb_h.func_code = XPT_SCAN_BUS;
306 ccb->ccb_h.func_code = XPT_SCAN_TGT;
308 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logging for a command.  Builds one line in an sbuf:
 * the CAM path (or a "noperiph" sim/bus/target tuple when no CCB is
 * attached), the SCSI CDB and transfer length for XPT_SCSI_IO, the SMID,
 * then the caller's formatted message.  Early-outs when the requested
 * debug level is not enabled.  NOTE(review): the va_start/va_end lines
 * and sbuf_finish are not visible in this listing.
 */
313 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
323 /* No need to be in here if debugging isn't enabled */
324 if ((cm->cm_sc->mpr_debug & level) == 0)
327 sbuf_new(&sb, str, sizeof(str), 0);
331 if (cm->cm_ccb != NULL) {
332 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
334 sbuf_cat(&sb, path_str);
335 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
336 scsi_command_string(&cm->cm_ccb->csio, &sb);
337 sbuf_printf(&sb, "length %d ",
338 cm->cm_ccb->csio.dxfer_len);
341 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
342 cam_sim_name(cm->cm_sc->sassc->sim),
343 cam_sim_unit(cm->cm_sc->sassc->sim),
344 cam_sim_bus(cm->cm_sc->sassc->sim),
345 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
349 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
350 sbuf_vprintf(&sb, fmt, ap);
352 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  On success, clears the volume's
 * enclosure/handle bookkeeping (devname/sasaddr are deliberately kept so
 * the target id can be reserved for a returning device).  Unlike bare
 * drives, volumes need no follow-up SAS_OP_REMOVE_DEVICE.  The TM frame
 * is freed in all paths shown.  NOTE(review): this listing is missing
 * lines (e.g. the NULL-reply test, the targ lookup, and several targ
 * field clears).
 */
358 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
360 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
361 struct mprsas_target *targ;
366 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* DevHandle was stashed in cm_complete_data by the submitter */
367 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
371 /* XXX retry the remove after the diag reset completes? */
372 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
373 "0x%04x\n", __func__, handle);
374 mprsas_free_tm(sc, tm);
378 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
379 MPI2_IOCSTATUS_SUCCESS) {
380 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
381 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
384 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
385 le32toh(reply->TerminationCount));
/* Free the reply frame now; NULL cm_reply prevents a double free later */
386 mpr_free_reply(sc, tm->cm_reply_data);
387 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
389 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
393 * Don't clear target if remove fails because things will get confusing.
394 * Leave the devname and sasaddr intact so that we know to avoid reusing
395 * this target id if possible, and so we can assign the same target id
396 * to this device if it comes back in the future.
398 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
399 MPI2_IOCSTATUS_SUCCESS) {
402 targ->encl_handle = 0x0;
403 targ->encl_level_valid = 0x0;
404 targ->encl_level = 0x0;
405 targ->connector_name[0] = ' ';
406 targ->connector_name[1] = ' ';
407 targ->connector_name[2] = ' ';
408 targ->connector_name[3] = ' ';
409 targ->encl_slot = 0x0;
410 targ->exp_dev_handle = 0x0;
412 targ->linkrate = 0x0;
415 targ->scsi_req_desc_type = 0;
418 mprsas_free_tm(sc, tm);
423 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
424 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by firmware DevHandle: mark
 * the target INREMOVAL, rescan so CAM notices it leaving, and issue a
 * SCSI task-management target reset whose completion
 * (mprsas_remove_volume) finishes the teardown.  NOTE(review): the
 * NULL-targ and NULL-cm early returns are not fully visible in this
 * listing; 'sc' is used before its visible assignment from sassc->sc
 * (that assignment sits in a numbering gap).
 */
427 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
429 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
430 struct mpr_softc *sc;
431 struct mpr_command *cm;
432 struct mprsas_target *targ = NULL;
434 MPR_FUNCTRACE(sassc->sc);
437 targ = mprsas_find_target_by_handle(sassc, 0, handle);
439 /* FIXME: what is the action? */
440 /* We don't know about this device? */
441 mpr_dprint(sc, MPR_ERROR,
442 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 targ->flags |= MPRSAS_TARGET_INREMOVAL;
448 cm = mprsas_alloc_tm(sc);
450 mpr_dprint(sc, MPR_ERROR,
451 "%s: command alloc failure\n", __func__);
455 mprsas_rescan_target(sc, targ);
457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
458 req->DevHandle = targ->handle;
459 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
460 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
462 /* SAS Hard Link Reset / SATA Link Reset */
463 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 cm->cm_desc.HighPriority.RequestFlags =
468 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
/* completion routine gets the DevHandle via cm_complete_data */
469 cm->cm_complete = mprsas_remove_volume;
470 cm->cm_complete_data = (void *)(uintptr_t)handle;
472 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
473 __func__, targ->tid);
474 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
476 mpr_map_command(sc, cm);
480 * The MPT3 firmware performs debounce on the link to avoid transient link
481 * errors and false removals. When it does decide that link has been lost
482 * and a device needs to go away, it expects that the host will perform a
483 * target reset and then an op remove. The reset has the side-effect of
484 * aborting any outstanding requests for the device, which is required for
485 * the op-remove to succeed. It's not clear if the host should check for
486 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device (see the firmware-contract comment
 * above): mark the target INREMOVAL, rescan, and issue a target-reset TM.
 * The reset aborts outstanding I/O so the follow-up op-remove (issued
 * from mprsas_remove_device on completion) can succeed.  NOTE(review):
 * unlike mprsas_prepare_volume_remove, this variant memsets the request
 * and byte-swaps DevHandle with htole16(); the early-return lines are in
 * numbering gaps and not visible here.
 */
489 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
491 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
492 struct mpr_softc *sc;
493 struct mpr_command *cm;
494 struct mprsas_target *targ = NULL;
496 MPR_FUNCTRACE(sassc->sc);
500 targ = mprsas_find_target_by_handle(sassc, 0, handle);
502 /* FIXME: what is the action? */
503 /* We don't know about this device? */
504 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
509 targ->flags |= MPRSAS_TARGET_INREMOVAL;
511 cm = mprsas_alloc_tm(sc);
513 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
518 mprsas_rescan_target(sc, targ);
520 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
521 memset(req, 0, sizeof(*req));
522 req->DevHandle = htole16(targ->handle);
523 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
524 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
526 /* SAS Hard Link Reset / SATA Link Reset */
527 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
531 cm->cm_desc.HighPriority.RequestFlags =
532 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
/* completion continues the removal with a SAS IO unit op-remove */
533 cm->cm_complete = mprsas_remove_device;
534 cm->cm_complete_data = (void *)(uintptr_t)handle;
536 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
537 __func__, targ->tid);
538 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
540 mpr_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM from mprsas_prepare_remove().
 * Reuses the same command frame to send the MPI2_SAS_OP_REMOVE_DEVICE
 * IO-unit-control request (completion: mprsas_remove_complete), then
 * fails any commands still queued on the target with CAM_DEV_NOT_THERE.
 * NOTE(review): several lines (NULL-reply test, targ lookup, braces) sit
 * in numbering gaps and are not visible in this listing.
 */
544 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
546 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
547 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
548 struct mprsas_target *targ;
549 struct mpr_command *next_cm;
554 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
555 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
559 * Currently there should be no way we can hit this case. It only
560 * happens when we have a failure to allocate chain frames, and
561 * task management commands don't have S/G lists.
563 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
564 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
565 "handle %#04x! This should not happen!\n", __func__,
566 tm->cm_flags, handle);
570 /* XXX retry the remove after the diag reset completes? */
571 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
572 "0x%04x\n", __func__, handle);
573 mprsas_free_tm(sc, tm);
577 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
578 MPI2_IOCSTATUS_SUCCESS) {
579 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
580 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
583 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
584 le32toh(reply->TerminationCount));
/* Free the reply frame now; NULL cm_reply prevents a double free later */
585 mpr_free_reply(sc, tm->cm_reply_data);
586 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
588 /* Reuse the existing command */
589 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
590 memset(req, 0, sizeof(*req));
591 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
592 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
593 req->DevHandle = htole16(handle);
595 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
596 tm->cm_complete = mprsas_remove_complete;
597 tm->cm_complete_data = (void *)(uintptr_t)handle;
599 mpr_map_command(sc, tm);
601 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
603 if (targ->encl_level_valid) {
604 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
605 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
606 targ->connector_name);
/* fail every command still queued on this target back to CAM */
608 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
611 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
612 ccb = tm->cm_complete_data;
613 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
614 mprsas_scsiio_complete(sc, tm);
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request.  On
 * IOCStatus success, clears the target's enclosure/handle state (keeping
 * devname/sasaddr to reserve the target id) and frees any per-LUN
 * records.  The TM frame is freed in all paths shown.  NOTE(review):
 * several lines (NULL-reply test, targ lookup, free(lun), braces) sit in
 * numbering gaps and are not visible in this listing.
 */
619 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
621 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
623 struct mprsas_target *targ;
624 struct mprsas_lun *lun;
628 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
629 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
632 * Currently there should be no way we can hit this case. It only
633 * happens when we have a failure to allocate chain frames, and
634 * task management commands don't have S/G lists.
636 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
637 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
638 "handle %#04x! This should not happen!\n", __func__,
639 tm->cm_flags, handle);
640 mprsas_free_tm(sc, tm);
645 /* most likely a chip reset */
646 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
647 "0x%04x\n", __func__, handle);
648 mprsas_free_tm(sc, tm);
652 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
653 __func__, handle, le16toh(reply->IOCStatus));
656 * Don't clear target if remove fails because things will get confusing.
657 * Leave the devname and sasaddr intact so that we know to avoid reusing
658 * this target id if possible, and so we can assign the same target id
659 * to this device if it comes back in the future.
661 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
662 MPI2_IOCSTATUS_SUCCESS) {
665 targ->encl_handle = 0x0;
666 targ->encl_level_valid = 0x0;
667 targ->encl_level = 0x0;
668 targ->connector_name[0] = ' ';
669 targ->connector_name[1] = ' ';
670 targ->connector_name[2] = ' ';
671 targ->connector_name[3] = ' ';
672 targ->encl_slot = 0x0;
673 targ->exp_dev_handle = 0x0;
675 targ->linkrate = 0x0;
678 targ->scsi_req_desc_type = 0;
/* drain and free the per-LUN list built during discovery */
680 while (!SLIST_EMPTY(&targ->luns)) {
681 lun = SLIST_FIRST(&targ->luns);
682 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
687 mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask (SAS topology/discovery, IR volume and
 * physical-disk, temperature threshold, active-cable exception, etc.)
 * and register mprsas_evt_handler for those events.  NOTE(review): the
 * declaration of 'events' and the return statement are not visible in
 * this listing.
 */
691 mprsas_register_events(struct mpr_softc *sc)
696 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
697 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
698 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
699 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
700 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
701 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
702 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
703 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
704 setbit(events, MPI2_EVENT_IR_VOLUME);
705 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
706 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
707 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
708 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
710 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
711 &sc->sassc->mprsas_eh);
/*
 * Attach the CAM/SAS layer: allocate the per-adapter mprsas_softc and
 * targets[] array, create the SIMQ and SIM, register the (single, faked)
 * bus with CAM, freeze the SIMQ until discovery completes, set up the
 * event taskqueue and discovery callout, create a wildcard path, and
 * register async callbacks (AC_ADVINFO_CHANGED for EEDP probing where
 * supported).  Error handling and the final return are mostly in
 * numbering gaps.  NOTE(review): sassc is allocated with M_WAITOK, so
 * the NULL checks after malloc are dead code in the visible lines.
 */
717 mpr_attach_sas(struct mpr_softc *sc)
719 struct mprsas_softc *sassc;
725 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
727 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
733 * XXX MaxTargets could change during a reinit. Since we don't
734 * resize the targets[] array during such an event, cache the value
735 * of MaxTargets here so that we don't get into trouble later. This
736 * should move into the reinit logic.
738 sassc->maxtargets = sc->facts->MaxTargets;
739 sassc->targets = malloc(sizeof(struct mprsas_target) *
740 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
741 if (!sassc->targets) {
742 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
/* SIMQ depth matches the adapter's request-frame count */
750 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
751 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
756 unit = device_get_unit(sc->mpr_dev);
757 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
758 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
759 if (sassc->sim == NULL) {
760 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
765 TAILQ_INIT(&sassc->ev_queue);
767 /* Initialize taskqueue for Event Handling */
768 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
769 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
770 taskqueue_thread_enqueue, &sassc->ev_tq);
771 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
772 device_get_nameunit(sc->mpr_dev));
777 * XXX There should be a bus for every port on the adapter, but since
778 * we're just going to fake the topology for now, we'll pretend that
779 * everything is just a target on a single bus.
781 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
782 mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
789 * Assume that discovery events will start right away.
791 * Hold off boot until discovery is complete.
793 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
794 sc->sassc->startup_refcount = 0;
/* freezes the SIMQ (0 -> 1 transition) until discovery finishes */
795 mprsas_startup_increment(sassc);
797 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
800 * Register for async events so we can determine the EEDP
801 * capabilities of devices.
803 status = xpt_create_path(&sassc->path, /*periph*/NULL,
804 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
806 if (status != CAM_REQ_CMP) {
807 mpr_printf(sc, "Error %#x creating sim path\n", status);
/* only newer CAM delivers AC_ADVINFO_CHANGED (needed for EEDP probing) */
812 #if (__FreeBSD_version >= 1000006) || \
813 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
814 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
816 event = AC_FOUND_DEVICE;
820 * Prior to the CAM locking improvements, we can't call
821 * xpt_register_async() with a particular path specified.
823 * If a path isn't specified, xpt_register_async() will
824 * generate a wildcard path and acquire the XPT lock while
825 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
826 * It will then drop the XPT lock once that is done.
828 * If a path is specified for xpt_register_async(), it will
829 * not acquire and drop the XPT lock around the call to
830 * xpt_action(). xpt_action() asserts that the caller
831 * holds the SIM lock, so the SIM lock has to be held when
832 * calling xpt_register_async() when the path is specified.
834 * But xpt_register_async calls xpt_for_all_devices(),
835 * which calls xptbustraverse(), which will acquire each
836 * SIM lock. When it traverses our particular bus, it will
837 * necessarily acquire the SIM lock, which will lead to a
838 * recursive lock acquisition.
840 * The CAM locking changes fix this problem by acquiring
841 * the XPT topology lock around bus traversal in
842 * xptbustraverse(), so the caller can hold the SIM lock
843 * and it does not cause a recursive lock acquisition.
845 * These __FreeBSD_version values are approximate, especially
846 * for stable/10, which is two months later than the actual
850 #if (__FreeBSD_version < 1000703) || \
851 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
853 status = xpt_register_async(event, mprsas_async, sc,
857 status = xpt_register_async(event, mprsas_async, sc,
861 if (status != CAM_REQ_CMP) {
862 mpr_dprint(sc, MPR_ERROR,
863 "Error %#x registering async handler for "
864 "AC_ADVINFO_CHANGED events\n", status);
865 xpt_free_path(sassc->path);
869 if (status != CAM_REQ_CMP) {
871 * EEDP use is the exception, not the rule.
872 * Warn the user, but do not fail to attach.
874 mpr_printf(sc, "EEDP capabilities disabled.\n");
879 mprsas_register_events(sc);
/*
 * Tear down the CAM/SAS layer in reverse attach order: deregister
 * firmware events, free the event taskqueue (with the lock dropped so
 * in-flight tasks can drain), deregister the async handler and free the
 * wildcard path, release the SIMQ if still frozen from startup,
 * deregister and free the SIM and SIMQ, then free per-target LUN records
 * and the targets[] array.  NOTE(review): lock acquire/release lines and
 * several free() calls sit in numbering gaps and are not visible.
 */
887 mpr_detach_sas(struct mpr_softc *sc)
889 struct mprsas_softc *sassc;
890 struct mprsas_lun *lun, *lun_tmp;
891 struct mprsas_target *targ;
896 if (sc->sassc == NULL)
900 mpr_deregister_events(sc, sassc->mprsas_eh);
903 * Drain and free the event handling taskqueue with the lock
904 * unheld so that any parallel processing tasks drain properly
905 * without deadlocking.
907 if (sassc->ev_tq != NULL)
908 taskqueue_free(sassc->ev_tq);
910 /* Make sure CAM doesn't wedge if we had to bail out early. */
913 /* Deregister our async handler */
914 if (sassc->path != NULL) {
/* event mask 0 removes the previously registered callback */
915 xpt_register_async(0, mprsas_async, sc, sassc->path);
916 xpt_free_path(sassc->path);
920 if (sassc->flags & MPRSAS_IN_STARTUP)
921 xpt_release_simq(sassc->sim, 1);
923 if (sassc->sim != NULL) {
924 xpt_bus_deregister(cam_sim_path(sassc->sim));
925 cam_sim_free(sassc->sim, FALSE);
930 if (sassc->devq != NULL)
931 cam_simq_free(sassc->devq);
933 for (i = 0; i < sassc->maxtargets; i++) {
934 targ = &sassc->targets[i];
935 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
939 free(sassc->targets, M_MPR);
/*
 * Called when firmware signals end-of-discovery; cancels the pending
 * discovery-timeout callout if one was armed.  NOTE(review): the rest of
 * the body (and the MPR_FUNCTRACE/brace lines) is not visible in this
 * listing.
 */
947 mprsas_discovery_end(struct mprsas_softc *sassc)
949 struct mpr_softc *sc = sassc->sc;
953 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
954 callout_stop(&sassc->discovery_callout);
/*
 * Main CAM action entry point for the SIM.  Dispatches on
 * ccb_h.func_code: fills XPT_PATH_INQ with adapter capabilities, answers
 * XPT_GET_TRAN_SETTINGS from the cached per-target link rate, computes
 * geometry, routes resets/aborts, and hands SCSI I/O and (on newer CAM)
 * SMP I/O to their handlers.  Caller must hold the driver mutex
 * (asserted below).  NOTE(review): many case labels, break statements,
 * and closing braces sit in numbering gaps and are not visible here.
 */
959 mprsas_action(struct cam_sim *sim, union ccb *ccb)
961 struct mprsas_softc *sassc;
963 sassc = cam_sim_softc(sim);
965 MPR_FUNCTRACE(sassc->sc);
966 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
967 ccb->ccb_h.func_code);
968 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
970 switch (ccb->ccb_h.func_code) {
973 struct ccb_pathinq *cpi = &ccb->cpi;
975 cpi->version_num = 1;
976 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
977 cpi->target_sprt = 0;
/* PIM_NOSCAN: driver rescans itself on discovery, skip CAM's initial scan */
978 #if (__FreeBSD_version >= 1000039) || \
979 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
980 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
982 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
984 cpi->hba_eng_cnt = 0;
985 cpi->max_target = sassc->maxtargets - 1;
987 cpi->initiator_id = sassc->maxtargets - 1;
988 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
989 strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
990 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
991 cpi->unit_number = cam_sim_unit(sim);
992 cpi->bus_id = cam_sim_bus(sim);
994 * XXXSLM-I think this needs to change based on config page or
995 * something instead of hardcoded to 150000.
997 cpi->base_transfer_speed = 150000;
998 cpi->transport = XPORT_SAS;
999 cpi->transport_version = 0;
1000 cpi->protocol = PROTO_SCSI;
1001 cpi->protocol_version = SCSI_REV_SPC;
1002 #if __FreeBSD_version >= 800001
1004 * XXXSLM-probably need to base this number on max SGL's and
1007 cpi->maxio = 256 * 1024;
1009 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1012 case XPT_GET_TRAN_SETTINGS:
1014 struct ccb_trans_settings *cts;
1015 struct ccb_trans_settings_sas *sas;
1016 struct ccb_trans_settings_scsi *scsi;
1017 struct mprsas_target *targ;
1020 sas = &cts->xport_specific.sas;
1021 scsi = &cts->proto_specific.scsi;
1023 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1024 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1025 cts->ccb_h.target_id));
1026 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle 0 means no device currently mapped to this target id */
1027 if (targ->handle == 0x0) {
1028 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1032 cts->protocol_version = SCSI_REV_SPC2;
1033 cts->transport = XPORT_SAS;
1034 cts->transport_version = 0;
1036 sas->valid = CTS_SAS_VALID_SPEED;
/* map the cached negotiated link rate to a bitrate in Kb/s */
1037 switch (targ->linkrate) {
1039 sas->bitrate = 150000;
1042 sas->bitrate = 300000;
1045 sas->bitrate = 600000;
1048 sas->bitrate = 1200000;
1054 cts->protocol = PROTO_SCSI;
1055 scsi->valid = CTS_SCSI_VALID_TQ;
1056 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1058 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1061 case XPT_CALC_GEOMETRY:
1062 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1063 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1066 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1068 mprsas_action_resetdev(sassc, ccb);
1073 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1074 "for abort or reset\n");
1075 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1078 mprsas_action_scsiio(sassc, ccb);
1080 #if __FreeBSD_version >= 900026
1082 mprsas_action_smpio(sassc, ccb);
1086 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Notify CAM of a reset event (e.g. AC_BUS_RESET / AC_SENT_BDR) on the
 * given target/LUN: build a path on our SIM and post the async event,
 * then free the path.  Wildcard ids announce a full-bus reset.
 */
1094 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1095 target_id_t target_id, lun_id_t lun_id)
1097 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1098 struct cam_path *path;
1100 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1101 ac_code, target_id, (uintmax_t)lun_id);
1103 if (xpt_create_path(&path, NULL,
1104 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1105 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1110 xpt_async(ac_code, path, NULL);
1111 xpt_free_path(path);
/*
 * After a diag reset, force-complete every outstanding command with a
 * NULL reply: run completion callbacks, wake sleepers, and resync the
 * io_cmds_active counter.  Any command still in a non-FREE state with
 * neither a callback nor a waiter is logged as an anomaly.  Caller must
 * hold the driver mutex (asserted).  NOTE(review): the 'completed' flag
 * assignments and wakeup() call sit in numbering gaps and are not
 * visible in this listing.
 */
1115 mprsas_complete_all_commands(struct mpr_softc *sc)
1117 struct mpr_command *cm;
1122 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1124 /* complete all commands with a NULL reply */
1125 for (i = 1; i < sc->num_reqs; i++) {
1126 cm = &sc->commands[i];
1127 cm->cm_reply = NULL;
/* polled commands spin on COMPLETE rather than using a callback */
1130 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1131 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1133 if (cm->cm_complete != NULL) {
1134 mprsas_log_command(cm, MPR_RECOVERY,
1135 "completing cm %p state %x ccb %p for diag reset\n",
1136 cm, cm->cm_state, cm->cm_ccb);
1137 cm->cm_complete(sc, cm);
1141 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1142 mprsas_log_command(cm, MPR_RECOVERY,
1143 "waking up cm %p state %x ccb %p for diag reset\n",
1144 cm, cm->cm_state, cm->cm_ccb);
1149 if (cm->cm_sc->io_cmds_active != 0) {
1150 cm->cm_sc->io_cmds_active--;
1152 mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
1153 "io_cmds_active is out of sync - resynching to "
1157 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1158 /* this should never happen, but if it does, log */
1159 mprsas_log_command(cm, MPR_RECOVERY,
1160 "cm %p state %x flags 0x%x ccb %p during diag "
1161 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Handle a controller reinit (diag reset): re-enter startup mode (which
 * refreezes the SIMQ via mprsas_startup_increment), announce a bus reset
 * to CAM, force-complete all outstanding commands, then zero every
 * target's handles/counters and mark them INDIAGRESET so discovery can
 * rebuild the mapping with fresh firmware handles.
 */
1168 mprsas_handle_reinit(struct mpr_softc *sc)
1172 /* Go back into startup mode and freeze the simq, so that CAM
1173 * doesn't send any commands until after we've rediscovered all
1174 * targets and found the proper device handles for them.
1176 * After the reset, portenable will trigger discovery, and after all
1177 * discovery-related activities have finished, the simq will be
1180 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1181 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1182 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1183 mprsas_startup_increment(sc->sassc);
1185 /* notify CAM of a bus reset */
1186 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1189 /* complete and cleanup after all outstanding commands */
1190 mprsas_complete_all_commands(sc);
1192 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1193 __func__, sc->sassc->startup_refcount);
1195 /* zero all the target handles, since they may change after the
1196 * reset, and we have to rediscover all the targets and use the new
1199 for (i = 0; i < sc->sassc->maxtargets; i++) {
1200 if (sc->sassc->targets[i].outstanding != 0)
1201 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1202 i, sc->sassc->targets[i].outstanding);
1203 sc->sassc->targets[i].handle = 0x0;
1204 sc->sassc->targets[i].exp_dev_handle = 0x0;
1205 sc->sassc->targets[i].outstanding = 0;
/* note: assignment (not |=) — any prior per-target flags are discarded */
1206 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * mprsas_tm_timeout:
 *
 * Callout handler fired when a task-management request ('data' is the
 * TM struct mpr_command) does not complete in time. Must run with the
 * softc mutex held.
 */
1210 mprsas_tm_timeout(void *data)
1212 struct mpr_command *tm = data;
1213 struct mpr_softc *sc = tm->cm_sc;
1215 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1217 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/*
 * mprsas_logical_unit_reset_complete:
 *
 * Completion handler for a LOGICAL UNIT RESET task-management request.
 * If commands are still queued for this LUN the reset is treated as
 * failed (regardless of the reported status) and recovery escalates to
 * a target reset; otherwise recovery for this LUN is finished and any
 * remaining timed-out command on the target is processed next.
 */
1223 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1225 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1226 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1227 unsigned int cm_count = 0;
1228 struct mpr_command *cm;
1229 struct mprsas_target *targ;
/* The TM has completed; cancel its timeout callout. */
1231 callout_stop(&tm->cm_callout);
1233 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1234 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1238 * Currently there should be no way we can hit this case. It only
1239 * happens when we have a failure to allocate chain frames, and
1240 * task management commands don't have S/G lists.
1242 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1243 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1244 "This should not happen!\n", __func__, tm->cm_flags);
1245 mprsas_free_tm(sc, tm);
1249 if (reply == NULL) {
1250 mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1252 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1253 /* this completion was due to a reset, just cleanup */
1255 mprsas_free_tm(sc, tm);
1258 /* we should have gotten a reply. */
1264 mprsas_log_command(tm, MPR_RECOVERY,
1265 "logical unit reset status 0x%x code 0x%x count %u\n",
1266 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1267 le32toh(reply->TerminationCount));
1269 /* See if there are any outstanding commands for this LUN.
1270 * This could be made more efficient by using a per-LU data
1271 * structure of some sort.
1273 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1274 if (cm->cm_lun == tm->cm_lun)
1278 if (cm_count == 0) {
1279 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1280 "logical unit %u finished recovery after reset\n",
1283 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1286 /* we've finished recovery for this logical unit. check and
1287 * see if some other logical unit has a timedout command
1288 * that needs to be processed.
1290 cm = TAILQ_FIRST(&targ->timedout_commands);
1292 mprsas_send_abort(sc, tm, cm);
1296 mprsas_free_tm(sc, tm);
1300 /* if we still have commands for this LUN, the reset
1301 * effectively failed, regardless of the status reported.
1302 * Escalate to a target reset.
1304 mprsas_log_command(tm, MPR_RECOVERY,
1305 "logical unit reset complete for tm %p, but still have %u "
1306 "command(s)\n", tm, cm_count);
1307 mprsas_send_reset(sc, tm,
1308 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * mprsas_target_reset_complete:
 *
 * Completion handler for a TARGET RESET task-management request.
 * If the target still has outstanding commands the reset is treated as
 * failed (regardless of reported status) and recovery escalates;
 * otherwise recovery for the target and all of its LUNs is finished.
 */
1313 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1315 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1316 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1317 struct mprsas_target *targ;
/* The TM has completed; cancel its timeout callout. */
1319 callout_stop(&tm->cm_callout);
1321 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1322 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1326 * Currently there should be no way we can hit this case. It only
1327 * happens when we have a failure to allocate chain frames, and
1328 * task management commands don't have S/G lists.
1330 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1331 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1332 "reset! This should not happen!\n", __func__, tm->cm_flags);
1333 mprsas_free_tm(sc, tm);
1337 if (reply == NULL) {
1338 mprsas_log_command(tm, MPR_RECOVERY, "NULL reset reply for tm "
1340 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1341 /* this completion was due to a reset, just cleanup */
1343 mprsas_free_tm(sc, tm);
1346 /* we should have gotten a reply. */
1352 mprsas_log_command(tm, MPR_RECOVERY,
1353 "target reset status 0x%x code 0x%x count %u\n",
1354 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1355 le32toh(reply->TerminationCount));
1357 if (targ->outstanding == 0) {
1358 /* we've finished recovery for this target and all
1359 * of its logical units.
1361 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1362 "recovery finished after target reset\n");
1364 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1368 mprsas_free_tm(sc, tm);
1371 /* after a target reset, if this target still has
1372 * outstanding commands, the reset effectively failed,
1373 * regardless of the status reported. escalate.
1375 mprsas_log_command(tm, MPR_RECOVERY,
1376 "target reset complete for tm %p, but still have %u "
1377 "command(s)\n", tm, targ->outstanding);
/* Timeout, in seconds, for a reset task-management request. */
1382 #define MPR_RESET_TIMEOUT 30
/*
 * mprsas_send_reset:
 *
 * Build and send a SCSI task-management reset for the target bound to
 * 'tm'. 'type' selects a LOGICAL UNIT RESET or a TARGET RESET; any
 * other value is rejected with an error message. Arms a
 * MPR_RESET_TIMEOUT callout and submits the request on the
 * high-priority request path via mpr_map_command().
 */
1385 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1387 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1388 struct mprsas_target *target;
1391 target = tm->cm_targ;
1392 if (target->handle == 0) {
1393 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1394 "%d\n", __func__, target->tid);
1398 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1399 req->DevHandle = htole16(target->handle);
1400 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1401 req->TaskType = type;
1403 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1404 /* XXX Need to handle invalid LUNs */
1405 MPR_SET_LUN(req->LUN, tm->cm_lun);
1406 tm->cm_targ->logical_unit_resets++;
1407 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1408 "sending logical unit reset\n");
1409 tm->cm_complete = mprsas_logical_unit_reset_complete;
1410 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1412 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1414 * Target reset method =
1415 * SAS Hard Link Reset / SATA Link Reset
1417 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1418 tm->cm_targ->target_resets++;
1419 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1420 "sending target reset\n");
1421 tm->cm_complete = mprsas_target_reset_complete;
1422 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1425 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1429 mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
1431 if (target->encl_level_valid) {
1432 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
1433 "connector name (%4s)\n", target->encl_level,
1434 target->encl_slot, target->connector_name);
/* Task-management requests go through the high-priority queue. */
1438 tm->cm_desc.HighPriority.RequestFlags =
1439 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1440 tm->cm_complete_data = (void *)tm;
1442 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1443 mprsas_tm_timeout, tm);
1445 err = mpr_map_command(sc, tm);
1447 mprsas_log_command(tm, MPR_RECOVERY,
1448 "error %d sending reset type %u\n", err, type);
/*
 * mprsas_abort_complete:
 *
 * Completion handler for an ABORT TASK task-management request.
 * Depending on the state of the target's timed-out command queue this
 * either finishes recovery, sends the next abort, or — when the abort
 * did not produce a command completion — escalates to a logical unit
 * reset.
 */
1455 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1457 struct mpr_command *cm;
1458 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1459 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1460 struct mprsas_target *targ;
/* The TM has completed; cancel its timeout callout. */
1462 callout_stop(&tm->cm_callout);
1464 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1465 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1469 * Currently there should be no way we can hit this case. It only
1470 * happens when we have a failure to allocate chain frames, and
1471 * task management commands don't have S/G lists.
1473 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1474 mprsas_log_command(tm, MPR_RECOVERY,
1475 "cm_flags = %#x for abort %p TaskMID %u!\n",
1476 tm->cm_flags, tm, le16toh(req->TaskMID));
1477 mprsas_free_tm(sc, tm);
1481 if (reply == NULL) {
1482 mprsas_log_command(tm, MPR_RECOVERY,
1483 "NULL abort reply for tm %p TaskMID %u\n",
1484 tm, le16toh(req->TaskMID));
1485 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1486 /* this completion was due to a reset, just cleanup */
1488 mprsas_free_tm(sc, tm);
1491 /* we should have gotten a reply. */
1497 mprsas_log_command(tm, MPR_RECOVERY,
1498 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1499 le16toh(req->TaskMID),
1500 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1501 le32toh(reply->TerminationCount));
1503 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1505 /* if there are no more timedout commands, we're done with
1506 * error recovery for this target.
1508 mprsas_log_command(tm, MPR_RECOVERY,
1509 "finished recovery after aborting TaskMID %u\n",
1510 le16toh(req->TaskMID));
1513 mprsas_free_tm(sc, tm);
/* A different SMID at the queue head means the aborted command is
 * gone, but more timed-out commands remain: keep aborting. */
1515 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1516 /* abort success, but we have more timedout commands to abort */
1517 mprsas_log_command(tm, MPR_RECOVERY,
1518 "continuing recovery after aborting TaskMID %u\n",
1519 le16toh(req->TaskMID));
1521 mprsas_send_abort(sc, tm, cm);
1524 /* we didn't get a command completion, so the abort
1525 * failed as far as we're concerned. escalate.
1527 mprsas_log_command(tm, MPR_RECOVERY,
1528 "abort failed for TaskMID %u tm %p\n",
1529 le16toh(req->TaskMID), tm);
1531 mprsas_send_reset(sc, tm,
1532 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Timeout, in seconds, for an abort task-management request. */
1536 #define MPR_ABORT_TIMEOUT 5
/*
 * mprsas_send_abort:
 *
 * Build and send an ABORT TASK task-management request (TM command
 * 'tm') for the timed-out I/O command 'cm'. Arms a MPR_ABORT_TIMEOUT
 * callout before mapping the request to hardware.
 */
1539 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1540 struct mpr_command *cm)
1542 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1543 struct mprsas_target *targ;
1547 if (targ->handle == 0) {
1548 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1549 __func__, cm->cm_ccb->ccb_h.target_id);
1553 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1554 "Aborting command %p\n", cm);
1556 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1557 req->DevHandle = htole16(targ->handle);
1558 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1559 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1561 /* XXX Need to handle invalid LUNs */
1562 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies, by SMID, the request to be aborted. */
1564 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1567 tm->cm_desc.HighPriority.RequestFlags =
1568 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1569 tm->cm_complete = mprsas_abort_complete;
1570 tm->cm_complete_data = (void *)tm;
1571 tm->cm_targ = cm->cm_targ;
1572 tm->cm_lun = cm->cm_lun;
1574 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1575 mprsas_tm_timeout, tm);
1579 mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1580 __func__, targ->tid);
1581 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1583 err = mpr_map_command(sc, tm);
1585 mprsas_log_command(tm, MPR_RECOVERY,
1586 "error %d sending abort for cm %p SMID %u\n",
1587 err, cm, req->TaskMID);
/*
 * mprsas_scsiio_timeout:
 *
 * Callout handler for a SCSI I/O command that timed out. First runs
 * the interrupt handler in case completion is merely pending, then
 * marks the command timed out, queues it on the target's recovery
 * list, and either starts recovery with an abort TM or queues behind
 * a TM already in flight for the target.
 */
1592 mprsas_scsiio_timeout(void *data)
1594 struct mpr_softc *sc;
1595 struct mpr_command *cm;
1596 struct mprsas_target *targ;
1598 cm = (struct mpr_command *)data;
1602 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1604 mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1607 * Run the interrupt handler to make sure it's not pending. This
1608 * isn't perfect because the command could have already completed
1609 * and been re-used, though this is unlikely.
1611 mpr_intr_locked(sc);
1612 if (cm->cm_state == MPR_CM_STATE_FREE) {
1613 mprsas_log_command(cm, MPR_XINFO,
1614 "SCSI command %p almost timed out\n", cm);
1618 if (cm->cm_ccb == NULL) {
1619 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1626 mprsas_log_command(cm, MPR_ERROR, "command timeout cm %p ccb %p target "
1627 "%u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid, targ->handle);
1628 if (targ->encl_level_valid) {
1629 mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
1630 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1631 targ->connector_name);
1634 /* XXX first, check the firmware state, to see if it's still
1635 * operational. if not, do a diag reset.
1637 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1638 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1639 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1641 if (targ->tm != NULL) {
1642 /* target already in recovery, just queue up another
1643 * timedout command to be processed later.
1645 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1646 "processing by tm %p\n", cm, targ->tm);
1648 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1649 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1652 /* start recovery by aborting the first timedout command */
1653 mprsas_send_abort(sc, targ->tm, cm);
1656 /* XXX queue this target up for recovery once a TM becomes
1657 * available. The firmware only has a limited number of
1658 * HighPriority credits for the high priority requests used
1659 * for task management, and we ran out.
1661 * Isilon: don't worry about this for now, since we have
1662 * more credits than disks in an enclosure, and limit
1663 * ourselves to one TM per target for recovery.
1665 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p failed to "
1666 "allocate a tm\n", cm);
/*
 * mprsas_action_scsiio:
 *
 * Translate an XPT_SCSI_IO CCB from CAM into an MPI2 SCSI IO request
 * and submit it to the controller. Performs target/shutdown/reset-in-
 * progress checks, fills in the CDB, transfer direction and queue-tag
 * control bits, optionally sets up EEDP (protection information),
 * arms the per-command timeout, and maps the command for DMA.
 */
1671 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1673 MPI2_SCSI_IO_REQUEST *req;
1674 struct ccb_scsiio *csio;
1675 struct mpr_softc *sc;
1676 struct mprsas_target *targ;
1677 struct mprsas_lun *lun;
1678 struct mpr_command *cm;
1679 uint8_t i, lba_byte, *ref_tag_addr;
1680 uint16_t eedp_flags;
1681 uint32_t mpi_control;
1685 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1688 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1689 ("Target %d out of bounds in XPT_SCSI_IO\n",
1690 csio->ccb_h.target_id));
1691 targ = &sassc->targets[csio->ccb_h.target_id];
1692 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* A zero handle means the target is not (or no longer) mapped. */
1693 if (targ->handle == 0x0) {
1694 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1695 __func__, csio->ccb_h.target_id);
1696 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1700 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1701 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1702 "supported %u\n", __func__, csio->ccb_h.target_id);
1703 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1708 * Sometimes, it is possible to get a command that is not "In
1709 * Progress" and was actually aborted by the upper layer. Check for
1710 * this here and complete the command without error.
1712 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1713 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1714 "target %u\n", __func__, csio->ccb_h.target_id);
1719 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1720 * that the volume has timed out. We want volumes to be enumerated
1721 * until they are deleted/removed, not just failed.
1723 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1724 if (targ->devinfo == 0)
1725 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1727 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1732 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1733 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1734 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1740 * If target has a reset in progress, freeze the devq and return. The
1741 * devq will be released when the TM reset is finished.
1743 if (targ->flags & MPRSAS_TARGET_INRESET) {
1744 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1745 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1746 __func__, targ->tid);
1747 xpt_freeze_devq(ccb->ccb_h.path, 1);
/* No free command, or a diag reset in progress: freeze the simq and
 * ask CAM to requeue the CCB. */
1752 cm = mpr_alloc_command(sc);
1753 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1755 mpr_free_command(sc, cm);
1757 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1758 xpt_freeze_simq(sassc->sim, 1);
1759 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1761 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1762 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1767 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1768 bzero(req, sizeof(*req));
1769 req->DevHandle = htole16(targ->handle);
1770 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1772 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1773 req->SenseBufferLength = MPR_SENSE_LEN;
1775 req->ChainOffset = 0;
1776 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1781 req->DataLength = htole32(csio->dxfer_len);
1782 req->BidirectionalDataLength = 0;
1783 req->IoFlags = htole16(csio->cdb_len);
1786 /* Note: BiDirectional transfers are not supported */
1787 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1789 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1790 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1793 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1794 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1798 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1802 if (csio->cdb_len == 32)
1803 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1805 * It looks like the hardware doesn't require an explicit tag
1806 * number for each transaction. SAM Task Management not supported
1809 switch (csio->tag_action) {
1810 case MSG_HEAD_OF_Q_TAG:
1811 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1813 case MSG_ORDERED_Q_TAG:
1814 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1817 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1819 case CAM_TAG_ACTION_NONE:
1820 case MSG_SIMPLE_Q_TAG:
1822 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1825 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1826 req->Control = htole32(mpi_control);
1828 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1829 mpr_free_command(sc, cm);
1830 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* The CDB may be passed by pointer or inline in the CCB. */
1835 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1836 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1838 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1839 req->IoFlags = htole16(csio->cdb_len);
1842 * Check if EEDP is supported and enabled. If it is then check if the
1843 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1844 * is formatted for EEDP support. If all of this is true, set CDB up
1845 * for EEDP transfer.
1847 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1848 if (sc->eedp_enabled && eedp_flags) {
1849 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1850 if (lun->lun_id == csio->ccb_h.target_lun) {
1855 if ((lun != NULL) && (lun->eedp_formatted)) {
1856 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1857 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1858 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1859 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1860 req->EEDPFlags = htole16(eedp_flags);
1863 * If CDB less than 32, fill in Primary Ref Tag with
1864 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1865 * already there. Also, set protection bit. FreeBSD
1866 * currently does not support CDBs bigger than 16, but
1867 * the code doesn't hurt, and will be here for the
1870 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 for 16-byte CDBs, byte 2 otherwise. */
1871 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1872 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1873 PrimaryReferenceTag;
1874 for (i = 0; i < 4; i++) {
1876 req->CDB.CDB32[lba_byte + i];
1879 req->CDB.EEDP32.PrimaryReferenceTag =
1881 CDB.EEDP32.PrimaryReferenceTag);
1882 req->CDB.EEDP32.PrimaryApplicationTagMask =
1884 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1888 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1889 req->EEDPFlags = htole16(eedp_flags);
1890 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1896 cm->cm_length = csio->dxfer_len;
1897 if (cm->cm_length != 0) {
1899 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
1903 cm->cm_sge = &req->SGL;
1904 cm->cm_sglsize = (32 - 24) * 4;
1905 cm->cm_complete = mprsas_scsiio_complete;
1906 cm->cm_complete_data = ccb;
1908 cm->cm_lun = csio->ccb_h.target_lun;
1911 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1912 * and set descriptor type.
1914 if (targ->scsi_req_desc_type ==
1915 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1916 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1917 cm->cm_desc.FastPathSCSIIO.RequestFlags =
1918 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1919 cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1921 cm->cm_desc.SCSIIO.RequestFlags =
1922 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1923 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Arm the per-I/O timeout (ccb_h.timeout is in milliseconds). */
1926 #if __FreeBSD_version >= 1000029
1927 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1928 mprsas_scsiio_timeout, cm, 0);
1929 #else //__FreeBSD_version < 1000029
1930 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1931 mprsas_scsiio_timeout, cm);
1932 #endif //__FreeBSD_version >= 1000029
1935 targ->outstanding++;
1936 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1937 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1939 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1940 __func__, cm, ccb, targ->outstanding);
1942 mpr_map_command(sc, cm);
/*
 * mpr_response_code:
 *
 * Log a human-readable description of a firmware SCSI task-management
 * response code.
 */
1947 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1951 switch (response_code) {
1952 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1953 desc = "task management request completed";
1955 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1956 desc = "invalid frame";
1958 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1959 desc = "task management request not supported";
1961 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1962 desc = "task management request failed";
1964 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1965 desc = "task management request succeeded";
1967 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1968 desc = "invalid lun";
1971 desc = "overlapped tag attempted";
1973 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1974 desc = "task queued, however not sent to target";
1980 mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
/*
 * mpr_sc_failed_io_info:
 *
 * Translate a non-successful SCSI_IO reply into human-readable IOC
 * status / SCSI status / SCSI state strings and log them, including
 * sense data when autosense is valid and the response code when
 * response info is valid.
 */
1985 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
1988 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1989 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1993 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1994 MPI2_IOCSTATUS_MASK;
1995 u8 scsi_state = mpi_reply->SCSIState;
1996 u8 scsi_status = mpi_reply->SCSIStatus;
1997 char *desc_ioc_state = NULL;
1998 char *desc_scsi_status = NULL;
/* The state description is assembled in the softc scratch buffer. */
1999 char *desc_scsi_state = sc->tmp_string;
2000 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2002 if (log_info == 0x31170000)
/* Map the masked IOCStatus value to a description. */
2005 switch (ioc_status) {
2006 case MPI2_IOCSTATUS_SUCCESS:
2007 desc_ioc_state = "success";
2009 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2010 desc_ioc_state = "invalid function";
2012 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2013 desc_ioc_state = "scsi recovered error";
2015 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2016 desc_ioc_state = "scsi invalid dev handle";
2018 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2019 desc_ioc_state = "scsi device not there";
2021 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2022 desc_ioc_state = "scsi data overrun";
2024 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2025 desc_ioc_state = "scsi data underrun";
2027 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2028 desc_ioc_state = "scsi io data error";
2030 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2031 desc_ioc_state = "scsi protocol error";
2033 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2034 desc_ioc_state = "scsi task terminated";
2036 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2037 desc_ioc_state = "scsi residual mismatch";
2039 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2040 desc_ioc_state = "scsi task mgmt failed";
2042 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2043 desc_ioc_state = "scsi ioc terminated";
2045 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2046 desc_ioc_state = "scsi ext terminated";
2048 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2049 desc_ioc_state = "eedp guard error";
2051 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2052 desc_ioc_state = "eedp ref tag error";
2054 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2055 desc_ioc_state = "eedp app tag error";
2057 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
2058 desc_ioc_state = "insufficient power";
2061 desc_ioc_state = "unknown";
/* Map the raw SCSI status byte to a description. */
2065 switch (scsi_status) {
2066 case MPI2_SCSI_STATUS_GOOD:
2067 desc_scsi_status = "good";
2069 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2070 desc_scsi_status = "check condition";
2072 case MPI2_SCSI_STATUS_CONDITION_MET:
2073 desc_scsi_status = "condition met";
2075 case MPI2_SCSI_STATUS_BUSY:
2076 desc_scsi_status = "busy";
2078 case MPI2_SCSI_STATUS_INTERMEDIATE:
2079 desc_scsi_status = "intermediate";
2081 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2082 desc_scsi_status = "intermediate condmet";
2084 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2085 desc_scsi_status = "reservation conflict";
2087 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2088 desc_scsi_status = "command terminated";
2090 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2091 desc_scsi_status = "task set full";
2093 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2094 desc_scsi_status = "aca active";
2096 case MPI2_SCSI_STATUS_TASK_ABORTED:
2097 desc_scsi_status = "task aborted";
2100 desc_scsi_status = "unknown";
/*
 * Build the state description by concatenating a word for each state
 * bit that is set. NOTE(review): the strcat calls assume
 * sc->tmp_string is large enough for all fragments — confirm its
 * size in the softc definition.
 */
2104 desc_scsi_state[0] = '\0';
2106 desc_scsi_state = " ";
2107 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2108 strcat(desc_scsi_state, "response info ");
2109 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2110 strcat(desc_scsi_state, "state terminated ");
2111 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2112 strcat(desc_scsi_state, "no status ");
2113 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2114 strcat(desc_scsi_state, "autosense failed ");
2115 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2116 strcat(desc_scsi_state, "autosense valid ");
2118 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2119 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2120 if (targ->encl_level_valid) {
2121 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2122 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2123 targ->connector_name);
2125 /* We can add more detail about underflow data here
2128 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2129 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2130 desc_scsi_state, scsi_state);
2132 if (sc->mpr_debug & MPR_XINFO &&
2133 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2134 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2135 scsi_sense_print(csio);
2136 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
/* Response info present: decode and log the response code byte. */
2139 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2140 response_info = le32toh(mpi_reply->ResponseInfo);
2141 response_bytes = (u8 *)&response_info;
2142 mpr_response_code(sc,response_bytes[0]);
2147 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2149 MPI2_SCSI_IO_REPLY *rep;
2151 struct ccb_scsiio *csio;
2152 struct mprsas_softc *sassc;
2153 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2154 u8 *TLR_bits, TLR_on;
2157 struct mprsas_target *target;
2158 target_id_t target_id;
2161 mpr_dprint(sc, MPR_TRACE,
2162 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2163 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2164 cm->cm_targ->outstanding);
2166 callout_stop(&cm->cm_callout);
2167 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2170 ccb = cm->cm_complete_data;
2172 target_id = csio->ccb_h.target_id;
2173 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2175 * XXX KDM if the chain allocation fails, does it matter if we do
2176 * the sync and unload here? It is simpler to do it in every case,
2177 * assuming it doesn't cause problems.
2179 if (cm->cm_data != NULL) {
2180 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2181 dir = BUS_DMASYNC_POSTREAD;
2182 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2183 dir = BUS_DMASYNC_POSTWRITE;
2184 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2185 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2188 cm->cm_targ->completed++;
2189 cm->cm_targ->outstanding--;
2190 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2191 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2193 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2194 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2195 if (cm->cm_reply != NULL)
2196 mprsas_log_command(cm, MPR_RECOVERY,
2197 "completed timedout cm %p ccb %p during recovery "
2198 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2199 le16toh(rep->IOCStatus), rep->SCSIStatus,
2200 rep->SCSIState, le32toh(rep->TransferCount));
2202 mprsas_log_command(cm, MPR_RECOVERY,
2203 "completed timedout cm %p ccb %p during recovery\n",
2205 } else if (cm->cm_targ->tm != NULL) {
2206 if (cm->cm_reply != NULL)
2207 mprsas_log_command(cm, MPR_RECOVERY,
2208 "completed cm %p ccb %p during recovery "
2209 "ioc %x scsi %x state %x xfer %u\n",
2210 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2211 rep->SCSIStatus, rep->SCSIState,
2212 le32toh(rep->TransferCount));
2214 mprsas_log_command(cm, MPR_RECOVERY,
2215 "completed cm %p ccb %p during recovery\n",
2217 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2218 mprsas_log_command(cm, MPR_RECOVERY,
2219 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2222 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2224 * We ran into an error after we tried to map the command,
2225 * so we're getting a callback without queueing the command
2226 * to the hardware. So we set the status here, and it will
2227 * be retained below. We'll go through the "fast path",
2228 * because there can be no reply when we haven't actually
2229 * gone out to the hardware.
2231 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2234 * Currently the only error included in the mask is
2235 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2236 * chain frames. We need to freeze the queue until we get
2237 * a command that completed without this error, which will
2238 * hopefully have some chain frames attached that we can
2239 * use. If we wanted to get smarter about it, we would
2240 * only unfreeze the queue in this condition when we're
2241 * sure that we're getting some chain frames back. That's
2242 * probably unnecessary.
2244 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2245 xpt_freeze_simq(sassc->sim, 1);
2246 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2247 mpr_dprint(sc, MPR_INFO, "Error sending command, "
2248 "freezing SIM queue\n");
2253 * If this is a Start Stop Unit command and it was issued by the driver
2254 * during shutdown, decrement the refcount to account for all of the
2255 * commands that were sent. All SSU commands should be completed before
2256 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2259 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2260 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2264 /* Take the fast path to completion */
2265 if (cm->cm_reply == NULL) {
2266 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2267 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2268 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2270 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2271 csio->scsi_status = SCSI_STATUS_OK;
2273 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2274 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2275 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2276 mpr_dprint(sc, MPR_XINFO,
2277 "Unfreezing SIM queue\n");
2282 * There are two scenarios where the status won't be
2283 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2284 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2286 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2288 * Freeze the dev queue so that commands are
2289 * executed in the correct order after error
2292 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2293 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2295 mpr_free_command(sc, cm);
2300 mprsas_log_command(cm, MPR_XINFO,
2301 "ioc %x scsi %x state %x xfer %u\n",
2302 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2303 le32toh(rep->TransferCount));
2305 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2306 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2307 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2309 case MPI2_IOCSTATUS_SUCCESS:
2310 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2312 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2313 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2314 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2316 /* Completion failed at the transport level. */
2317 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2318 MPI2_SCSI_STATE_TERMINATED)) {
2319 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2323 /* In a modern packetized environment, an autosense failure
2324 * implies that there's not much else that can be done to
2325 * recover the command.
2327 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2328 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2333 * CAM doesn't care about SAS Response Info data, but if this is
2334 * the state check if TLR should be done. If not, clear the
2335 * TLR_bits for the target.
2337 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2338 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2339 == MPR_SCSI_RI_INVALID_FRAME)) {
2340 sc->mapping_table[target_id].TLR_bits =
2341 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2345 * Intentionally override the normal SCSI status reporting
2346 * for these two cases. These are likely to happen in a
2347 * multi-initiator environment, and we want to make sure that
2348 * CAM retries these commands rather than fail them.
2350 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2351 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2352 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2356 /* Handle normal status and sense */
2357 csio->scsi_status = rep->SCSIStatus;
2358 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2359 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2361 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2363 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2364 int sense_len, returned_sense_len;
2366 returned_sense_len = min(le32toh(rep->SenseCount),
2367 sizeof(struct scsi_sense_data));
2368 if (returned_sense_len < csio->sense_len)
2369 csio->sense_resid = csio->sense_len -
2372 csio->sense_resid = 0;
2374 sense_len = min(returned_sense_len,
2375 csio->sense_len - csio->sense_resid);
2376 bzero(&csio->sense_data, sizeof(csio->sense_data));
2377 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2378 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2382 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2383 * and it's page code 0 (Supported Page List), and there is
2384 * inquiry data, and this is for a sequential access device, and
2385 * the device is an SSP target, and TLR is supported by the
2386 * controller, turn the TLR_bits value ON if page 0x90 is
2389 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2390 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2391 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2392 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2393 (csio->data_ptr != NULL) &&
2394 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2395 (sc->control_TLR) &&
2396 (sc->mapping_table[target_id].device_info &
2397 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2398 vpd_list = (struct scsi_vpd_supported_page_list *)
2400 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2401 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2402 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2403 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2404 csio->cdb_io.cdb_bytes[4];
2405 alloc_len -= csio->resid;
2406 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2407 if (vpd_list->list[i] == 0x90) {
2415 * If this is a SATA direct-access end device, mark it so that
2416 * a SCSI StartStopUnit command will be sent to it when the
2417 * driver is being shutdown.
2419 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2420 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2421 (sc->mapping_table[target_id].device_info &
2422 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2423 ((sc->mapping_table[target_id].device_info &
2424 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2425 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2426 target = &sassc->targets[target_id];
2427 target->supports_SSU = TRUE;
2428 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2432 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2433 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2435 * If devinfo is 0 this will be a volume. In that case don't
2436 * tell CAM that the volume is not there. We want volumes to
2437 * be enumerated until they are deleted/removed, not just
2440 if (cm->cm_targ->devinfo == 0)
2441 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2443 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2445 case MPI2_IOCSTATUS_INVALID_SGL:
2446 mpr_print_scsiio_cmd(sc, cm);
2447 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2449 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2451 * This is one of the responses that comes back when an I/O
2452 * has been aborted. If it is because of a timeout that we
2453 * initiated, just set the status to CAM_CMD_TIMEOUT.
2454 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2455 * command is the same (it gets retried, subject to the
2456 * retry counter), the only difference is what gets printed
2459 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2460 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2462 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2464 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2465 /* resid is ignored for this condition */
2467 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2469 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2470 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2472 * Since these are generally external (i.e. hopefully
2473 * transient transport-related) errors, retry these without
2474 * decrementing the retry count.
2476 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2477 mprsas_log_command(cm, MPR_INFO,
2478 "terminated ioc %x scsi %x state %x xfer %u\n",
2479 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2480 le32toh(rep->TransferCount));
2482 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2483 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2484 case MPI2_IOCSTATUS_INVALID_VPID:
2485 case MPI2_IOCSTATUS_INVALID_FIELD:
2486 case MPI2_IOCSTATUS_INVALID_STATE:
2487 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2488 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2489 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2490 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2491 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2493 mprsas_log_command(cm, MPR_XINFO,
2494 "completed ioc %x scsi %x state %x xfer %u\n",
2495 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2496 le32toh(rep->TransferCount));
2497 csio->resid = cm->cm_length;
2498 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2502 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2504 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2505 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2506 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2507 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2511 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2512 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2513 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2516 mpr_free_command(sc, cm);
2520 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command issued by
 * mprsas_send_smpcmd().  Translates the firmware reply (IOCStatus /
 * SASStatus) and the SMP frame result into a CAM CCB status, syncs and
 * unloads the bidirectional DMA map, and frees the command.
 * NOTE(review): this extract elides some lines (declarations, braces,
 * and the bail-out label/return paths between the status checks).
 */
2522 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2524 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2525 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2529 ccb = cm->cm_complete_data;
/*
 * A command with MPR_CM_FLAGS_ERROR_MASK set never made it to the
 * hardware; fail the CCB outright.
 */
2532 * Currently there should be no way we can hit this case. It only
2533 * happens when we have a failure to allocate chain frames, and SMP
2534 * commands require two S/G elements only. That should be handled
2535 * in the standard request size.
2537 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2538 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2539 "request!\n", __func__, cm->cm_flags);
2540 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* No reply frame means the firmware never answered; also a hard error. */
2544 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2546 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2547 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request. */
2551 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2552 sasaddr = le32toh(req->SASAddress.Low);
2553 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
/* Both the IOC-level and SAS-level status must indicate success. */
2555 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2556 MPI2_IOCSTATUS_SUCCESS ||
2557 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2558 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2559 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2560 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2564 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2565 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2567 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2568 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2570 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2574 * We sync in both directions because we had DMAs in the S/G list
2575 * in both directions.
2577 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2578 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2579 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2580 mpr_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * expander at 'sasaddr'.  Resolves the request/response buffer virtual
 * addresses (rejecting physical-address and multi-segment S/G cases the
 * chip cannot handle), fills in an MPI2_SMP_PASSTHROUGH_REQUEST, and maps
 * it with a two-element uio (one iovec for the request frame, one for the
 * response frame).  Completion is handled by mprsas_smpio_complete().
 * NOTE(review): several lines (the 'sc' assignment, 'int error', braces,
 * bailout returns) are elided in this extract.
 */
2585 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2587 struct mpr_command *cm;
2588 uint8_t *request, *response;
2589 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2590 struct mpr_softc *sc;
/*
 * Newer FreeBSD: data-direction/addressing is encoded in CAM_DATA_MASK;
 * dispatch on the addressing mode of the CCB.
 */
2598 #if (__FreeBSD_version >= 1000028) || \
2599 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2600 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2601 case CAM_DATA_PADDR:
2602 case CAM_DATA_SG_PADDR:
2604 * XXX We don't yet support physical addresses here.
2606 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2607 "supported\n", __func__);
2608 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2613 * The chip does not support more than one buffer for the
2614 * request or response.
2616 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2617 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2618 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2619 "response buffer segments not supported for SMP\n",
2621 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2627 * The CAM_SCATTER_VALID flag was originally implemented
2628 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2629 * We have two. So, just take that flag to mean that we
2630 * might have S/G lists, and look at the S/G segment count
2631 * to figure out whether that is the case for each individual
2634 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2635 bus_dma_segment_t *req_sg;
2637 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2638 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2640 request = ccb->smpio.smp_request;
2642 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2643 bus_dma_segment_t *rsp_sg;
2645 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2646 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2648 response = ccb->smpio.smp_response;
2650 case CAM_DATA_VADDR:
2651 request = ccb->smpio.smp_request;
2652 response = ccb->smpio.smp_response;
2655 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
/* Older FreeBSD: same logic expressed with the legacy CCB flag bits. */
2659 #else /* __FreeBSD_version < 1000028 */
2661 * XXX We don't yet support physical addresses here.
2663 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2664 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2665 "supported\n", __func__);
2666 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2672 * If the user wants to send an S/G list, check to make sure they
2673 * have single buffers.
2675 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2677 * The chip does not support more than one buffer for the
2678 * request or response.
2680 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2681 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2682 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2683 "response buffer segments not supported for SMP\n",
2685 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2691 * The CAM_SCATTER_VALID flag was originally implemented
2692 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2693 * We have two. So, just take that flag to mean that we
2694 * might have S/G lists, and look at the S/G segment count
2695 * to figure out whether that is the case for each individual
2698 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2699 bus_dma_segment_t *req_sg;
2701 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2702 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2704 request = ccb->smpio.smp_request;
2706 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2707 bus_dma_segment_t *rsp_sg;
2709 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2710 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2712 response = ccb->smpio.smp_response;
2714 request = ccb->smpio.smp_request;
2715 response = ccb->smpio.smp_response;
2717 #endif /* __FreeBSD_version < 1000028 */
2719 cm = mpr_alloc_command(sc);
2721 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
2723 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the MPI SMP passthrough request frame. */
2728 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2729 bzero(req, sizeof(*req));
2730 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2732 /* Allow the chip to use any route to this SAS address. */
2733 req->PhysicalPort = 0xff;
2735 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2737 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2739 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2740 "%#jx\n", __func__, (uintmax_t)sasaddr);
2742 mpr_init_sge(cm, req, &req->SGL);
2745 * Set up a uio to pass into mpr_map_command(). This allows us to
2746 * do one map command, and one busdma call in there.
2748 cm->cm_uio.uio_iov = cm->cm_iovec;
2749 cm->cm_uio.uio_iovcnt = 2;
2750 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2753 * The read/write flag isn't used by busdma, but set it just in
2754 * case. This isn't exactly accurate, either, since we're going in
2757 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = outbound SMP request, iovec[1] = inbound SMP response. */
2759 cm->cm_iovec[0].iov_base = request;
2760 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2761 cm->cm_iovec[1].iov_base = response;
2762 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2764 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2765 cm->cm_iovec[1].iov_len;
2768 * Trigger a warning message in mpr_data_cb() for the user if we
2769 * wind up exceeding two S/G segments. The chip expects one
2770 * segment for the request and another for the response.
2772 cm->cm_max_segs = 2;
2774 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2775 cm->cm_complete = mprsas_smpio_complete;
2776 cm->cm_complete_data = ccb;
2779 * Tell the mapping code that we're using a uio, and that this is
2780 * an SMP passthrough request. There is a little special-case
2781 * logic there (in mpr_data_cb()) to handle the bidirectional
2784 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2785 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2787 /* The chip data format is little endian. */
2788 req->SASAddress.High = htole32(sasaddr >> 32);
2789 req->SASAddress.Low = htole32(sasaddr);
2792 * XXX Note that we don't have a timeout/abort mechanism here.
2793 * From the manual, it looks like task management requests only
2794 * work for SCSI IO and SATA passthrough requests. We may need to
2795 * have a mechanism to retry requests in the event of a chip reset
2796 * at least. Hopefully the chip will insure that any errors short
2797 * of that are relayed back to the driver.
2799 error = mpr_map_command(sc, cm);
2800 if ((error != 0) && (error != EINPROGRESS)) {
2801 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2802 "mpr_map_command()\n", __func__, error);
/* Error exit: release the command and fail the CCB. */
2809 mpr_free_command(sc, cm);
2810 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * XPT_SMP_IO action handler: figure out which SAS address the SMP request
 * should be routed to.  If the target itself embeds an SMP target, use its
 * own address; otherwise fall back to the parent device (normally the
 * expander the target hangs off), then hand off to mprsas_send_smpcmd().
 * NOTE(review): lines are elided in this extract (the 'sc' assignment,
 * 'bail:' style exits, xpt_done() calls, and closing braces).
 */
2816 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2818 struct mpr_softc *sc;
2819 struct mprsas_target *targ;
2820 uint64_t sasaddr = 0;
2825 * Make sure the target exists.
2827 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2828 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2829 targ = &sassc->targets[ccb->ccb_h.target_id];
2830 if (targ->handle == 0x0) {
2831 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2832 __func__, ccb->ccb_h.target_id);
2833 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2839 * If this device has an embedded SMP target, we'll talk to it
2841 * figure out what the expander's address is.
2843 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2844 sasaddr = targ->sasaddr;
2847 * If we don't have a SAS address for the expander yet, try
2848 * grabbing it from the page 0x83 information cached in the
2849 * transport layer for this target. LSI expanders report the
2850 * expander SAS address as the port-associated SAS address in
2851 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2854 * XXX KDM disable this for now, but leave it commented out so that
2855 * it is obvious that this is another possible way to get the SAS
2858 * The parent handle method below is a little more reliable, and
2859 * the other benefit is that it works for devices other than SES
2860 * devices. So you can send a SMP request to a da(4) device and it
2861 * will get routed to the expander that device is attached to.
2862 * (Assuming the da(4) device doesn't contain an SMP target...)
2866 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2870 * If we still don't have a SAS address for the expander, look for
2871 * the parent device of this device, which is probably the expander.
2874 #ifdef OLD_MPR_PROBE
2875 struct mprsas_target *parent_target;
/* A zero parent handle means we have nothing to route through. */
2878 if (targ->parent_handle == 0x0) {
2879 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2880 "a valid parent handle!\n", __func__, targ->handle);
2881 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Old probe path: look the parent up in the target table by handle. */
2884 #ifdef OLD_MPR_PROBE
2885 parent_target = mprsas_find_target_by_handle(sassc, 0,
2886 targ->parent_handle);
2888 if (parent_target == NULL) {
2889 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2890 "a valid parent target!\n", __func__, targ->handle);
2891 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2895 if ((parent_target->devinfo &
2896 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2897 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2898 "does not have an SMP target!\n", __func__,
2899 targ->handle, parent_target->handle);
2900 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2904 sasaddr = parent_target->sasaddr;
/* New probe path: parent info is cached directly on the target. */
2905 #else /* OLD_MPR_PROBE */
2906 if ((targ->parent_devinfo &
2907 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2908 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2909 "does not have an SMP target!\n", __func__,
2910 targ->handle, targ->parent_handle);
2911 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2915 if (targ->parent_sasaddr == 0x0) {
2916 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2917 "%d does not have a valid SAS address!\n", __func__,
2918 targ->handle, targ->parent_handle);
2919 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2923 sasaddr = targ->parent_sasaddr;
2924 #endif /* OLD_MPR_PROBE */
/* No address resolved by any method: the request cannot be routed. */
2929 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2930 "handle %d\n", __func__, targ->handle);
2931 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2934 mprsas_send_smpcmd(sassc, ccb, sasaddr);
2942 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler: allocate a task-management command and
 * send a SCSI TASK MGMT "Target Reset" (with link-reset MsgFlags) to the
 * target addressed by the CCB.  Completion is handled by
 * mprsas_resetdev_complete().
 * NOTE(review): 'sc' is used below but its assignment (presumably
 * sc = sassc->sc) is elided in this extract — confirm against the full
 * source.
 */
2945 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2947 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2948 struct mpr_softc *sc;
2949 struct mpr_command *tm;
2950 struct mprsas_target *targ;
2952 MPR_FUNCTRACE(sassc->sc);
2953 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2955 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
2956 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
2958 tm = mpr_alloc_command(sc);
2960 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
2961 "mprsas_action_resetdev\n");
2962 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the target-reset task management request. */
2967 targ = &sassc->targets[ccb->ccb_h.target_id];
2968 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2969 req->DevHandle = htole16(targ->handle);
2970 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2971 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2973 /* SAS Hard Link Reset / SATA Link Reset */
2974 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* TM requests go through the high-priority descriptor queue. */
2977 tm->cm_desc.HighPriority.RequestFlags =
2978 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2979 tm->cm_complete = mprsas_resetdev_complete;
2980 tm->cm_complete_data = ccb;
2982 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
2983 __func__, targ->tid);
/* Block further I/O to this target until the reset completes. */
2985 targ->flags |= MPRSAS_TARGET_INRESET;
2987 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_action_resetdev().  Maps the TM response code to a CAM status,
 * announces the bus device reset (AC_SENT_BDR) on success, and frees the
 * task-management command.
 */
2991 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2993 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2997 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2999 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3000 ccb = tm->cm_complete_data;
3003 * Currently there should be no way we can hit this case. It only
3004 * happens when we have a failure to allocate chain frames, and
3005 * task management commands don't have S/G lists.
3007 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3008 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3010 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3012 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3013 "handle %#04x! This should not happen!\n", __func__,
3014 tm->cm_flags, req->DevHandle);
3015 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3019 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3020 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* TM_COMPLETE means the firmware finished the reset successfully. */
3022 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3023 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3024 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3028 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3032 mprsas_free_tm(sc, tm);
/*
 * CAM SIM poll routine, called with interrupts unavailable (e.g. during
 * kernel dumps/panics).  Disables MPR_TRACE debugging first so chatty
 * trace output doesn't drown the polled path, then services the chip via
 * mpr_intr_locked().
 */
3037 mprsas_poll(struct cam_sim *sim)
3039 struct mprsas_softc *sassc;
3041 sassc = cam_sim_softc(sim);
3043 if (sassc->sc->mpr_debug & MPR_TRACE) {
3044 /* frequent debug messages during a panic just slow
3045 * everything down too much.
3047 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3049 sassc->sc->mpr_debug &= ~MPR_TRACE;
3052 mpr_intr_locked(sassc->sc);
/*
 * CAM async event callback.  Handles:
 *  - AC_ADVINFO_CHANGED: re-reads READ CAPACITY(16) advanced info
 *    (CDAI_TYPE_RCAPLONG) to keep per-LUN EEDP (protection info) state
 *    current, allocating the LUN tracking record on first sight;
 *  - AC_FOUND_DEVICE: on older FreeBSD versions, triggers the EEDP probe
 *    via mprsas_check_eedp().
 * NOTE(review): the switch statement, 'found_lun'/'lunid' declarations,
 * break statements and closing braces are elided in this extract.
 */
3056 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3059 struct mpr_softc *sc;
3061 sc = (struct mpr_softc *)callback_arg;
3064 #if (__FreeBSD_version >= 1000006) || \
3065 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3066 case AC_ADVINFO_CHANGED: {
3067 struct mprsas_target *target;
3068 struct mprsas_softc *sassc;
3069 struct scsi_read_capacity_data_long rcap_buf;
3070 struct ccb_dev_advinfo cdai;
3071 struct mprsas_lun *lun;
3076 buftype = (uintptr_t)arg;
3082 * We're only interested in read capacity data changes.
3084 if (buftype != CDAI_TYPE_RCAPLONG)
3088 * See the comment in mpr_attach_sas() for a detailed
3089 * explanation. In these versions of FreeBSD we register
3090 * for all events and filter out the events that don't
3093 #if (__FreeBSD_version < 1000703) || \
3094 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3095 if (xpt_path_path_id(path) != sassc->sim->path_id)
3100 * We should have a handle for this, but check to make sure.
3102 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3103 ("Target %d out of bounds in mprsas_async\n",
3104 xpt_path_target_id(path)));
3105 target = &sassc->targets[xpt_path_target_id(path)];
3106 if (target->handle == 0)
/* Find (or create) the LUN record that tracks EEDP state. */
3109 lunid = xpt_path_lun_id(path);
3111 SLIST_FOREACH(lun, &target->luns, lun_link) {
3112 if (lun->lun_id == lunid) {
3118 if (found_lun == 0) {
3119 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3122 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3123 "LUN for EEDP support.\n");
3126 lun->lun_id = lunid;
3127 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
3130 bzero(&rcap_buf, sizeof(rcap_buf));
3131 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3132 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3133 cdai.ccb_h.flags = CAM_DIR_IN;
3134 cdai.buftype = CDAI_TYPE_RCAPLONG;
3135 #if (__FreeBSD_version >= 1100061) || \
3136 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3137 cdai.flags = CDAI_FLAG_NONE;
3141 cdai.bufsiz = sizeof(rcap_buf);
3142 cdai.buf = (uint8_t *)&rcap_buf;
3143 xpt_action((union ccb *)&cdai);
3144 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3145 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* PROT_EN bit set => LUN is formatted with protection information. */
3147 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3148 && (rcap_buf.prot & SRC16_PROT_EN)) {
3149 lun->eedp_formatted = TRUE;
3150 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3152 lun->eedp_formatted = FALSE;
3153 lun->eedp_block_size = 0;
3158 case AC_FOUND_DEVICE: {
3159 struct ccb_getdev *cgd;
3162 * See the comment in mpr_attach_sas() for a detailed
3163 * explanation. In these versions of FreeBSD we register
3164 * for all events and filter out the events that don't
3167 #if (__FreeBSD_version < 1000703) || \
3168 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3169 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
/* Older kernels lack CDAI_TYPE_RCAPLONG; probe EEDP explicitly. */
3174 #if (__FreeBSD_version < 901503) || \
3175 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3176 mprsas_check_eedp(sc, path, cgd);
3185 #if (__FreeBSD_version < 901503) || \
3186 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Probe a newly-found device for EEDP (end-to-end data protection)
 * support on older FreeBSD versions.  If the inquiry data advertises
 * SPC3_SID_PROTECT, issue an internal READ CAPACITY(16) whose completion
 * (mprsas_read_cap_done) records the LUN's EEDP formatting and block
 * size.  Allocates the per-LUN tracking record if not already present.
 * NOTE(review): several lines (declarations such as 'union ccb *ccb',
 * early returns, 'lun_id_t lunid', the final xpt_action() submission)
 * are elided in this extract.
 */
3188 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3189 struct ccb_getdev *cgd)
3191 struct mprsas_softc *sassc = sc->sassc;
3192 struct ccb_scsiio *csio;
3193 struct scsi_read_capacity_16 *scsi_cmd;
3194 struct scsi_read_capacity_eedp *rcap_buf;
3196 target_id_t targetid;
3199 struct cam_path *local_path;
3200 struct mprsas_target *target;
3201 struct mprsas_lun *lun;
3205 pathid = cam_sim_path(sassc->sim);
3206 targetid = xpt_path_target_id(path);
3207 lunid = xpt_path_lun_id(path);
3209 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3210 "mprsas_check_eedp\n", targetid));
3211 target = &sassc->targets[targetid];
3212 if (target->handle == 0x0)
3216 * Determine if the device is EEDP capable.
3218 * If this flag is set in the inquiry data, the device supports
3219 * protection information, and must support the 16 byte read capacity
3220 * command, otherwise continue without sending read cap 16.
3222 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3226 * Issue a READ CAPACITY 16 command. This info is used to determine if
3227 * the LUN is formatted for EEDP support.
3229 ccb = xpt_alloc_ccb_nowait();
3231 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3236 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3238 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3245 * If LUN is already in list, don't create a new one.
3248 SLIST_FOREACH(lun, &target->luns, lun_link) {
3249 if (lun->lun_id == lunid) {
3255 lun = malloc(sizeof(struct mprsas_lun),  M_MPR,
3258 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3260 xpt_free_path(local_path);
3264 lun->lun_id = lunid;
3265 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3268 xpt_path_string(local_path, path_str, sizeof(path_str));
3269 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3270 path_str, target->handle);
3273 * Issue a READ CAPACITY 16 command for the LUN. The
3274 * mprsas_read_cap_done function will load the read cap info into the
3277 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3279 if (rcap_buf == NULL) {
3280 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3281 "buffer for EEDP support.\n");
3282 xpt_free_path(ccb->ccb_h.path);
/* Fill in the CSIO for an internally-generated READ CAPACITY(16). */
3286 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3288 csio->ccb_h.func_code = XPT_SCSI_IO;
3289 csio->ccb_h.flags = CAM_DIR_IN;
3290 csio->ccb_h.retry_count = 4;
3291 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3292 csio->ccb_h.timeout = 60000;
3293 csio->data_ptr = (uint8_t *)rcap_buf;
3294 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3295 csio->sense_len = MPR_SENSE_LEN;
3296 csio->cdb_len = sizeof(*scsi_cmd);
3297 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E / SERVICE ACTION IN(16) with READ CAPACITY(16) service action. */
3299 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3300 bzero(scsi_cmd, sizeof(*scsi_cmd));
3301 scsi_cmd->opcode = 0x9E;
3302 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3303 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion handler can find its state. */
3305 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internal READ CAPACITY(16) sent by
 * mprsas_check_eedp().  Releases the devq (the command was generated by
 * the driver, so CAM won't), locates the matching LUN record, and records
 * whether the LUN is EEDP-formatted along with its block size.  Frees the
 * capacity buffer, path, and CCB on the way out.
 */
3310 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3312 struct mprsas_softc *sassc;
3313 struct mprsas_target *target;
3314 struct mprsas_lun *lun;
3315 struct scsi_read_capacity_eedp *rcap_buf;
3317 if (done_ccb == NULL)
3320 /* Driver need to release devq, it Scsi command is
3321 * generated by driver internally.
3322 * Currently there is a single place where driver
3323 * calls scsi command internally. In future if driver
3324 * calls more scsi command internally, it needs to release
3325 * devq internally, since those command will not go back to
3328 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3329 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3330 xpt_release_devq(done_ccb->ccb_h.path,
3331 /*count*/ 1, /*run_queue*/TRUE);
3334 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3337 * Get the LUN ID for the path and look it up in the LUN list for the
3340 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3341 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3342 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3343 target = &sassc->targets[done_ccb->ccb_h.target_id];
3344 SLIST_FOREACH(lun, &target->luns, lun_link) {
3345 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3349 * Got the LUN in the target's LUN list. Fill it in with EEDP
3350 * info. If the READ CAP 16 command had some SCSI error (common
3351 * if command is not supported), mark the lun as not supporting
3352 * EEDP and set the block size to 0.
3354 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3355 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3356 lun->eedp_formatted = FALSE;
3357 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte (PROT_EN) indicates EEDP formatting. */
3361 if (rcap_buf->protect & 0x01) {
3362 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3363 "%d is formatted for EEDP support.\n",
3364 done_ccb->ccb_h.target_lun,
3365 done_ccb->ccb_h.target_id);
3366 lun->eedp_formatted = TRUE;
3367 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3372 // Finished with this CCB and path.
3373 free(rcap_buf, M_MPR);
3374 xpt_free_path(done_ccb->ccb_h.path);
3375 xpt_free_ccb(done_ccb);
3377 #endif /* (__FreeBSD_version < 901503) || \
3378 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Prepare a target for a task-management operation: allocate a CCB and
 * path used to release the devq later, attach the target to the TM
 * command, and set MPRSAS_TARGET_INRESET so no new I/O is dispatched
 * until the TM completes.
 * NOTE(review): null-check of the allocated CCB and storage of the CCB on
 * the tm/target appear to be in lines elided from this extract.
 */
3381 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3382 struct mprsas_target *target, lun_id_t lun_id)
3388 * Set the INRESET flag for this target so that no I/O will be sent to
3389 * the target until the reset has completed. If an I/O request does
3390 * happen, the devq will be frozen. The CCB holds the path which is
3391 * used to release the devq. The devq is released and the CCB is freed
3392 * when the TM completes.
3394 ccb = xpt_alloc_ccb_nowait();
3396 path_id = cam_sim_path(sc->sassc->sim);
3397 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3398 target->tid, lun_id) != CAM_REQ_CMP) {
3402 tm->cm_targ = target;
3403 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off SAS topology discovery: flag that we are waiting for port
 * enable (keeps the SIM queue frozen until discovery settles) and send
 * the PORT ENABLE request to the firmware.
 */
3409 mprsas_startup(struct mpr_softc *sc)
3412 * Send the port enable message and set the wait_for_port_enable flag.
3413 * This flag helps to keep the simq frozen until all discovery events
3416 sc->wait_for_port_enable = 1;
3417 mprsas_send_portenable(sc);
/*
 * Allocate a command and send an MPI2 PORT ENABLE request to the IOC.
 * Completion is handled by mprsas_portenable_complete().  Returns an
 * error when no command can be allocated (elided line).
 */
3422 mprsas_send_portenable(struct mpr_softc *sc)
3424 MPI2_PORT_ENABLE_REQUEST *request;
3425 struct mpr_command *cm;
3429 if ((cm = mpr_alloc_command(sc)) == NULL)
3431 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3432 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3433 request->MsgFlags = 0;
3435 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3436 cm->cm_complete = mprsas_portenable_complete;
3440 mpr_map_command(sc, cm);
3441 mpr_dprint(sc, MPR_XINFO,
3442 "mpr_send_portenable finished cm %p req %p complete %p\n",
3443 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT ENABLE request.  Logs failures, frees
 * the command, tears down the config intrhook that was holding up boot,
 * and signals that port enable (and hence initial discovery) is done so
 * the bus can be rescanned.
 */
3448 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3450 MPI2_PORT_ENABLE_REPLY *reply;
3451 struct mprsas_softc *sassc;
3457 * Currently there should be no way we can hit this case. It only
3458 * happens when we have a failure to allocate chain frames, and
3459 * port enable commands don't have S/G lists.
3461 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3462 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3463 "This should not happen!\n", __func__, cm->cm_flags);
3466 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3468 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/*
 * NOTE(review): le16toh() is applied to (IOCStatus & MASK) here, whereas
 * other completion paths in this file (e.g. mprsas_smpio_complete) mask
 * AFTER byte-swapping: le16toh(IOCStatus) & MASK.  On big-endian hosts
 * these differ — verify against the full source/upstream fix.
 */
3469 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3470 MPI2_IOCSTATUS_SUCCESS)
3471 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3473 mpr_free_command(sc, cm);
/* Boot was held by a config intrhook; release it now that PE is done. */
3474 if (sc->mpr_ich.ich_arg != NULL) {
3475 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3476 config_intrhook_disestablish(&sc->mpr_ich);
3477 sc->mpr_ich.ich_arg = NULL;
3481 * Done waiting for port enable to complete. Decrement the refcount.
3482 * If refcount is 0, discovery is complete and a rescan of the bus can
3485 sc->wait_for_port_enable = 0;
3486 sc->port_enable_complete = 1;
3487 wakeup(&sc->port_enable_complete);
3488 mprsas_startup_decrement(sassc);
/*
 * Check whether target 'id' appears in the driver's comma-separated
 * exclude-ID list (sc->exclude_ids).  Iterates the list with strsep(),
 * skipping empty tokens, and compares each parsed number against 'id'.
 * Return-value lines are elided in this extract; presumably nonzero
 * when excluded — confirm against the full source.
 */
3492 mprsas_check_id(struct mprsas_softc *sassc, int id)
3494 struct mpr_softc *sc = sassc->sc;
3498 ids = &sc->exclude_ids[0];
3499 while((name = strsep(&ids, ",")) != NULL) {
3500 if (name[0] == '\0')
3502 if (strtol(name, NULL, 0) == (long)id)
3510 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3512 struct mprsas_softc *sassc;
3513 struct mprsas_lun *lun, *lun_tmp;
3514 struct mprsas_target *targ;
3519 * The number of targets is based on IOC Facts, so free all of
3520 * the allocated LUNs for each target and then the target buffer
3523 for (i=0; i< maxtargets; i++) {
3524 targ = &sassc->targets[i];
3525 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3529 free(sassc->targets, M_MPR);
3531 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3532 M_MPR, M_WAITOK|M_ZERO);
3533 if (!sassc->targets) {
3534 panic("%s failed to alloc targets with error %d\n",