2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2014 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 /* Communications core for LSI MPT2 */
33 /* TODO Move headers to mprvar */
34 #include <sys/types.h>
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/selinfo.h>
39 #include <sys/module.h>
43 #include <sys/malloc.h>
45 #include <sys/sysctl.h>
46 #include <sys/endian.h>
47 #include <sys/queue.h>
48 #include <sys/kthread.h>
49 #include <sys/taskqueue.h>
52 #include <machine/bus.h>
53 #include <machine/resource.h>
56 #include <machine/stdarg.h>
59 #include <cam/cam_ccb.h>
60 #include <cam/cam_debug.h>
61 #include <cam/cam_sim.h>
62 #include <cam/cam_xpt_sim.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_periph.h>
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_message.h>
67 #if __FreeBSD_version >= 900026
68 #include <cam/scsi/smp_all.h>
71 #include <dev/mpr/mpi/mpi2_type.h>
72 #include <dev/mpr/mpi/mpi2.h>
73 #include <dev/mpr/mpi/mpi2_ioc.h>
74 #include <dev/mpr/mpi/mpi2_sas.h>
75 #include <dev/mpr/mpi/mpi2_cnfg.h>
76 #include <dev/mpr/mpi/mpi2_init.h>
77 #include <dev/mpr/mpi/mpi2_tool.h>
78 #include <dev/mpr/mpr_ioctl.h>
79 #include <dev/mpr/mprvar.h>
80 #include <dev/mpr/mpr_table.h>
81 #include <dev/mpr/mpr_sas.h>
83 #define MPRSAS_DISCOVERY_TIMEOUT 20
84 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
87 * static array to check SCSI OpCode for EEDP protection bits
89 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
90 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
91 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP (End-to-End Data Protection) flag lookup table, indexed by SCSI CDB
 * opcode; one row covers 16 opcodes.  Non-zero entries mark the read/write/
 * verify commands that need protection-info flags set in the MPI SCSI IO
 * request (PRO_R = check/remove, PRO_W = insert on write, PRO_V = insert on
 * verify).  NOTE(review): the closing "};" of this initializer appears to
 * have been lost in extraction of this chunk — confirm against the original.
 */
92 static uint8_t op_code_prot[256] = {
93 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND VERIFY(10), 0x2F VERIFY(10) */
95 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
96 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x41 WRITE SAME(10) */
97 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x88 READ(16), 0x8A WRITE(16), 0x8E WRITE AND VERIFY(16), 0x8F VERIFY(16) */
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
/* 0x93 WRITE SAME(16) */
102 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
/* 0xA8 READ(12), 0xAA WRITE(12), 0xAE WRITE AND VERIFY(12), 0xAF VERIFY(12) */
103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
111 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
113 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
114 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
115 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
116 static void mprsas_poll(struct cam_sim *sim);
117 static void mprsas_scsiio_timeout(void *data);
118 static void mprsas_abort_complete(struct mpr_softc *sc,
119 struct mpr_command *cm);
120 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
121 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
123 static void mprsas_resetdev_complete(struct mpr_softc *,
124 struct mpr_command *);
125 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
126 struct mpr_command *cm);
127 static int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
129 static void mprsas_async(void *callback_arg, uint32_t code,
130 struct cam_path *path, void *arg);
131 static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
132 struct ccb_getdev *cgd);
133 #if (__FreeBSD_version < 901503) || \
134 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136 struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138 union ccb *done_ccb);
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142 struct mpr_command *cm);
144 #if __FreeBSD_version >= 900026
146 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148 union ccb *ccb, uint64_t sasaddr);
150 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
/*
 * Linear scan of sassc->targets[] starting at index 'start' for the entry
 * whose firmware device handle matches 'handle'.  NOTE(review): the tail of
 * this function (match return, closing braces, not-found return — presumably
 * NULL) appears to have been lost in extraction; confirm against original.
 */
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
157 struct mprsas_target *target;
160 for (i = start; i < sassc->maxtargets; i++) {
161 target = &sassc->targets[i];
162 if (target->handle == handle)
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170 * commands before device handles have been found by discovery. Since
171 * discovery involves reading config pages and possibly sending commands,
172 * discovery actions may continue even after we receive the end of discovery
173 * event, so refcount discovery actions instead of assuming we can unfreeze
174 * the simq when we get the event.
/*
 * Bump the discovery/startup refcount; on the 0 -> 1 transition (while in
 * MPRSAS_IN_STARTUP) freeze the SIM queue so CAM sends no commands until
 * discovery has found the device handles.  See the block comment above for
 * why this is refcounted rather than keyed off the discovery-end event.
 */
177 mprsas_startup_increment(struct mprsas_softc *sassc)
179 MPR_FUNCTRACE(sassc->sc);
181 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 if (sassc->startup_refcount++ == 0) {
183 /* just starting, freeze the simq */
184 mpr_dprint(sassc->sc, MPR_INIT,
185 "%s freezing simq\n", __func__);
/* newer CAM (see version test) presumably sets a "hold boot" state here;
 * the guarded lines are elided in this chunk — TODO confirm */
186 #if (__FreeBSD_version >= 1000039) || \
187 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
190 xpt_freeze_simq(sassc->sim, 1);
192 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
193 sassc->startup_refcount);
/*
 * If the SIM queue was frozen (MPRSAS_QUEUE_FROZEN), clear the flag and
 * release one freeze count so CAM resumes dispatching commands.
 */
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
200 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 xpt_release_simq(sassc->sim, 1);
203 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop the discovery/startup refcount; on the 1 -> 0 transition, leave
 * startup mode, release the SIM queue frozen by mprsas_startup_increment(),
 * and rescan the whole bus (wildcard target) to pick up the final topology.
 */
208 mprsas_startup_decrement(struct mprsas_softc *sassc)
210 MPR_FUNCTRACE(sassc->sc);
212 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
213 if (--sassc->startup_refcount == 0) {
214 /* finished all discovery-related actions, release
215 * the simq and rescan for the latest topology.
217 mpr_dprint(sassc->sc, MPR_INIT,
218 "%s releasing simq\n", __func__);
219 sassc->flags &= ~MPRSAS_IN_STARTUP;
220 xpt_release_simq(sassc->sim, 1);
/* version-gated alternative path elided in this chunk — TODO confirm */
221 #if (__FreeBSD_version >= 1000039) || \
222 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
/* NULL target => wildcard rescan of the entire bus */
225 mprsas_rescan_target(sassc->sc, NULL);
228 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
229 sassc->startup_refcount);
233 /* LSI's firmware requires us to stop sending commands when we're doing task
234 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command frame for a task-management request.
 * The firmware requires normal I/O to be quiesced during TM, so the first
 * outstanding TM (tm_count 0 -> 1) freezes the SIM queue; mprsas_free_tm()
 * releases it when the last TM finishes.
 */
238 mprsas_alloc_tm(struct mpr_softc *sc)
240 struct mpr_command *tm;
243 tm = mpr_alloc_high_priority_command(sc);
245 if (sc->sassc->tm_count++ == 0) {
246 mpr_dprint(sc, MPR_RECOVERY,
247 "%s freezing simq\n", __func__);
248 xpt_freeze_simq(sc->sassc->sim, 1);
250 mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
251 sc->sassc->tm_count);
/*
 * Return a task-management command to the high-priority free pool and,
 * when this was the last outstanding TM (tm_count reaches 0), release the
 * SIM queue freeze taken in mprsas_alloc_tm().
 */
257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
259 mpr_dprint(sc, MPR_TRACE, "%s", __func__);
263 /* if there are no TMs in use, we can release the simq. We use our
264 * own refcount so that it's easier for a diag reset to cleanup and
267 if (--sc->sassc->tm_count == 0) {
268 mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
269 xpt_release_simq(sc->sassc->sim, 1);
271 mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
272 sc->sassc->tm_count);
274 mpr_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan.  targ == NULL means rescan the whole bus
 * (XPT_SCAN_BUS with the wildcard target id); otherwise scan just that
 * target (XPT_SCAN_TGT), its id derived from its index in the targets[]
 * array.  Allocation is nowait since this can run from event context.
 */
278 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
280 struct mprsas_softc *sassc = sc->sassc;
282 target_id_t targetid;
286 pathid = cam_sim_path(sassc->sim);
288 targetid = CAM_TARGET_WILDCARD;
290 targetid = targ - sassc->targets;
293 * Allocate a CCB and schedule a rescan.
295 ccb = xpt_alloc_ccb_nowait();
297 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
301 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
302 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
303 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
308 if (targetid == CAM_TARGET_WILDCARD)
309 ccb->ccb_h.func_code = XPT_SCAN_BUS;
311 ccb->ccb_h.func_code = XPT_SCAN_TGT;
313 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * Debug-log a command with as much context as is available: the CAM path
 * and CDB when a CCB is attached, otherwise a synthesized "noperiph" tuple,
 * then the SMID and the caller's printf-style message.  Early-outs when the
 * requested debug level is not enabled, so formatting cost is only paid
 * when the message would actually print.
 */
318 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
328 /* No need to be in here if debugging isn't enabled */
329 if ((cm->cm_sc->mpr_debug & level) == 0)
332 sbuf_new(&sb, str, sizeof(str), 0);
336 if (cm->cm_ccb != NULL) {
337 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
339 sbuf_cat(&sb, path_str);
340 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
341 scsi_command_string(&cm->cm_ccb->csio, &sb);
342 sbuf_printf(&sb, "length %d ",
343 cm->cm_ccb->csio.dxfer_len);
/* no CCB: fall back to sim name/unit/bus and target id (0xFFFFFFFF if none) */
346 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
347 cam_sim_name(cm->cm_sc->sassc->sim),
348 cam_sim_unit(cm->cm_sc->sassc->sim),
349 cam_sim_bus(cm->cm_sc->sassc->sim),
350 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
354 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
355 sbuf_vprintf(&sb, fmt, ap);
357 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  On success, clears the volume's cached
 * target state (but deliberately leaves devname/sasaddr so the target id is
 * not reused while the device might come back).  Unlike bare drives, no
 * follow-up MPI2_SAS_OP_REMOVE_DEVICE is needed for volumes.
 *
 * Fix: reply fields are little-endian on the wire; read IOCStatus via
 * le16toh() and TerminationCount via le32toh(), matching the handling in
 * mprsas_remove_device()/mprsas_remove_complete().  Without the conversion
 * the status checks and log output are wrong on big-endian hosts.
 */
363 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
365 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
366 struct mprsas_target *targ;
371 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
372 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
376 /* XXX retry the remove after the diag reset completes? */
377 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
378 "0x%04x\n", __func__, handle);
379 mprsas_free_tm(sc, tm);
383 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
384 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
385 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
386 mprsas_free_tm(sc, tm);
390 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
391 le32toh(reply->TerminationCount));
392 mpr_free_reply(sc, tm->cm_reply_data);
393 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
395 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
399 * Don't clear target if remove fails because things will get confusing.
400 * Leave the devname and sasaddr intact so that we know to avoid reusing
401 * this target id if possible, and so we can assign the same target id
402 * to this device if it comes back in the future.
404 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
407 targ->encl_handle = 0x0;
408 targ->encl_level_valid = 0x0;
409 targ->encl_level = 0x0;
/* blank (not NUL) connector name, matching the 4-char field format */
410 targ->connector_name[0] = ' ';
411 targ->connector_name[1] = ' ';
412 targ->connector_name[2] = ' ';
413 targ->connector_name[3] = ' ';
414 targ->encl_slot = 0x0;
415 targ->exp_dev_handle = 0x0;
417 targ->linkrate = 0x0;
420 targ->scsi_req_desc_type = 0;
423 mprsas_free_tm(sc, tm);
428 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
429 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume: mark the target INREMOVAL, kick off a
 * CAM rescan so peripherals notice, and issue a target-reset TM whose
 * completion (mprsas_remove_volume) finishes the teardown.  Volumes need
 * no MPI2_SAS_OP_REMOVE_DEVICE, unlike bare drives (see comment above).
 *
 * Fix: DevHandle in the MPI request must be little-endian on the wire;
 * use htole16() as mprsas_prepare_remove() already does.
 */
432 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
434 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
435 struct mpr_softc *sc;
436 struct mpr_command *cm;
437 struct mprsas_target *targ = NULL;
439 MPR_FUNCTRACE(sassc->sc);
442 targ = mprsas_find_target_by_handle(sassc, 0, handle);
444 /* FIXME: what is the action? */
445 /* We don't know about this device? */
446 mpr_dprint(sc, MPR_ERROR,
447 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
451 targ->flags |= MPRSAS_TARGET_INREMOVAL;
453 cm = mprsas_alloc_tm(sc);
455 mpr_dprint(sc, MPR_ERROR,
456 "%s: command alloc failure\n", __func__);
460 mprsas_rescan_target(sc, targ);
462 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
463 req->DevHandle = htole16(targ->handle);
464 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
465 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
467 /* SAS Hard Link Reset / SATA Link Reset */
468 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
472 cm->cm_desc.HighPriority.RequestFlags =
473 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
474 cm->cm_complete = mprsas_remove_volume;
475 cm->cm_complete_data = (void *)(uintptr_t)handle;
476 mpr_map_command(sc, cm);
480 * The MPT2 firmware performs debounce on the link to avoid transient link
481 * errors and false removals. When it does decide that link has been lost
482 * and a device needs to go away, it expects that the host will perform a
483 * target reset and then an op remove. The reset has the side-effect of
484 * aborting any outstanding requests for the device, which is required for
485 * the op-remove to succeed. It's not clear if the host should check for
486 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device: mark the target INREMOVAL, rescan it,
 * and send a hard-link-reset target-reset TM.  The firmware expects the
 * reset (which aborts outstanding I/O) before the op-remove; the chained
 * completion, mprsas_remove_device(), issues MPI2_SAS_OP_REMOVE_DEVICE.
 */
489 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
491 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
492 struct mpr_softc *sc;
493 struct mpr_command *cm;
494 struct mprsas_target *targ = NULL;
496 MPR_FUNCTRACE(sassc->sc);
500 targ = mprsas_find_target_by_handle(sassc, 0, handle);
502 /* FIXME: what is the action? */
503 /* We don't know about this device? */
504 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
509 targ->flags |= MPRSAS_TARGET_INREMOVAL;
511 cm = mprsas_alloc_tm(sc);
513 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
518 mprsas_rescan_target(sc, targ);
520 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
521 memset(req, 0, sizeof(*req));
522 req->DevHandle = htole16(targ->handle);
523 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
524 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
526 /* SAS Hard Link Reset / SATA Link Reset */
527 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
531 cm->cm_desc.HighPriority.RequestFlags =
532 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
533 cm->cm_complete = mprsas_remove_device;
534 cm->cm_complete_data = (void *)(uintptr_t)handle;
535 mpr_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM from mprsas_prepare_remove().
 * Validates the reply, then REUSES the same command frame to send the
 * MPI2_SAS_OP_REMOVE_DEVICE IO-unit-control request (completion:
 * mprsas_remove_complete), and finally fails any commands still queued on
 * the target with CAM_DEV_NOT_THERE.  Note the error paths free the TM and
 * (presumably via elided returns) skip the op-remove.
 */
539 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
541 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
542 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
543 struct mprsas_target *targ;
544 struct mpr_command *next_cm;
549 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
550 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
554 * Currently there should be no way we can hit this case. It only
555 * happens when we have a failure to allocate chain frames, and
556 * task management commands don't have S/G lists.
558 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
559 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
560 "handle %#04x! This should not happen!\n", __func__,
561 tm->cm_flags, handle);
562 mprsas_free_tm(sc, tm);
/* NULL reply: most likely a diag reset completed this command */
567 /* XXX retry the remove after the diag reset completes? */
568 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
569 "0x%04x\n", __func__, handle);
570 mprsas_free_tm(sc, tm);
574 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
575 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
576 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
577 mprsas_free_tm(sc, tm);
581 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
582 le32toh(reply->TerminationCount));
583 mpr_free_reply(sc, tm->cm_reply_data);
584 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
586 /* Reuse the existing command */
587 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
588 memset(req, 0, sizeof(*req));
589 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
590 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
591 req->DevHandle = htole16(handle);
593 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
594 tm->cm_complete = mprsas_remove_complete;
595 tm->cm_complete_data = (void *)(uintptr_t)handle;
597 mpr_map_command(sc, tm);
599 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
601 if (targ->encl_level_valid) {
602 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
603 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
604 targ->connector_name);
/* fail anything still queued on the target — the device is gone */
606 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
609 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
610 ccb = tm->cm_complete_data;
611 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
612 mprsas_scsiio_complete(sc, tm);
/*
 * Completion handler for the MPI2_SAS_OP_REMOVE_DEVICE request sent by
 * mprsas_remove_device().  On firmware success, clears the target's cached
 * state (keeping devname/sasaddr so the target id is not reused) and frees
 * any per-LUN records; the TM command is freed on every path.
 */
617 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
619 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
621 struct mprsas_target *targ;
622 struct mprsas_lun *lun;
626 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
627 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
630 * Currently there should be no way we can hit this case. It only
631 * happens when we have a failure to allocate chain frames, and
632 * task management commands don't have S/G lists.
634 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
635 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
636 "handle %#04x! This should not happen!\n", __func__,
637 tm->cm_flags, handle);
638 mprsas_free_tm(sc, tm);
643 /* most likely a chip reset */
644 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
645 "0x%04x\n", __func__, handle);
646 mprsas_free_tm(sc, tm);
650 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
651 __func__, handle, le16toh(reply->IOCStatus));
654 * Don't clear target if remove fails because things will get confusing.
655 * Leave the devname and sasaddr intact so that we know to avoid reusing
656 * this target id if possible, and so we can assign the same target id
657 * to this device if it comes back in the future.
659 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
662 targ->encl_handle = 0x0;
663 targ->encl_level_valid = 0x0;
664 targ->encl_level = 0x0;
/* blank (not NUL) connector name, matching the 4-char field format */
665 targ->connector_name[0] = ' ';
666 targ->connector_name[1] = ' ';
667 targ->connector_name[2] = ' ';
668 targ->connector_name[3] = ' ';
669 targ->encl_slot = 0x0;
670 targ->exp_dev_handle = 0x0;
672 targ->linkrate = 0x0;
675 targ->scsi_req_desc_type = 0;
/* drop all per-LUN records for the departed target */
677 while (!SLIST_EMPTY(&targ->luns)) {
678 lun = SLIST_FIRST(&targ->luns);
679 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
684 mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (SAS topology,
 * device status, discovery, broadcast primitives, IR/RAID state, and
 * temperature threshold) and register mprsas_evt_handler for them.
 */
688 mprsas_register_events(struct mpr_softc *sc)
693 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
694 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
695 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
696 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
697 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
698 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
699 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
700 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
701 setbit(events, MPI2_EVENT_IR_VOLUME);
702 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
703 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
704 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
706 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
707 &sc->sassc->mprsas_eh);
/*
 * Attach the CAM/SAS layer: allocate the sassc softc and targets[] array,
 * create the SIM queue, SIM, and event taskqueue, register the SCSI bus,
 * enter startup mode (freezing the simq until discovery completes), and
 * register async CAM callbacks plus firmware events.  NOTE(review): error
 * unwind lines between the failure messages are elided in this chunk;
 * cleanup ordering cannot be confirmed from here.
 */
713 mpr_attach_sas(struct mpr_softc *sc)
715 struct mprsas_softc *sassc;
721 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
723 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
729 * XXX MaxTargets could change during a reinit. since we don't
730 * resize the targets[] array during such an event, cache the value
731 * of MaxTargets here so that we don't get into trouble later. This
732 * should move into the reinit logic.
734 sassc->maxtargets = sc->facts->MaxTargets;
735 sassc->targets = malloc(sizeof(struct mprsas_target) *
736 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
737 if (!sassc->targets) {
738 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
/* SIM queue sized to the controller's request credit */
746 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
747 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
752 unit = device_get_unit(sc->mpr_dev);
753 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
754 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
755 if (sassc->sim == NULL) {
756 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
761 TAILQ_INIT(&sassc->ev_queue);
763 /* Initialize taskqueue for Event Handling */
764 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
765 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
766 taskqueue_thread_enqueue, &sassc->ev_tq);
768 /* Run the task queue with lowest priority */
769 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
770 device_get_nameunit(sc->mpr_dev));
775 * XXX There should be a bus for every port on the adapter, but since
776 * we're just going to fake the topology for now, we'll pretend that
777 * everything is just a target on a single bus.
779 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
780 mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
787 * Assume that discovery events will start right away. Freezing
789 * Hold off boot until discovery is complete.
791 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
792 sc->sassc->startup_refcount = 0;
793 mprsas_startup_increment(sassc);
794 callout_init(&sassc->discovery_callout, 1 /*mprafe*/);
795 callout_init(&sassc->discovery_callout, 1 /*mprafe*/);
800 * Register for async events so we can determine the EEDP
801 * capabilities of devices.
803 status = xpt_create_path(&sassc->path, /*periph*/NULL,
804 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
806 if (status != CAM_REQ_CMP) {
807 mpr_printf(sc, "Error %#x creating sim path\n", status);
/* AC_ADVINFO_CHANGED only exists on CAM versions that support it */
812 #if (__FreeBSD_version >= 1000006) || \
813 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
814 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
816 event = AC_FOUND_DEVICE;
820 * Prior to the CAM locking improvements, we can't call
821 * xpt_register_async() with a particular path specified.
823 * If a path isn't specified, xpt_register_async() will
824 * generate a wildcard path and acquire the XPT lock while
825 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
826 * It will then drop the XPT lock once that is done.
828 * If a path is specified for xpt_register_async(), it will
829 * not acquire and drop the XPT lock around the call to
830 * xpt_action(). xpt_action() asserts that the caller
831 * holds the SIM lock, so the SIM lock has to be held when
832 * calling xpt_register_async() when the path is specified.
834 * But xpt_register_async calls xpt_for_all_devices(),
835 * which calls xptbustraverse(), which will acquire each
836 * SIM lock. When it traverses our particular bus, it will
837 * necessarily acquire the SIM lock, which will lead to a
838 * recursive lock acquisition.
840 * The CAM locking changes fix this problem by acquiring
841 * the XPT topology lock around bus traversal in
842 * xptbustraverse(), so the caller can hold the SIM lock
843 * and it does not cause a recursive lock acquisition.
845 * These __FreeBSD_version values are approximate, especially
846 * for stable/10, which is two months later than the actual
850 #if (__FreeBSD_version < 1000703) || \
851 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
853 status = xpt_register_async(event, mprsas_async, sc,
857 status = xpt_register_async(event, mprsas_async, sc,
861 if (status != CAM_REQ_CMP) {
862 mpr_dprint(sc, MPR_ERROR,
863 "Error %#x registering async handler for "
864 "AC_ADVINFO_CHANGED events\n", status);
865 xpt_free_path(sassc->path);
869 if (status != CAM_REQ_CMP) {
871 * EEDP use is the exception, not the rule.
872 * Warn the user, but do not fail to attach.
874 mpr_printf(sc, "EEDP capabilities disabled.\n");
879 mprsas_register_events(sc);
/*
 * Tear down the CAM/SAS layer in reverse of attach: deregister firmware
 * events, drain/free the event taskqueue (with the lock dropped to avoid
 * deadlock), deregister the async handler and bus, free the SIM and SIM
 * queue, then release per-target LUN records and the targets[] array.
 */
887 mpr_detach_sas(struct mpr_softc *sc)
889 struct mprsas_softc *sassc;
890 struct mprsas_lun *lun, *lun_tmp;
891 struct mprsas_target *targ;
/* nothing to do if CAM attach never completed */
896 if (sc->sassc == NULL)
900 mpr_deregister_events(sc, sassc->mprsas_eh);
903 * Drain and free the event handling taskqueue with the lock
904 * unheld so that any parallel processing tasks drain properly
905 * without deadlocking.
907 if (sassc->ev_tq != NULL)
908 taskqueue_free(sassc->ev_tq);
910 /* Make sure CAM doesn't wedge if we had to bail out early. */
913 /* Deregister our async handler */
914 if (sassc->path != NULL) {
/* event mask 0 == deregister for this path */
915 xpt_register_async(0, mprsas_async, sc, sassc->path);
916 xpt_free_path(sassc->path);
/* still in startup => simq is frozen from attach; release it */
920 if (sassc->flags & MPRSAS_IN_STARTUP)
921 xpt_release_simq(sassc->sim, 1);
923 if (sassc->sim != NULL) {
924 xpt_bus_deregister(cam_sim_path(sassc->sim));
925 cam_sim_free(sassc->sim, FALSE);
928 sassc->flags |= MPRSAS_SHUTDOWN;
931 if (sassc->devq != NULL)
932 cam_simq_free(sassc->devq);
934 for (i = 0; i < sassc->maxtargets; i++) {
935 targ = &sassc->targets[i];
936 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
940 free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery finishes: cancel the pending discovery
 * timeout callout, if one was armed.
 */
948 mprsas_discovery_end(struct mprsas_softc *sassc)
950 struct mpr_softc *sc = sassc->sc;
954 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
955 callout_stop(&sassc->discovery_callout);
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB function
 * code: path inquiry, transport settings, geometry, device reset, SCSI I/O,
 * and (on new enough CAM) SMP passthrough; unsupported functions get
 * CAM_FUNC_NOTAVAIL.  NOTE(review): several case labels and break/return
 * lines are elided in this chunk — the visible bodies are grouped by the
 * surviving statements.
 */
960 mprsas_action(struct cam_sim *sim, union ccb *ccb)
962 struct mprsas_softc *sassc;
964 sassc = cam_sim_softc(sim);
966 MPR_FUNCTRACE(sassc->sc);
967 mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
968 ccb->ccb_h.func_code);
969 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
971 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: report controller capabilities to CAM */
974 struct ccb_pathinq *cpi = &ccb->cpi;
976 cpi->version_num = 1;
977 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
978 cpi->target_sprt = 0;
979 #if (__FreeBSD_version >= 1000039) || \
980 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
981 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
983 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
985 cpi->hba_eng_cnt = 0;
986 cpi->max_target = sassc->maxtargets - 1;
/* the HBA itself claims the highest target id */
988 cpi->initiator_id = sassc->maxtargets - 1;
989 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
990 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
991 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
992 cpi->unit_number = cam_sim_unit(sim);
993 cpi->bus_id = cam_sim_bus(sim);
995 * XXXSLM-I think this needs to change based on config page or
996 * something instead of hardcoded to 150000.
998 cpi->base_transfer_speed = 150000;
999 cpi->transport = XPORT_SAS;
1000 cpi->transport_version = 0;
1001 cpi->protocol = PROTO_SCSI;
1002 cpi->protocol_version = SCSI_REV_SPC;
1003 #if __FreeBSD_version >= 800001
1005 * XXXSLM-probably need to base this number on max SGL's and
1008 cpi->maxio = 256 * 1024;
1010 cpi->ccb_h.status = CAM_REQ_CMP;
1013 case XPT_GET_TRAN_SETTINGS:
1015 struct ccb_trans_settings *cts;
1016 struct ccb_trans_settings_sas *sas;
1017 struct ccb_trans_settings_scsi *scsi;
1018 struct mprsas_target *targ;
1021 sas = &cts->xport_specific.sas;
1022 scsi = &cts->proto_specific.scsi;
1024 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1025 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1026 cts->ccb_h.target_id));
1027 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle 0 => no device behind this target id */
1028 if (targ->handle == 0x0) {
1029 cts->ccb_h.status = CAM_DEV_NOT_THERE;
1033 cts->protocol_version = SCSI_REV_SPC2;
1034 cts->transport = XPORT_SAS;
1035 cts->transport_version = 0;
1037 sas->valid = CTS_SAS_VALID_SPEED;
/* map firmware link-rate code to kbit/s (case labels elided here) */
1038 switch (targ->linkrate) {
1040 sas->bitrate = 150000;
1043 sas->bitrate = 300000;
1046 sas->bitrate = 600000;
1049 sas->bitrate = 1200000;
1055 cts->protocol = PROTO_SCSI;
1056 scsi->valid = CTS_SCSI_VALID_TQ;
1057 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1059 cts->ccb_h.status = CAM_REQ_CMP;
1062 case XPT_CALC_GEOMETRY:
1063 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1064 ccb->ccb_h.status = CAM_REQ_CMP;
1067 mpr_dprint(sassc->sc, MPR_XINFO,
1068 "mprsas_action XPT_RESET_DEV\n");
1069 mprsas_action_resetdev(sassc, ccb);
/* bus/term-io resets cannot be issued per-device; report success */
1074 mpr_dprint(sassc->sc, MPR_XINFO,
1075 "mprsas_action faking success for abort or reset\n");
1076 ccb->ccb_h.status = CAM_REQ_CMP;
1079 mprsas_action_scsiio(sassc, ccb);
1081 #if __FreeBSD_version >= 900026
1083 mprsas_action_smpio(sassc, ccb);
1087 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Notify CAM of a reset: build a path for the given target/LUN (wildcards
 * allowed) and post the async code (e.g. AC_BUS_RESET, AC_SENT_BDR), then
 * free the path.
 */
1095 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1096 target_id_t target_id, lun_id_t lun_id)
1098 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1099 struct cam_path *path;
1101 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1102 ac_code, target_id, (uintmax_t)lun_id);
1104 if (xpt_create_path(&path, NULL,
1105 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1106 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1111 xpt_async(ac_code, path, NULL);
1112 xpt_free_path(path);
/*
 * After a diag reset, force-complete every in-use command with a NULL
 * reply: run its completion callback if it has one, wake any synchronous
 * waiter, and log anything that is non-free yet has neither — that case
 * should be impossible.  Command slot 0 is skipped (reserved by firmware).
 */
1116 mprsas_complete_all_commands(struct mpr_softc *sc)
1118 struct mpr_command *cm;
1123 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1125 /* complete all commands with a NULL reply */
1126 for (i = 1; i < sc->num_reqs; i++) {
1127 cm = &sc->commands[i];
1128 cm->cm_reply = NULL;
/* polled commands spin on COMPLETE rather than a callback/wakeup */
1131 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1132 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1134 if (cm->cm_complete != NULL) {
1135 mprsas_log_command(cm, MPR_RECOVERY,
1136 "completing cm %p state %x ccb %p for diag reset\n",
1137 cm, cm->cm_state, cm->cm_ccb);
1138 cm->cm_complete(sc, cm);
1142 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1143 mprsas_log_command(cm, MPR_RECOVERY,
1144 "waking up cm %p state %x ccb %p for diag reset\n",
1145 cm, cm->cm_state, cm->cm_ccb);
1150 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1151 /* this should never happen, but if it does, log */
1152 mprsas_log_command(cm, MPR_RECOVERY,
1153 "cm %p state %x flags 0x%x ccb %p during diag "
1154 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Recover CAM state after a diag reset: re-enter startup/discovery mode
 * (re-freezing the simq via the startup refcount), announce a bus reset,
 * flush all outstanding commands, and zero every target's firmware handles
 * since they will be reassigned by the post-reset rediscovery.
 */
1161 mprsas_handle_reinit(struct mpr_softc *sc)
1165 /* Go back into startup mode and freeze the simq, so that CAM
1166 * doesn't send any commands until after we've rediscovered all
1167 * targets and found the proper device handles for them.
1169 * After the reset, portenable will trigger discovery, and after all
1170 * discovery-related activities have finished, the simq will be
1173 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1174 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1175 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1176 mprsas_startup_increment(sc->sassc);
1178 /* notify CAM of a bus reset */
1179 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1182 /* complete and cleanup after all outstanding commands */
1183 mprsas_complete_all_commands(sc);
1185 mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
1186 "completion\n", __func__, sc->sassc->startup_refcount,
1187 sc->sassc->tm_count);
1189 /* zero all the target handles, since they may change after the
1190 * reset, and we have to rediscover all the targets and use the new
1193 for (i = 0; i < sc->sassc->maxtargets; i++) {
/* outstanding should be 0 after complete_all_commands; log leftovers */
1194 if (sc->sassc->targets[i].outstanding != 0)
1195 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1196 i, sc->sassc->targets[i].outstanding);
1197 sc->sassc->targets[i].handle = 0x0;
1198 sc->sassc->targets[i].exp_dev_handle = 0x0;
1199 sc->sassc->targets[i].outstanding = 0;
1200 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * mprsas_tm_timeout -- callout handler fired when a task-management
 * request does not complete in time.  Runs with the softc mutex held
 * (asserted below).  The escalation that follows the log message
 * (presumably a diag reset) is elided from this extract.
 */
1204 mprsas_tm_timeout(void *data)
1206 struct mpr_command *tm = data;
1207 struct mpr_softc *sc = tm->cm_sc;
1209 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1211 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1212 "task mgmt %p timed out\n", tm);
/*
 * mprsas_logical_unit_reset_complete -- completion handler for a LUN
 * reset task-management command.  If no commands remain outstanding on
 * the LUN, recovery for it is finished and the next timed-out command
 * (if any) is aborted; otherwise the reset is considered failed and we
 * escalate to a target reset.
 *
 * NOTE(review): the assignment of 'targ' (presumably tm->cm_targ) is
 * elided from this extract -- confirm against the full source.
 */
1217 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1218 struct mpr_command *tm)
1220 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1221 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1222 unsigned int cm_count = 0;
1223 struct mpr_command *cm;
1224 struct mprsas_target *targ;
/* The TM did not time out; cancel its watchdog. */
1226 callout_stop(&tm->cm_callout);
1228 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1229 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1233 * Currently there should be no way we can hit this case. It only
1234 * happens when we have a failure to allocate chain frames, and
1235 * task management commands don't have S/G lists.
1237 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1238 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1239 "This should not happen!\n", __func__, tm->cm_flags);
1240 mprsas_free_tm(sc, tm);
/* A NULL reply here means the firmware never answered the TM. */
1244 if (reply == NULL) {
1245 mprsas_log_command(tm, MPR_RECOVERY,
1246 "NULL reset reply for tm %p\n", tm);
1247 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1248 /* this completion was due to a reset, just cleanup */
1249 targ->flags &= ~MPRSAS_TARGET_INRESET;
1251 mprsas_free_tm(sc, tm);
1254 /* we should have gotten a reply. */
1260 mprsas_log_command(tm, MPR_RECOVERY,
1261 "logical unit reset status 0x%x code 0x%x count %u\n",
1262 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1263 le32toh(reply->TerminationCount));
1265 /* See if there are any outstanding commands for this LUN.
1266 * This could be made more efficient by using a per-LU data
1267 * structure of some sort.
1269 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1270 if (cm->cm_lun == tm->cm_lun)
1274 if (cm_count == 0) {
1275 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1276 "logical unit %u finished recovery after reset\n",
1279 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1282 /* we've finished recovery for this logical unit. check and
1283 * see if some other logical unit has a timedout command
1284 * that needs to be processed.
1286 cm = TAILQ_FIRST(&targ->timedout_commands);
1288 mprsas_send_abort(sc, tm, cm);
1292 mprsas_free_tm(sc, tm);
1296 /* if we still have commands for this LUN, the reset
1297 * effectively failed, regardless of the status reported.
1298 * Escalate to a target reset.
1300 mprsas_log_command(tm, MPR_RECOVERY,
1301 "logical unit reset complete for tm %p, but still have %u "
1302 "command(s)\n", tm, cm_count);
1303 mprsas_send_reset(sc, tm,
1304 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * mprsas_target_reset_complete -- completion handler for a target reset
 * task-management command.  If the target has no outstanding commands,
 * recovery is finished and CAM is notified; otherwise the reset is
 * considered failed and recovery escalates further (escalation code is
 * elided from this extract).
 *
 * NOTE(review): the assignment of 'targ' (presumably tm->cm_targ) is
 * elided from this extract -- confirm against the full source.
 */
1309 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1311 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1312 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1313 struct mprsas_target *targ;
/* The TM did not time out; cancel its watchdog. */
1315 callout_stop(&tm->cm_callout);
1317 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1318 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1322 * Currently there should be no way we can hit this case. It only
1323 * happens when we have a failure to allocate chain frames, and
1324 * task management commands don't have S/G lists.
1326 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1327 mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
1328 "This should not happen!\n", __func__, tm->cm_flags);
1329 mprsas_free_tm(sc, tm);
/* A NULL reply here means the firmware never answered the TM. */
1333 if (reply == NULL) {
1334 mprsas_log_command(tm, MPR_RECOVERY,
1335 "NULL reset reply for tm %p\n", tm);
1336 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1337 /* this completion was due to a reset, just cleanup */
1338 targ->flags &= ~MPRSAS_TARGET_INRESET;
1340 mprsas_free_tm(sc, tm);
1343 /* we should have gotten a reply. */
1349 mprsas_log_command(tm, MPR_RECOVERY,
1350 "target reset status 0x%x code 0x%x count %u\n",
1351 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1352 le32toh(reply->TerminationCount));
1354 targ->flags &= ~MPRSAS_TARGET_INRESET;
1356 if (targ->outstanding == 0) {
1357 /* we've finished recovery for this target and all
1358 * of its logical units.
1360 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1361 "recovery finished after target reset\n");
1363 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1367 mprsas_free_tm(sc, tm);
1370 /* after a target reset, if this target still has
1371 * outstanding commands, the reset effectively failed,
1372 * regardless of the status reported. escalate.
1374 mprsas_log_command(tm, MPR_RECOVERY,
1375 "target reset complete for tm %p, but still have %u "
1376 "command(s)\n", tm, targ->outstanding);
/* Timeout, in seconds, for a LUN/target reset TM before mprsas_tm_timeout
 * fires. */
1381 #define MPR_RESET_TIMEOUT 30
/*
 * mprsas_send_reset -- build and issue a SCSI task-management reset of
 * the given 'type' (logical-unit reset or target reset) using the
 * pre-allocated TM command 'tm'.  Sets the matching completion handler,
 * arms a MPR_RESET_TIMEOUT watchdog, and maps the command to hardware.
 * Returns an error from mpr_map_command() on failure (return statements
 * are elided from this extract).
 */
1384 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1386 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1387 struct mprsas_target *target;
1390 target = tm->cm_targ;
/* Without a valid device handle the firmware cannot address the target. */
1391 if (target->handle == 0) {
1392 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1393 __func__, target->tid);
1397 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1398 req->DevHandle = htole16(target->handle);
1399 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1400 req->TaskType = type;
1402 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1403 /* XXX Need to handle invalid LUNs */
1404 MPR_SET_LUN(req->LUN, tm->cm_lun);
1405 tm->cm_targ->logical_unit_resets++;
1406 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1407 "sending logical unit reset\n");
1408 tm->cm_complete = mprsas_logical_unit_reset_complete;
1410 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1412 * Target reset method =
1413 * SAS Hard Link Reset / SATA Link Reset
1415 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1416 tm->cm_targ->target_resets++;
1417 tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
1418 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1419 "sending target reset\n");
1420 tm->cm_complete = mprsas_target_reset_complete;
1423 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1427 mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
1429 if (target->encl_level_valid) {
1430 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1431 "connector name (%4s)\n", target->encl_level,
1432 target->encl_slot, target->connector_name);
/* TM requests go out on the high-priority request queue. */
1436 tm->cm_desc.HighPriority.RequestFlags =
1437 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1438 tm->cm_complete_data = (void *)tm;
1440 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1441 mprsas_tm_timeout, tm);
1443 err = mpr_map_command(sc, tm);
1445 mprsas_log_command(tm, MPR_RECOVERY,
1446 "error %d sending reset type %u\n",
/*
 * mprsas_abort_complete -- completion handler for an ABORT TASK TM.
 * Decides the next recovery step for the target: done (no more timed-out
 * commands), continue (abort the next timed-out command), or escalate to
 * a logical-unit reset if the aborted command has not completed.
 */
1454 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1456 struct mpr_command *cm;
1457 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1458 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1459 struct mprsas_target *targ;
/* The TM did not time out; cancel its watchdog. */
1461 callout_stop(&tm->cm_callout);
1463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1464 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1468 * Currently there should be no way we can hit this case. It only
1469 * happens when we have a failure to allocate chain frames, and
1470 * task management commands don't have S/G lists.
1472 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1473 mprsas_log_command(tm, MPR_RECOVERY,
1474 "cm_flags = %#x for abort %p TaskMID %u!\n",
1475 tm->cm_flags, tm, le16toh(req->TaskMID));
1476 mprsas_free_tm(sc, tm);
/* A NULL reply here means the firmware never answered the TM. */
1480 if (reply == NULL) {
1481 mprsas_log_command(tm, MPR_RECOVERY,
1482 "NULL abort reply for tm %p TaskMID %u\n",
1483 tm, le16toh(req->TaskMID));
1484 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1485 /* this completion was due to a reset, just cleanup */
1487 mprsas_free_tm(sc, tm);
1490 /* we should have gotten a reply. */
1496 mprsas_log_command(tm, MPR_RECOVERY,
1497 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1498 le16toh(req->TaskMID),
1499 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1500 le32toh(reply->TerminationCount));
1502 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1504 /* if there are no more timedout commands, we're done with
1505 * error recovery for this target.
1507 mprsas_log_command(tm, MPR_RECOVERY,
1508 "finished recovery after aborting TaskMID %u\n",
1509 le16toh(req->TaskMID));
1512 mprsas_free_tm(sc, tm);
1514 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1515 /* abort success, but we have more timedout commands to abort */
1516 mprsas_log_command(tm, MPR_RECOVERY,
1517 "continuing recovery after aborting TaskMID %u\n",
1518 le16toh(req->TaskMID));
1520 mprsas_send_abort(sc, tm, cm);
1523 /* we didn't get a command completion, so the abort
1524 * failed as far as we're concerned. escalate.
1526 mprsas_log_command(tm, MPR_RECOVERY,
1527 "abort failed for TaskMID %u tm %p\n",
1528 le16toh(req->TaskMID), tm);
1530 mprsas_send_reset(sc, tm,
1531 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Timeout, in seconds, for an ABORT TASK TM before mprsas_tm_timeout
 * fires. */
1535 #define MPR_ABORT_TIMEOUT 5
/*
 * mprsas_send_abort -- build and issue an ABORT TASK TM for the timed-out
 * command 'cm' using the pre-allocated TM command 'tm'.  Sets
 * mprsas_abort_complete as the completion handler and arms a
 * MPR_ABORT_TIMEOUT watchdog.
 *
 * NOTE(review): the assignment of 'targ' (presumably cm->cm_targ) is
 * elided from this extract -- confirm against the full source.
 */
1538 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1539 struct mpr_command *cm)
1541 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1542 struct mprsas_target *targ;
1546 if (targ->handle == 0) {
1547 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1548 __func__, cm->cm_ccb->ccb_h.target_id);
1552 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1553 "Aborting command %p\n", cm);
1555 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1556 req->DevHandle = htole16(targ->handle);
1557 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1558 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1560 /* XXX Need to handle invalid LUNs */
1561 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies which in-flight request the firmware should abort. */
1563 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests go out on the high-priority request queue. */
1566 tm->cm_desc.HighPriority.RequestFlags =
1567 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1568 tm->cm_complete = mprsas_abort_complete;
1569 tm->cm_complete_data = (void *)tm;
1570 tm->cm_targ = cm->cm_targ;
1571 tm->cm_lun = cm->cm_lun;
1573 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1574 mprsas_tm_timeout, tm);
1578 err = mpr_map_command(sc, tm);
1580 mprsas_log_command(tm, MPR_RECOVERY,
1581 "error %d sending abort for cm %p SMID %u\n",
1582 err, cm, req->TaskMID);
/*
 * mprsas_scsiio_timeout -- callout handler for a timed-out SCSI I/O.
 * First runs the interrupt handler in case the completion is merely
 * pending; otherwise marks the command timed out, queues it on the
 * target's timedout_commands list, and starts (or joins) recovery by
 * sending an ABORT TASK TM.
 */
1588 mprsas_scsiio_timeout(void *data)
1590 struct mpr_softc *sc;
1591 struct mpr_command *cm;
1592 struct mprsas_target *targ;
1594 cm = (struct mpr_command *)data;
1598 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1600 mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1603 * Run the interrupt handler to make sure it's not pending. This
1604 * isn't perfect because the command could have already completed
1605 * and been re-used, though this is unlikely.
1607 mpr_intr_locked(sc);
1608 if (cm->cm_state == MPR_CM_STATE_FREE) {
/* Completion raced the timeout; nothing to recover. */
1609 mprsas_log_command(cm, MPR_XINFO,
1610 "SCSI command %p almost timed out\n", cm);
1614 if (cm->cm_ccb == NULL) {
1615 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1622 mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
1623 "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
1625 if (targ->encl_level_valid) {
1626 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1627 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1628 targ->connector_name);
1631 /* XXX first, check the firmware state, to see if it's still
1632 * operational. if not, do a diag reset.
/* Mark the CCB and command as timed out and park the command on the
 * target's recovery queue. */
1635 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1636 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1637 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1639 if (targ->tm != NULL) {
1640 /* target already in recovery, just queue up another
1641 * timedout command to be processed later.
1643 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1644 "processing by tm %p\n", cm, targ->tm);
1646 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1647 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1650 /* start recovery by aborting the first timedout command */
1651 mprsas_send_abort(sc, targ->tm, cm);
1654 /* XXX queue this target up for recovery once a TM becomes
1655 * available. The firmware only has a limited number of
1656 * HighPriority credits for the high priority requests used
1657 * for task management, and we ran out.
1659 * Isilon: don't worry about this for now, since we have
1660 * more credits than disks in an enclosure, and limit
1661 * ourselves to one TM per target for recovery.
1663 mpr_dprint(sc, MPR_RECOVERY,
1664 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * mprsas_action_scsiio -- translate a CAM XPT_SCSI_IO CCB into an MPI2
 * SCSI IO request and submit it to the controller.  Validates the
 * target, allocates a driver command, fills in the request (direction,
 * tagging, LUN, CDB, optional EEDP/T10-PI setup, request descriptor),
 * arms the per-command timeout, and maps the command to hardware.
 * Called with the softc mutex held (asserted below).
 */
1669 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1671 MPI2_SCSI_IO_REQUEST *req;
1672 struct ccb_scsiio *csio;
1673 struct mpr_softc *sc;
1674 struct mprsas_target *targ;
1675 struct mprsas_lun *lun;
1676 struct mpr_command *cm;
1677 uint8_t i, lba_byte, *ref_tag_addr;
1678 uint16_t eedp_flags;
1679 uint32_t mpi_control;
1683 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1686 targ = &sassc->targets[csio->ccb_h.target_id];
1687 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Reject I/O to targets that have no device handle yet. */
1688 if (targ->handle == 0x0) {
1689 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1690 __func__, csio->ccb_h.target_id);
1691 csio->ccb_h.status = CAM_DEV_NOT_THERE;
/* RAID component members are addressed through the volume, not directly. */
1695 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1696 mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
1697 "supported %u\n", __func__, csio->ccb_h.target_id);
1698 csio->ccb_h.status = CAM_DEV_NOT_THERE;
1703 * Sometimes, it is possible to get a command that is not "In
1704 * Progress" and was actually aborted by the upper layer. Check for
1705 * this here and complete the command without error.
1707 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1708 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1709 "target %u\n", __func__, csio->ccb_h.target_id);
1714 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1715 * that the volume has timed out. We want volumes to be enumerated
1716 * until they are deleted/removed, not just failed.
1718 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1719 if (targ->devinfo == 0)
1720 csio->ccb_h.status = CAM_REQ_CMP;
1722 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1727 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1728 mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
1729 csio->ccb_h.status = CAM_DEV_NOT_THERE;
1734 cm = mpr_alloc_command(sc);
/* Out of commands (or mid diag reset): freeze the SIM queue and ask CAM
 * to requeue this CCB later. */
1735 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1737 mpr_free_command(sc, cm);
1739 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1740 xpt_freeze_simq(sassc->sim, 1);
1741 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1743 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1744 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1749 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1750 bzero(req, sizeof(*req));
1751 req->DevHandle = htole16(targ->handle);
1752 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1754 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1755 req->SenseBufferLength = MPR_SENSE_LEN;
1757 req->ChainOffset = 0;
1758 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1763 req->DataLength = htole32(csio->dxfer_len);
1764 req->BidirectionalDataLength = 0;
1765 req->IoFlags = htole16(csio->cdb_len);
1768 /* Note: BiDirectional transfers are not supported */
1769 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1771 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1772 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1775 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1776 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1780 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1784 if (csio->cdb_len == 32)
1785 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1787 * It looks like the hardware doesn't require an explicit tag
1788 * number for each transaction. SAM Task Management not supported
1791 switch (csio->tag_action) {
1792 case MSG_HEAD_OF_Q_TAG:
1793 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1795 case MSG_ORDERED_Q_TAG:
1796 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1799 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1801 case CAM_TAG_ACTION_NONE:
1802 case MSG_SIMPLE_Q_TAG:
1804 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Per-target TLR setting is folded into the IO Control field. */
1807 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1808 req->Control = htole32(mpi_control);
1810 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1811 mpr_free_command(sc, cm);
1812 ccb->ccb_h.status = CAM_LUN_INVALID;
1817 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1818 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1820 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1821 req->IoFlags = htole16(csio->cdb_len);
1824 * Check if EEDP is supported and enabled. If it is then check if the
1825 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1826 * is formatted for EEDP support. If all of this is true, set CDB up
1827 * for EEDP transfer.
1829 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1830 if (sc->eedp_enabled && eedp_flags) {
1831 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1832 if (lun->lun_id == csio->ccb_h.target_lun) {
1837 if ((lun != NULL) && (lun->eedp_formatted)) {
1838 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1839 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1840 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1841 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1842 req->EEDPFlags = htole16(eedp_flags);
1845 * If CDB less than 32, fill in Primary Ref Tag with
1846 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1847 * already there. Also, set protection bit. FreeBSD
1848 * currently does not support CDBs bigger than 16, but
1849 * the code doesn't hurt, and will be here for the
1852 if (csio->cdb_len != 32) {
1853 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1854 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1855 PrimaryReferenceTag;
1856 for (i = 0; i < 4; i++) {
1858 req->CDB.CDB32[lba_byte + i];
1861 req->CDB.EEDP32.PrimaryReferenceTag =
1863 CDB.EEDP32.PrimaryReferenceTag);
1864 req->CDB.EEDP32.PrimaryApplicationTagMask =
1866 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1870 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1871 req->EEDPFlags = htole16(eedp_flags);
1872 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1878 cm->cm_length = csio->dxfer_len;
1879 if (cm->cm_length != 0) {
1881 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
/* SGL lives at 32-bit-word offset 24 of the 32-word request frame. */
1885 cm->cm_sge = &req->SGL;
1886 cm->cm_sglsize = (32 - 24) * 4;
1887 cm->cm_complete = mprsas_scsiio_complete;
1888 cm->cm_complete_data = ccb;
1890 cm->cm_lun = csio->ccb_h.target_lun;
1893 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1894 * and set descriptor type.
1896 if (targ->scsi_req_desc_type ==
1897 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1898 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1899 cm->cm_desc.FastPathSCSIIO.RequestFlags =
1900 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1901 cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1903 cm->cm_desc.SCSIIO.RequestFlags =
1904 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1905 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* CCB timeout is in milliseconds; arm the per-command watchdog. */
1908 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1909 mprsas_scsiio_timeout, cm, 0);
1912 targ->outstanding++;
1913 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1914 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1916 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1917 __func__, cm, ccb, targ->outstanding);
1919 mpr_map_command(sc, cm);
/*
 * mpr_response_code -- log a human-readable description of an MPI2 task
 * management response code (diagnostic output only).
 */
1924 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1928 switch (response_code) {
1929 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1930 desc = "task management request completed";
1932 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1933 desc = "invalid frame";
1935 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1936 desc = "task management request not supported";
1938 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1939 desc = "task management request failed";
1941 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1942 desc = "task management request succeeded";
1944 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1945 desc = "invalid lun";
1948 desc = "overlapped tag attempted";
1950 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1951 desc = "task queued, however not sent to target";
1957 mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1962 * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
/*
 * Decodes IOC status, SCSI status and SCSI state from the reply into
 * human-readable strings and logs them (with enclosure location and,
 * when autosense data is valid, the sense buffer).  Diagnostic output
 * only; no driver state is modified.
 */
1965 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1966 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1970 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1971 MPI2_IOCSTATUS_MASK;
1972 u8 scsi_state = mpi_reply->SCSIState;
1973 u8 scsi_status = mpi_reply->SCSIStatus;
1974 char *desc_ioc_state = NULL;
1975 char *desc_scsi_status = NULL;
1976 char *desc_scsi_state = sc->tmp_string;
1977 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* NOTE(review): the special-case for log_info 0x31170000 is truncated in
 * this extract; its body is elided. */
1979 if (log_info == 0x31170000)
1982 switch (ioc_status) {
1983 case MPI2_IOCSTATUS_SUCCESS:
1984 desc_ioc_state = "success";
1986 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1987 desc_ioc_state = "invalid function";
1989 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1990 desc_ioc_state = "scsi recovered error";
1992 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1993 desc_ioc_state = "scsi invalid dev handle";
1995 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1996 desc_ioc_state = "scsi device not there";
1998 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1999 desc_ioc_state = "scsi data overrun";
2001 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2002 desc_ioc_state = "scsi data underrun";
2004 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2005 desc_ioc_state = "scsi io data error";
2007 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2008 desc_ioc_state = "scsi protocol error";
2010 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2011 desc_ioc_state = "scsi task terminated";
2013 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2014 desc_ioc_state = "scsi residual mismatch";
2016 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2017 desc_ioc_state = "scsi task mgmt failed";
2019 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2020 desc_ioc_state = "scsi ioc terminated";
2022 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2023 desc_ioc_state = "scsi ext terminated";
2025 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2026 desc_ioc_state = "eedp guard error";
2028 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2029 desc_ioc_state = "eedp ref tag error";
2031 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2032 desc_ioc_state = "eedp app tag error";
2035 desc_ioc_state = "unknown";
2039 switch (scsi_status) {
2040 case MPI2_SCSI_STATUS_GOOD:
2041 desc_scsi_status = "good";
2043 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2044 desc_scsi_status = "check condition";
2046 case MPI2_SCSI_STATUS_CONDITION_MET:
2047 desc_scsi_status = "condition met";
2049 case MPI2_SCSI_STATUS_BUSY:
2050 desc_scsi_status = "busy";
2052 case MPI2_SCSI_STATUS_INTERMEDIATE:
2053 desc_scsi_status = "intermediate";
2055 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2056 desc_scsi_status = "intermediate condmet";
2058 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2059 desc_scsi_status = "reservation conflict";
2061 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2062 desc_scsi_status = "command terminated";
2064 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2065 desc_scsi_status = "task set full";
2067 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2068 desc_scsi_status = "aca active";
2070 case MPI2_SCSI_STATUS_TASK_ABORTED:
2071 desc_scsi_status = "task aborted";
2074 desc_scsi_status = "unknown";
/* Build the state description in sc->tmp_string by concatenating one
 * fragment per set SCSIState bit. */
2078 desc_scsi_state[0] = '\0';
2080 desc_scsi_state = " ";
2081 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2082 strcat(desc_scsi_state, "response info ");
2083 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2084 strcat(desc_scsi_state, "state terminated ");
2085 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2086 strcat(desc_scsi_state, "no status ");
2087 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2088 strcat(desc_scsi_state, "autosense failed ");
2089 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2090 strcat(desc_scsi_state, "autosense valid ");
2092 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2093 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2094 if (targ->encl_level_valid) {
2095 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2096 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2097 targ->connector_name);
2099 /* We can add more detail about underflow data here
2102 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2103 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2104 desc_scsi_state, scsi_state);
2106 if (sc->mpr_debug & MPR_XINFO &&
2107 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2108 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2109 scsi_sense_print(csio);
2110 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2113 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2114 response_info = le32toh(mpi_reply->ResponseInfo);
2115 response_bytes = (u8 *)&response_info;
2116 mpr_response_code(sc,response_bytes[0]);
2121 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2123 MPI2_SCSI_IO_REPLY *rep;
2125 struct ccb_scsiio *csio;
2126 struct mprsas_softc *sassc;
2127 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2128 u8 *TLR_bits, TLR_on;
2133 mpr_dprint(sc, MPR_TRACE,
2134 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2135 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2136 cm->cm_targ->outstanding);
2138 callout_stop(&cm->cm_callout);
2139 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2142 ccb = cm->cm_complete_data;
2144 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2146 * XXX KDM if the chain allocation fails, does it matter if we do
2147 * the sync and unload here? It is simpler to do it in every case,
2148 * assuming it doesn't cause problems.
2150 if (cm->cm_data != NULL) {
2151 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2152 dir = BUS_DMASYNC_POSTREAD;
2153 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2154 dir = BUS_DMASYNC_POSTWRITE;
2155 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2156 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2159 cm->cm_targ->completed++;
2160 cm->cm_targ->outstanding--;
2161 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2162 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2164 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2165 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2166 if (cm->cm_reply != NULL)
2167 mprsas_log_command(cm, MPR_RECOVERY,
2168 "completed timedout cm %p ccb %p during recovery "
2169 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2170 le16toh(rep->IOCStatus), rep->SCSIStatus,
2171 rep->SCSIState, le32toh(rep->TransferCount));
2173 mprsas_log_command(cm, MPR_RECOVERY,
2174 "completed timedout cm %p ccb %p during recovery\n",
2176 } else if (cm->cm_targ->tm != NULL) {
2177 if (cm->cm_reply != NULL)
2178 mprsas_log_command(cm, MPR_RECOVERY,
2179 "completed cm %p ccb %p during recovery "
2180 "ioc %x scsi %x state %x xfer %u\n",
2181 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2182 rep->SCSIStatus, rep->SCSIState,
2183 le32toh(rep->TransferCount));
2185 mprsas_log_command(cm, MPR_RECOVERY,
2186 "completed cm %p ccb %p during recovery\n",
2188 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2189 mprsas_log_command(cm, MPR_RECOVERY,
2190 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2193 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2195 * We ran into an error after we tried to map the command,
2196 * so we're getting a callback without queueing the command
2197 * to the hardware. So we set the status here, and it will
2198 * be retained below. We'll go through the "fast path",
2199 * because there can be no reply when we haven't actually
2200 * gone out to the hardware.
2202 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2205 * Currently the only error included in the mask is
2206 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2207 * chain frames. We need to freeze the queue until we get
2208 * a command that completed without this error, which will
2209 * hopefully have some chain frames attached that we can
2210 * use. If we wanted to get smarter about it, we would
2211 * only unfreeze the queue in this condition when we're
2212 * sure that we're getting some chain frames back. That's
2213 * probably unnecessary.
2215 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2216 xpt_freeze_simq(sassc->sim, 1);
2217 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2218 mpr_dprint(sc, MPR_INFO, "Error sending command, "
2219 "freezing SIM queue\n");
2224 * If this is a Start Stop Unit command and it was issued by the driver
2225 * during shutdown, decrement the refcount to account for all of the
2226 * commands that were sent. All SSU commands should be completed before
2227 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2230 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2231 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2235 /* Take the fast path to completion */
2236 if (cm->cm_reply == NULL) {
2237 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2238 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2239 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2241 ccb->ccb_h.status = CAM_REQ_CMP;
2242 ccb->csio.scsi_status = SCSI_STATUS_OK;
2244 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2245 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2246 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2247 mpr_dprint(sc, MPR_XINFO,
2248 "Unfreezing SIM queue\n");
2253 * There are two scenarios where the status won't be
2254 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2255 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2257 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2259 * Freeze the dev queue so that commands are
2260 * executed in the correct order with after error
2263 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2264 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2266 mpr_free_command(sc, cm);
2271 mprsas_log_command(cm, MPR_XINFO,
2272 "ioc %x scsi %x state %x xfer %u\n",
2273 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2274 le32toh(rep->TransferCount));
2276 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2277 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2278 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2280 case MPI2_IOCSTATUS_SUCCESS:
2281 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2283 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2284 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2285 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2287 /* Completion failed at the transport level. */
2288 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2289 MPI2_SCSI_STATE_TERMINATED)) {
2290 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2294 /* In a modern packetized environment, an autosense failure
2295 * implies that there's not much else that can be done to
2296 * recover the command.
2298 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2299 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2304 * CAM doesn't care about SAS Response Info data, but if this is
2305 * the state check if TLR should be done. If not, clear the
2306 * TLR_bits for the target.
2308 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2309 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2310 == MPR_SCSI_RI_INVALID_FRAME)) {
2311 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2312 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2316 * Intentionally override the normal SCSI status reporting
2317 * for these two cases. These are likely to happen in a
2318 * multi-initiator environment, and we want to make sure that
2319 * CAM retries these commands rather than fail them.
2321 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2322 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2323 ccb->ccb_h.status = CAM_REQ_ABORTED;
2327 /* Handle normal status and sense */
2328 csio->scsi_status = rep->SCSIStatus;
2329 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2330 ccb->ccb_h.status = CAM_REQ_CMP;
2332 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2334 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2335 int sense_len, returned_sense_len;
2337 returned_sense_len = min(le32toh(rep->SenseCount),
2338 sizeof(struct scsi_sense_data));
2339 if (returned_sense_len < csio->sense_len)
2340 csio->sense_resid = csio->sense_len -
2343 csio->sense_resid = 0;
2345 sense_len = min(returned_sense_len,
2346 csio->sense_len - csio->sense_resid);
2347 bzero(&csio->sense_data, sizeof(csio->sense_data));
2348 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2349 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2353 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2354 * and it's page code 0 (Supported Page List), and there is
2355 * inquiry data, and this is for a sequential access device, and
2356 * the device is an SSP target, and TLR is supported by the
2357 * controller, turn the TLR_bits value ON if page 0x90 is
2360 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2361 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2362 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2363 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2364 (csio->data_ptr != NULL) &&
2365 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2366 (sc->control_TLR) &&
2367 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2368 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2369 vpd_list = (struct scsi_vpd_supported_page_list *)
2371 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2373 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2374 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2375 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2376 csio->cdb_io.cdb_bytes[4];
2377 alloc_len -= csio->resid;
2378 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2379 if (vpd_list->list[i] == 0x90) {
2386 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2387 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2389 * If devinfo is 0 this will be a volume. In that case don't
2390 * tell CAM that the volume is not there. We want volumes to
2391 * be enumerated until they are deleted/removed, not just
2394 if (cm->cm_targ->devinfo == 0)
2395 ccb->ccb_h.status = CAM_REQ_CMP;
2397 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2399 case MPI2_IOCSTATUS_INVALID_SGL:
2400 mpr_print_scsiio_cmd(sc, cm);
2401 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2403 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2405 * This is one of the responses that comes back when an I/O
2406 * has been aborted. If it is because of a timeout that we
2407 * initiated, just set the status to CAM_CMD_TIMEOUT.
2408 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2409 * command is the same (it gets retried, subject to the
2410 * retry counter), the only difference is what gets printed
2413 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2414 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2416 ccb->ccb_h.status = CAM_REQ_ABORTED;
2418 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2419 /* resid is ignored for this condition */
2421 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2423 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2424 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2426 * Since these are generally external (i.e. hopefully
2427 * transient transport-related) errors, retry these without
2428 * decrementing the retry count.
2430 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2431 mprsas_log_command(cm, MPR_INFO,
2432 "terminated ioc %x scsi %x state %x xfer %u\n",
2433 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2434 le32toh(rep->TransferCount));
2436 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2437 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2438 case MPI2_IOCSTATUS_INVALID_VPID:
2439 case MPI2_IOCSTATUS_INVALID_FIELD:
2440 case MPI2_IOCSTATUS_INVALID_STATE:
2441 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2442 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2443 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2444 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2445 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2447 mprsas_log_command(cm, MPR_XINFO,
2448 "completed ioc %x scsi %x state %x xfer %u\n",
2449 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2450 le32toh(rep->TransferCount));
2451 csio->resid = cm->cm_length;
2452 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2456 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2458 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2459 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2460 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2461 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2465 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2466 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2467 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2470 mpr_free_command(sc, cm);
2474 #if __FreeBSD_version >= 900026
/*
 * Completion callback for XPT_SMP_IO requests issued via
 * mprsas_send_smpcmd().  Translates the MPI2 SMP passthrough reply into a
 * CAM status on the originating CCB, then syncs and unloads the data DMA
 * map and frees the driver command.
 */
2476 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2478 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2479 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2483 	ccb = cm->cm_complete_data;
2486 	 * Currently there should be no way we can hit this case.  It only
2487 	 * happens when we have a failure to allocate chain frames, and SMP
2488 	 * commands require two S/G elements only.  That should be handled
2489 	 * in the standard request size.
2491 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2492 		mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2493 		    __func__, cm->cm_flags);
2494 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	/* No reply frame at all means the request never completed sanely. */
2498 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2500 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2501 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	/*
	 * Reassemble the 64-bit SAS address from the request frame purely
	 * for the debug messages below.
	 */
2505 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2506 	sasaddr = le32toh(req->SASAddress.Low);
2507 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2509 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2510 	    MPI2_IOCSTATUS_SUCCESS ||
2511 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2512 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2513 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2514 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2518 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
2519 	    "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);
	/* Byte 2 of the SMP response frame is the function result code. */
2521 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2522 		ccb->ccb_h.status = CAM_REQ_CMP;
2524 		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2528 	 * We sync in both directions because we had DMAs in the S/G list
2529 	 * in both directions.
2531 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2532 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2533 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2534 	mpr_free_command(sc, cm);
/*
 * Build and dispatch an SMP passthrough request to the expander at
 * 'sasaddr'.  Normalizes the CCB's request/response buffers to plain
 * virtual addresses (handling single-entry S/G lists; physical addresses
 * are rejected), allocates a driver command, fills in the MPI2
 * SMP_PASSTHROUGH request, describes the two buffers with a two-entry
 * uio, and maps the command for DMA.  Completion is handled by
 * mprsas_smpio_complete().
 */
2539 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
2542 	struct mpr_command *cm;
2543 	uint8_t *request, *response;
2544 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2545 	struct mpr_softc *sc;
	/*
	 * Newer kernels encode the data-transfer style in CAM_DATA_MASK;
	 * older ones use CAM_DATA_PHYS/CAM_SG_LIST_PHYS flags.  Both paths
	 * below extract one virtual request buffer and one virtual
	 * response buffer.
	 */
2553 #if (__FreeBSD_version >= 1000028) || \
2554     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2555 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2556 	case CAM_DATA_PADDR:
2557 	case CAM_DATA_SG_PADDR:
2559 		 * XXX We don't yet support physical addresses here.
2561 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2562 		    "supported\n", __func__);
2563 		ccb->ccb_h.status = CAM_REQ_INVALID;
2568 		 * The chip does not support more than one buffer for the
2569 		 * request or response.
2571 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2572 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2573 			mpr_dprint(sc, MPR_ERROR,
2574 			    "%s: multiple request or response buffer segments "
2575 			    "not supported for SMP\n", __func__);
2576 			ccb->ccb_h.status = CAM_REQ_INVALID;
2582 		 * The CAM_SCATTER_VALID flag was originally implemented
2583 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2584 		 * We have two.  So, just take that flag to mean that we
2585 		 * might have S/G lists, and look at the S/G segment count
2586 		 * to figure out whether that is the case for each individual
2589 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2590 			bus_dma_segment_t *req_sg;
2592 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2593 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2595 			request = ccb->smpio.smp_request;
2597 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2598 			bus_dma_segment_t *rsp_sg;
2600 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2601 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2603 			response = ccb->smpio.smp_response;
2605 	case CAM_DATA_VADDR:
2606 		request = ccb->smpio.smp_request;
2607 		response = ccb->smpio.smp_response;
2610 		ccb->ccb_h.status = CAM_REQ_INVALID;
2614 #else /* __FreeBSD_version < 1000028 */
2616 	 * XXX We don't yet support physical addresses here.
2618 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2619 		mpr_printf(sc, "%s: physical addresses not supported\n",
2621 		ccb->ccb_h.status = CAM_REQ_INVALID;
2627 	 * If the user wants to send an S/G list, check to make sure they
2628 	 * have single buffers.
2630 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2632 		 * The chip does not support more than one buffer for the
2633 		 * request or response.
2635 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2636 		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2637 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2638 			    "response buffer segments not supported for SMP\n",
2640 			ccb->ccb_h.status = CAM_REQ_INVALID;
2646 		 * The CAM_SCATTER_VALID flag was originally implemented
2647 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2648 		 * We have two.  So, just take that flag to mean that we
2649 		 * might have S/G lists, and look at the S/G segment count
2650 		 * to figure out whether that is the case for each individual
2653 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2654 			bus_dma_segment_t *req_sg;
2656 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2657 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2659 			request = ccb->smpio.smp_request;
2661 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
2662 			bus_dma_segment_t *rsp_sg;
2664 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2665 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2667 			response = ccb->smpio.smp_response;
2669 		request = ccb->smpio.smp_request;
2670 		response = ccb->smpio.smp_response;
2672 #endif /* __FreeBSD_version < 1000028 */
2674 	cm = mpr_alloc_command(sc);
2676 		mpr_dprint(sc, MPR_ERROR,
2677 		    "%s: cannot allocate command\n", __func__);
2678 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2683 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2684 	bzero(req, sizeof(*req));
2685 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2687 	/* Allow the chip to use any route to this SAS address. */
2688 	req->PhysicalPort = 0xff;
2690 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2692 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2694 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2695 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
2697 	mpr_init_sge(cm, req, &req->SGL);
2700 	 * Set up a uio to pass into mpr_map_command().  This allows us to
2701 	 * do one map command, and one busdma call in there.
2703 	cm->cm_uio.uio_iov = cm->cm_iovec;
2704 	cm->cm_uio.uio_iovcnt = 2;
2705 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2708 	 * The read/write flag isn't used by busdma, but set it just in
2709 	 * case.  This isn't exactly accurate, either, since we're going in
2712 	cm->cm_uio.uio_rw = UIO_WRITE;
	/* iovec 0 = outbound SMP request, iovec 1 = inbound SMP response. */
2714 	cm->cm_iovec[0].iov_base = request;
2715 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2716 	cm->cm_iovec[1].iov_base = response;
2717 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2719 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2720 	    cm->cm_iovec[1].iov_len;
2723 	 * Trigger a warning message in mpr_data_cb() for the user if we
2724 	 * wind up exceeding two S/G segments.  The chip expects one
2725 	 * segment for the request and another for the response.
2727 	cm->cm_max_segs = 2;
2729 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2730 	cm->cm_complete = mprsas_smpio_complete;
2731 	cm->cm_complete_data = ccb;
2734 	 * Tell the mapping code that we're using a uio, and that this is
2735 	 * an SMP passthrough request.  There is a little special-case
2736 	 * logic there (in mpr_data_cb()) to handle the bidirectional
2739 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2740 	    MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2742 	/* The chip data format is little endian. */
2743 	req->SASAddress.High = htole32(sasaddr >> 32);
2744 	req->SASAddress.Low = htole32(sasaddr);
2747 	 * XXX Note that we don't have a timeout/abort mechanism here.
2748 	 * From the manual, it looks like task management requests only
2749 	 * work for SCSI IO and SATA passthrough requests.  We may need to
2750 	 * have a mechanism to retry requests in the event of a chip reset
2751 	 * at least.  Hopefully the chip will insure that any errors short
2752 	 * of that are relayed back to the driver.
2754 	error = mpr_map_command(sc, cm);
2755 	if ((error != 0) && (error != EINPROGRESS)) {
2756 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2757 		    "mpr_map_command()\n", __func__, error);
2764 	mpr_free_command(sc, cm);
2765 	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to — the target itself if it embeds an SMP target,
 * otherwise the target's parent (normally the attached expander) — and
 * hand the CCB to mprsas_send_smpcmd().  Fails the CCB with
 * CAM_DEV_NOT_THERE when no usable SAS address can be determined.
 */
2771 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2773 	struct mpr_softc *sc;
2774 	struct mprsas_target *targ;
2775 	uint64_t sasaddr = 0;
2780 	 * Make sure the target exists.
2782 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2783 	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2784 	targ = &sassc->targets[ccb->ccb_h.target_id];
2785 	if (targ->handle == 0x0) {
2786 		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2787 		    __func__, ccb->ccb_h.target_id);
2788 		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2794 	 * If this device has an embedded SMP target, we'll talk to it
2796 	 * figure out what the expander's address is.
2798 	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2799 		sasaddr = targ->sasaddr;
2802 	 * If we don't have a SAS address for the expander yet, try
2803 	 * grabbing it from the page 0x83 information cached in the
2804 	 * transport layer for this target.  LSI expanders report the
2805 	 * expander SAS address as the port-associated SAS address in
2806 	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
2809 	 * XXX KDM disable this for now, but leave it commented out so that
2810 	 * it is obvious that this is another possible way to get the SAS
2813 	 * The parent handle method below is a little more reliable, and
2814 	 * the other benefit is that it works for devices other than SES
2815 	 * devices.  So you can send a SMP request to a da(4) device and it
2816 	 * will get routed to the expander that device is attached to.
2817 	 * (Assuming the da(4) device doesn't contain an SMP target...)
2821 	sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2825 	 * If we still don't have a SAS address for the expander, look for
2826 	 * the parent device of this device, which is probably the expander.
2829 #ifdef OLD_MPR_PROBE
2830 	struct mprsas_target *parent_target;
2833 	if (targ->parent_handle == 0x0) {
2834 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2835 		    "a valid parent handle!\n", __func__, targ->handle);
2836 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
	/*
	 * OLD_MPR_PROBE resolves the parent through the target list;
	 * the default path uses the parent info cached on the target.
	 */
2839 #ifdef OLD_MPR_PROBE
2840 	parent_target = mprsas_find_target_by_handle(sassc, 0,
2841 	    targ->parent_handle);
2843 	if (parent_target == NULL) {
2844 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2845 		    "a valid parent target!\n", __func__, targ->handle);
2846 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2850 	if ((parent_target->devinfo &
2851 	    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2852 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2853 		    "does not have an SMP target!\n", __func__,
2854 		    targ->handle, parent_target->handle);
2855 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2860 	sasaddr = parent_target->sasaddr;
2861 #else /* OLD_MPR_PROBE */
2862 	if ((targ->parent_devinfo &
2863 	    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2864 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2865 		    "does not have an SMP target!\n", __func__,
2866 		    targ->handle, targ->parent_handle);
2867 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2871 	if (targ->parent_sasaddr == 0x0) {
2872 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2873 		    "%d does not have a valid SAS address!\n", __func__,
2874 		    targ->handle, targ->parent_handle);
2875 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2879 	sasaddr = targ->parent_sasaddr;
2880 #endif /* OLD_MPR_PROBE */
2885 		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2886 		    "handle %d\n", __func__, targ->handle);
2887 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2890 	mprsas_send_smpcmd(sassc, ccb, sasaddr);
2898 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB by issuing an MPI2 target-reset task
 * management request (with SAS hard link reset / SATA link reset
 * MsgFlags) to the addressed target.  Completion is handled by
 * mprsas_resetdev_complete().  Must be called with the softc mutex held.
 */
2901 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2903 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2904 	struct mpr_softc *sc;
2905 	struct mpr_command *tm;
2906 	struct mprsas_target *targ;
2908 	MPR_FUNCTRACE(sassc->sc);
2909 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2911 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2912 	    ("Target %d out of bounds in XPT_RESET_DEV\n",
2913 	    ccb->ccb_h.target_id));
2915 	tm = mpr_alloc_command(sc);
2917 		mpr_dprint(sc, MPR_ERROR,
2918 		    "command alloc failure in mprsas_action_resetdev\n");
2919 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2924 	targ = &sassc->targets[ccb->ccb_h.target_id];
2925 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2926 	req->DevHandle = htole16(targ->handle);
2927 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2928 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2930 	/* SAS Hard Link Reset / SATA Link Reset */
2931 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	/* Task management requests go on the high-priority queue. */
2934 	tm->cm_desc.HighPriority.RequestFlags =
2935 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2936 	tm->cm_complete = mprsas_resetdev_complete;
2937 	tm->cm_complete_data = ccb;
2939 	mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset task management request issued
 * by mprsas_action_resetdev().  Translates the TM response code into a
 * CAM status on the CCB, announces the reset to CAM on success, and frees
 * the task management command.
 */
2943 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2945 	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2949 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2951 	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2952 	ccb = tm->cm_complete_data;
2955 	 * Currently there should be no way we can hit this case.  It only
2956 	 * happens when we have a failure to allocate chain frames, and
2957 	 * task management commands don't have S/G lists.
2959 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2960 		MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2962 		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2964 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
2965 		    "handle %#04x! This should not happen!\n", __func__,
2966 		    tm->cm_flags, req->DevHandle);
2967 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2971 	mpr_dprint(sc, MPR_XINFO,
2972 	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2973 	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
	/*
	 * NOTE(review): ResponseCode is byte-swapped with le32toh() here
	 * and below — confirm against the MPI2 headers that the field is
	 * actually 32 bits wide; if it is a single byte, the swap is wrong
	 * on big-endian hosts.
	 */
2975 	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2976 		ccb->ccb_h.status = CAM_REQ_CMP;
2977 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2981 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2985 	mprsas_free_tm(sc, tm);
/*
 * CAM poll entry point, used when interrupts are unavailable (e.g. while
 * dumping after a panic).  Disables MPR_TRACE debugging first, since the
 * message volume would make polled operation unusably slow, then runs the
 * interrupt handler directly.
 */
2990 mprsas_poll(struct cam_sim *sim)
2992 	struct mprsas_softc *sassc;
2994 	sassc = cam_sim_softc(sim);
2996 	if (sassc->sc->mpr_debug & MPR_TRACE) {
2997 		/* frequent debug messages during a panic just slow
2998 		 * everything down too much.
3000 		mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
3001 		sassc->sc->mpr_debug &= ~MPR_TRACE;
3004 	mpr_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  Two events are of interest:
 *
 * AC_ADVINFO_CHANGED (newer kernels): when read-capacity advanced info
 * changes for a device, re-query it with XPT_DEV_ADVINFO and record per-
 * LUN EEDP (protection information) state — formatted flag and block
 * size — in the target's LUN list, allocating the LUN entry on first use.
 *
 * AC_FOUND_DEVICE: prepare newly found devices for shutdown handling
 * (mprsas_prepare_ssu) and, on older kernels, probe for EEDP support
 * (mprsas_check_eedp).
 */
3008 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3011 	struct mpr_softc *sc;
3013 	sc = (struct mpr_softc *)callback_arg;
3016 #if (__FreeBSD_version >= 1000006) || \
3017     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3018 	case AC_ADVINFO_CHANGED: {
3019 		struct mprsas_target *target;
3020 		struct mprsas_softc *sassc;
3021 		struct scsi_read_capacity_data_long rcap_buf;
3022 		struct ccb_dev_advinfo cdai;
3023 		struct mprsas_lun *lun;
3028 		buftype = (uintptr_t)arg;
3034 		 * We're only interested in read capacity data changes.
3036 		if (buftype != CDAI_TYPE_RCAPLONG)
3040 		 * See the comment in mpr_attach_sas() for a detailed
3041 		 * explanation.  In these versions of FreeBSD we register
3042 		 * for all events and filter out the events that don't
3045 #if (__FreeBSD_version < 1000703) || \
3046     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3047 		if (xpt_path_path_id(path) != sassc->sim->path_id)
3052 		 * We should have a handle for this, but check to make sure.
3054 		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3055 		    ("Target %d out of bounds in mprsas_async\n",
3056 		    xpt_path_target_id(path)));
3057 		target = &sassc->targets[xpt_path_target_id(path)];
3058 		if (target->handle == 0)
3061 		lunid = xpt_path_lun_id(path);
		/* Find the matching LUN entry, or create one on first use. */
3063 		SLIST_FOREACH(lun, &target->luns, lun_link) {
3064 			if (lun->lun_id == lunid) {
3070 		if (found_lun == 0) {
3071 			lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3074 				mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3075 				    "LUN for EEDP support.\n");
3078 			lun->lun_id = lunid;
3079 			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
		/*
		 * Re-fetch the long read-capacity data through the
		 * transport layer's advanced-info cache.
		 */
3082 		bzero(&rcap_buf, sizeof(rcap_buf));
3083 		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3084 		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3085 		cdai.ccb_h.flags = CAM_DIR_IN;
3086 		cdai.buftype = CDAI_TYPE_RCAPLONG;
3087 #if (__FreeBSD_version >= 1100061) || \
3088     ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3089 		cdai.flags = CDAI_FLAG_NONE;
3093 		cdai.bufsiz = sizeof(rcap_buf);
3094 		cdai.buf = (uint8_t *)&rcap_buf;
3095 		xpt_action((union ccb *)&cdai);
3096 		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3097 			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
3099 		if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3100 		 && (rcap_buf.prot & SRC16_PROT_EN)) {
3101 			lun->eedp_formatted = TRUE;
3102 			lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3104 			lun->eedp_formatted = FALSE;
3105 			lun->eedp_block_size = 0;
3110 	case AC_FOUND_DEVICE: {
3111 		struct ccb_getdev *cgd;
3114 		 * See the comment in mpr_attach_sas() for a detailed
3115 		 * explanation.  In these versions of FreeBSD we register
3116 		 * for all events and filter out the events that don't
3119 #if (__FreeBSD_version < 1000703) || \
3120     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3121 		if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3126 		mprsas_prepare_ssu(sc, path, cgd);
3128 #if (__FreeBSD_version < 901503) || \
3129     ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3130 		mprsas_check_eedp(sc, path, cgd);
/*
 * Called when CAM reports a new device (AC_FOUND_DEVICE).  Looks up (or
 * creates) the per-LUN record for the device and, if it is a SATA
 * direct-access end device, marks it so that a SCSI START STOP UNIT is
 * sent to it when the driver shuts down.
 */
3140 mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
3141     struct ccb_getdev *cgd)
3143 	struct mprsas_softc *sassc = sc->sassc;
3145 	target_id_t targetid;
3147 	struct mprsas_target *target;
3148 	struct mprsas_lun *lun;
3152 	pathid = cam_sim_path(sassc->sim);
3153 	targetid = xpt_path_target_id(path);
3154 	lunid = xpt_path_lun_id(path);
3156 	KASSERT(targetid < sassc->maxtargets,
3157 	    ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid));
3158 	target = &sassc->targets[targetid];
3159 	if (target->handle == 0x0)
3163 	 * If LUN is already in list, don't create a new one.
3166 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3167 		if (lun->lun_id == lunid) {
3173 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3176 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3177 			    "preparing SSU.\n");
3180 		lun->lun_id = lunid;
3181 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3185 	 * If this is a SATA direct-access end device, mark it so that a SCSI
3186 	 * StartStopUnit command will be sent to it when the driver is being
3189 	if (((cgd->inq_data.device & 0x1F) == T_DIRECT) &&
3190 	    (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3191 	    ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3192 	    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3193 		lun->stop_at_shutdown = TRUE;
3197 #if (__FreeBSD_version < 901503) || \
3198 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Older-kernel (pre-AC_ADVINFO_CHANGED) path for determining EEDP
 * support: when a new device is found, and its inquiry data advertises
 * protection information (SPC3_SID_PROTECT), issue a READ CAPACITY(16)
 * to the LUN.  mprsas_read_cap_done() consumes the result and records
 * the EEDP formatting state in the LUN entry created here.
 */
3200 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3201     struct ccb_getdev *cgd)
3203 	struct mprsas_softc *sassc = sc->sassc;
3204 	struct ccb_scsiio *csio;
3205 	struct scsi_read_capacity_16 *scsi_cmd;
3206 	struct scsi_read_capacity_eedp *rcap_buf;
3208 	target_id_t targetid;
3211 	struct cam_path *local_path;
3212 	struct mprsas_target *target;
3213 	struct mprsas_lun *lun;
3218 	pathid = cam_sim_path(sassc->sim);
3219 	targetid = xpt_path_target_id(path);
3220 	lunid = xpt_path_lun_id(path);
3222 	KASSERT(targetid < sassc->maxtargets,
3223 	    ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3224 	target = &sassc->targets[targetid];
3225 	if (target->handle == 0x0)
3229 	 * Determine if the device is EEDP capable.
3231 	 * If this flag is set in the inquiry data, the device supports
3232 	 * protection information, and must support the 16 byte read capacity
3233 	 * command, otherwise continue without sending read cap 16
3235 	if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3239 	 * Issue a READ CAPACITY 16 command.  This info is used to determine if
3240 	 * the LUN is formatted for EEDP support.
3242 	ccb = xpt_alloc_ccb_nowait();
3244 		mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3249 	if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3251 		mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3258 	 * If LUN is already in list, don't create a new one.
3261 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3262 		if (lun->lun_id == lunid) {
3268 		lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3271 			mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3273 			xpt_free_path(local_path);
3277 		lun->lun_id = lunid;
3278 		SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3281 	xpt_path_string(local_path, path_str, sizeof(path_str));
3282 	mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3283 	    path_str, target->handle);
3286 	 * Issue a READ CAPACITY 16 command for the LUN.  The
3287 	 * mprsas_read_cap_done function will load the read cap info into the
3290 	rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3292 	if (rcap_buf == NULL) {
3293 		mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity "
3294 		    "buffer for EEDP support.\n");
3295 		xpt_free_path(ccb->ccb_h.path);
	/* Build the internally generated READ CAPACITY(16) SCSI I/O. */
3299 	xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3301 	csio->ccb_h.func_code = XPT_SCSI_IO;
3302 	csio->ccb_h.flags = CAM_DIR_IN;
3303 	csio->ccb_h.retry_count = 4;
3304 	csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3305 	csio->ccb_h.timeout = 60000;
3306 	csio->data_ptr = (uint8_t *)rcap_buf;
3307 	csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3308 	csio->sense_len = MPR_SENSE_LEN;
3309 	csio->cdb_len = sizeof(*scsi_cmd);
3310 	csio->tag_action = MSG_SIMPLE_Q_TAG;
	/* 0x9E = SERVICE ACTION IN(16); CDB byte 13 is the allocation length. */
3312 	scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3313 	bzero(scsi_cmd, sizeof(*scsi_cmd));
3314 	scsi_cmd->opcode = 0x9E;
3315 	scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3316 	((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3318 	ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internally generated READ CAPACITY(16)
 * issued by mprsas_check_eedp().  Releases the devq freeze if one was
 * imposed, looks up the LUN on the target and records whether it is
 * EEDP-formatted (protect bit set) along with its block size, then frees
 * the data buffer, path and CCB.
 */
3323 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3325 	struct mprsas_softc *sassc;
3326 	struct mprsas_target *target;
3327 	struct mprsas_lun *lun;
3328 	struct scsi_read_capacity_eedp *rcap_buf;
3330 	if (done_ccb == NULL)
3333 	/* Driver need to release devq, it Scsi command is
3334 	 * generated by driver internally.
3335 	 * Currently there is a single place where driver
3336 	 * calls scsi command internally. In future if driver
3337 	 * calls more scsi command internally, it needs to release
3338 	 * devq internally, since those command will not go back to
3341 	if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3342 		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3343 		xpt_release_devq(done_ccb->ccb_h.path,
3344 		    /*count*/ 1, /*run_queue*/TRUE);
3347 	rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3350 	 * Get the LUN ID for the path and look it up in the LUN list for the
3353 	sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3354 	KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3355 	    ("Target %d out of bounds in mprsas_read_cap_done\n",
3356 	    done_ccb->ccb_h.target_id));
3357 	target = &sassc->targets[done_ccb->ccb_h.target_id];
3358 	SLIST_FOREACH(lun, &target->luns, lun_link) {
3359 		if (lun->lun_id != done_ccb->ccb_h.target_lun)
3363 		 * Got the LUN in the target's LUN list.  Fill it in with EEDP
3364 		 * info.  If the READ CAP 16 command had some SCSI error (common
3365 		 * if command is not supported), mark the lun as not supporting
3366 		 * EEDP and set the block size to 0.
3368 		if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3369 		 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3370 			lun->eedp_formatted = FALSE;
3371 			lun->eedp_block_size = 0;
		/* Bit 0 of the protect byte = protection enabled (EEDP). */
3375 		if (rcap_buf->protect & 0x01) {
3376 			mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for "
3377 			    "target ID %d is formatted for EEDP "
3378 			    "support.\n", done_ccb->ccb_h.target_lun,
3379 			    done_ccb->ccb_h.target_id);
3380 			lun->eedp_formatted = TRUE;
3381 			lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3386 	// Finished with this CCB and path.
3387 	free(rcap_buf, M_MPR);
3388 	xpt_free_path(done_ccb->ccb_h.path);
3389 	xpt_free_ccb(done_ccb);
3391 #endif /* (__FreeBSD_version < 901503) || \
3392 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Begin SAS topology discovery: set wait_for_port_enable and send the
 * port enable request.  The flag keeps the SIM queue frozen until all
 * discovery events have been processed.
 */
3395 mprsas_startup(struct mpr_softc *sc)
3398 	 * Send the port enable message and set the wait_for_port_enable flag.
3399 	 * This flag helps to keep the simq frozen until all discovery events
3402 	sc->wait_for_port_enable = 1;
3403 	mprsas_send_portenable(sc);
/*
 * Allocate a driver command and issue an MPI2 PORT_ENABLE request to the
 * IOC.  Completion is handled by mprsas_portenable_complete().
 */
3408 mprsas_send_portenable(struct mpr_softc *sc)
3410 	MPI2_PORT_ENABLE_REQUEST *request;
3411 	struct mpr_command *cm;
3415 	if ((cm = mpr_alloc_command(sc)) == NULL)
3417 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3418 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3419 	request->MsgFlags = 0;
3421 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3422 	cm->cm_complete = mprsas_portenable_complete;
3426 	mpr_map_command(sc, cm);
3427 	mpr_dprint(sc, MPR_XINFO,
3428 	    "mpr_send_portenable finished cm %p req %p complete %p\n",
3429 	    cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT_ENABLE request sent by
 * mprsas_send_portenable().  Logs failures, frees the command, tears
 * down the config intrhook if still installed, and releases the startup
 * hold so bus rescans may proceed.
 *
 * Fix: IOCStatus is a little-endian field; it must be byte-swapped with
 * le16toh() BEFORE applying MPI2_IOCSTATUS_MASK.  The previous code
 * masked the raw little-endian value and then swapped the result, which
 * clears the wrong byte on big-endian hosts.  Every other IOCStatus
 * check in this file uses the (le16toh(x) & mask) order.
 */
3434 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3436 	MPI2_PORT_ENABLE_REPLY *reply;
3437 	struct mprsas_softc *sassc;
3443 	 * Currently there should be no way we can hit this case.  It only
3444 	 * happens when we have a failure to allocate chain frames, and
3445 	 * port enable commands don't have S/G lists.
3447 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3448 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3449 		    "This should not happen!\n", __func__, cm->cm_flags);
3452 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3454 		mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3455 	else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3456 	    MPI2_IOCSTATUS_SUCCESS)
3457 		mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3459 	mpr_free_command(sc, cm);
	/* Discovery is driven from the intrhook at boot; detach it now. */
3460 	if (sc->mpr_ich.ich_arg != NULL) {
3461 		mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3462 		config_intrhook_disestablish(&sc->mpr_ich);
3463 		sc->mpr_ich.ich_arg = NULL;
3467 	 * Done waiting for port enable to complete.  Decrement the refcount.
3468 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3471 	sc->wait_for_port_enable = 0;
3472 	sc->port_enable_complete = 1;
3473 	wakeup(&sc->port_enable_complete);
3474 	mprsas_startup_decrement(sassc);
3478 mprsas_check_id(struct mprsas_softc *sassc, int id)
3480 struct mpr_softc *sc = sassc->sc;
3484 ids = &sc->exclude_ids[0];
3485 while((name = strsep(&ids, ",")) != NULL) {
3486 if (name[0] == '\0')
3488 if (strtol(name, NULL, 0) == (long)id)