2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2014 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 /* Communications core for LSI MPT2 */
33 /* TODO Move headers to mprvar */
34 #include <sys/types.h>
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/selinfo.h>
39 #include <sys/module.h>
43 #include <sys/malloc.h>
45 #include <sys/sysctl.h>
46 #include <sys/endian.h>
47 #include <sys/queue.h>
48 #include <sys/kthread.h>
49 #include <sys/taskqueue.h>
52 #include <machine/bus.h>
53 #include <machine/resource.h>
56 #include <machine/stdarg.h>
59 #include <cam/cam_ccb.h>
60 #include <cam/cam_debug.h>
61 #include <cam/cam_sim.h>
62 #include <cam/cam_xpt_sim.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_periph.h>
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_message.h>
67 #if __FreeBSD_version >= 900026
68 #include <cam/scsi/smp_all.h>
71 #include <dev/mpr/mpi/mpi2_type.h>
72 #include <dev/mpr/mpi/mpi2.h>
73 #include <dev/mpr/mpi/mpi2_ioc.h>
74 #include <dev/mpr/mpi/mpi2_sas.h>
75 #include <dev/mpr/mpi/mpi2_cnfg.h>
76 #include <dev/mpr/mpi/mpi2_init.h>
77 #include <dev/mpr/mpi/mpi2_tool.h>
78 #include <dev/mpr/mpr_ioctl.h>
79 #include <dev/mpr/mprvar.h>
80 #include <dev/mpr/mpr_table.h>
81 #include <dev/mpr/mpr_sas.h>
83 #define MPRSAS_DISCOVERY_TIMEOUT 20
84 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
87 * static array to check SCSI OpCode for EEDP protection bits
89 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
90 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
91 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
92 static uint8_t op_code_prot[256] = {
93 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
96 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
111 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
113 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
114 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
115 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
116 static void mprsas_poll(struct cam_sim *sim);
117 static void mprsas_scsiio_timeout(void *data);
118 static void mprsas_abort_complete(struct mpr_softc *sc,
119 struct mpr_command *cm);
120 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
121 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
123 static void mprsas_resetdev_complete(struct mpr_softc *,
124 struct mpr_command *);
125 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
126 struct mpr_command *cm);
127 static int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
129 static void mprsas_async(void *callback_arg, uint32_t code,
130 struct cam_path *path, void *arg);
131 static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
132 struct ccb_getdev *cgd);
133 #if (__FreeBSD_version < 901503) || \
134 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136 struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138 union ccb *done_ccb);
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142 struct mpr_command *cm);
144 #if __FreeBSD_version >= 900026
146 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148 union ccb *ccb, uint64_t sasaddr);
150 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
/*
 * Linear scan of the sassc->targets[] array, beginning at index 'start',
 * for the entry whose firmware device handle equals 'handle'.
 * NOTE(review): the parameter list tail and the return statements are elided
 * in this extract — presumably returns the matching target, or NULL when no
 * entry matches; confirm against the full file.
 */
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
157 struct mprsas_target *target;
160 for (i = start; i < sassc->maxtargets; i++) {
161 target = &sassc->targets[i];
162 if (target->handle == handle)
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170 * commands before device handles have been found by discovery. Since
171 * discovery involves reading config pages and possibly sending commands,
172 * discovery actions may continue even after we receive the end of discovery
173 * event, so refcount discovery actions instead of assuming we can unfreeze
174 * the simq when we get the event.
/*
 * Take a reference on in-progress discovery actions.  On the 0 -> 1
 * transition while MPRSAS_IN_STARTUP is set, freeze the simq so CAM holds
 * off commands until device handles have been discovered (see the block
 * comment above for why discovery is refcounted).
 */
177 mprsas_startup_increment(struct mprsas_softc *sassc)
179 MPR_FUNCTRACE(sassc->sc);
181 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 if (sassc->startup_refcount++ == 0) {
183 /* just starting, freeze the simq */
184 mpr_dprint(sassc->sc, MPR_INIT,
185 "%s freezing simq\n", __func__);
/* NOTE(review): the >=1000039 branch (elided here) presumably holds off
 * boot via xpt_hold_boot()-style machinery; the fallback freezes the simq. */
186 #if __FreeBSD_version >= 1000039
189 xpt_freeze_simq(sassc->sim, 1);
191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 sassc->startup_refcount);
/*
 * If the simq was frozen by this driver (MPRSAS_QUEUE_FROZEN), clear the
 * flag and release one freeze count so CAM resumes dispatching commands.
 */
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 xpt_release_simq(sassc->sim, 1);
202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop a discovery-action reference.  When the last reference goes away
 * during startup, clear MPRSAS_IN_STARTUP, release the simq frozen by
 * mprsas_startup_increment(), and rescan so CAM sees the final topology.
 */
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 MPR_FUNCTRACE(sassc->sc);
211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 if (--sassc->startup_refcount == 0) {
213 /* finished all discovery-related actions, release
214 * the simq and rescan for the latest topology.
216 mpr_dprint(sassc->sc, MPR_INIT,
217 "%s releasing simq\n", __func__);
218 sassc->flags &= ~MPRSAS_IN_STARTUP;
219 xpt_release_simq(sassc->sim, 1);
/* Wildcard rescan of the whole bus (NULL target). */
220 #if __FreeBSD_version >= 1000039
223 mprsas_rescan_target(sassc->sc, NULL);
226 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
227 sassc->startup_refcount);
231 /* LSI's firmware requires us to stop sending commands when we're doing task
232 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management.  The first
 * outstanding TM (tm_count 0 -> 1) freezes the simq, per the firmware
 * requirement documented above that no normal commands be issued while
 * task management is in flight.
 * NOTE(review): the return statement is elided in this extract — presumably
 * returns 'tm' (possibly NULL on allocation failure).
 */
236 mprsas_alloc_tm(struct mpr_softc *sc)
238 struct mpr_command *tm;
241 tm = mpr_alloc_high_priority_command(sc);
243 if (sc->sassc->tm_count++ == 0) {
244 mpr_dprint(sc, MPR_RECOVERY,
245 "%s freezing simq\n", __func__);
246 xpt_freeze_simq(sc->sassc->sim, 1);
248 mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
249 sc->sassc->tm_count);
/*
 * Release a task-management command.  When the last outstanding TM
 * completes (tm_count -> 0), release the simq freeze taken in
 * mprsas_alloc_tm(), then return the command to the high-priority pool.
 */
255 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
257 mpr_dprint(sc, MPR_TRACE, "%s", __func__);
261 /* if there are no TMs in use, we can release the simq. We use our
262 * own refcount so that it's easier for a diag reset to cleanup and
265 if (--sc->sassc->tm_count == 0) {
266 mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
267 xpt_release_simq(sc->sassc->sim, 1);
269 mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
270 sc->sassc->tm_count);
272 mpr_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when 'targ' is
 * NULL (targetid becomes CAM_TARGET_WILDCARD).  The target id is derived
 * from the entry's position in the sassc->targets[] array.  Failure to
 * allocate the CCB or create the path is logged and the rescan is dropped.
 */
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
278 struct mprsas_softc *sassc = sc->sassc;
280 target_id_t targetid;
284 pathid = cam_sim_path(sassc->sim);
286 targetid = CAM_TARGET_WILDCARD;
288 targetid = targ - sassc->targets;
291 * Allocate a CCB and schedule a rescan.
293 ccb = xpt_alloc_ccb_nowait();
295 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
299 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
300 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
/* Whole-bus scan for the wildcard case, single-target scan otherwise. */
306 if (targetid == CAM_TARGET_WILDCARD)
307 ccb->ccb_h.func_code = XPT_SCAN_BUS;
309 ccb->ccb_h.func_code = XPT_SCAN_TGT;
311 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logger for a command.  Builds one line in a local
 * sbuf: the CAM path and SCSI CDB when a CCB is attached, otherwise a
 * "(noperiph:...)" prefix identifying sim/bus/target, then the SMID and the
 * caller's formatted message.  Returns immediately when 'level' is not
 * enabled in mpr_debug.
 * NOTE(review): va_start/va_end and sbuf_finish are elided in this extract.
 */
316 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
326 /* No need to be in here if debugging isn't enabled */
327 if ((cm->cm_sc->mpr_debug & level) == 0)
330 sbuf_new(&sb, str, sizeof(str), 0);
334 if (cm->cm_ccb != NULL) {
335 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
337 sbuf_cat(&sb, path_str);
338 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
339 scsi_command_string(&cm->cm_ccb->csio, &sb);
340 sbuf_printf(&sb, "length %d ",
341 cm->cm_ccb->csio.dxfer_len);
344 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
345 cam_sim_name(cm->cm_sc->sassc->sim),
346 cam_sim_unit(cm->cm_sc->sassc->sim),
347 cam_sim_bus(cm->cm_sc->sassc->sim),
348 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
352 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
353 sbuf_vprintf(&sb, fmt, ap);
355 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Volumes need no SAS_OP_REMOVE_DEVICE
 * follow-up (see comment above the prepare function), so on success this
 * just clears the target's cached identity fields and frees the TM.
 * NOTE(review): unlike mprsas_remove_device(), IOCStatus/TerminationCount
 * are read here without le16toh()/le32toh() byte-swapping — verify whether
 * that is intentional or a latent endianness bug in the full file.
 */
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
363 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 struct mprsas_target *targ;
369 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
/* NULL reply: most likely the command was aborted by a diag reset. */
374 /* XXX retry the remove after the diag reset completes? */
375 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 "0x%04x\n", __func__, handle);
377 mprsas_free_tm(sc, tm);
381 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
382 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
383 "device 0x%x\n", reply->IOCStatus, handle);
384 mprsas_free_tm(sc, tm);
388 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
389 reply->TerminationCount);
390 mpr_free_reply(sc, tm->cm_reply_data);
391 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
393 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
397 * Don't clear target if remove fails because things will get confusing.
398 * Leave the devname and sasaddr intact so that we know to avoid reusing
399 * this target id if possible, and so we can assign the same target id
400 * to this device if it comes back in the future.
402 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
/* Blank out the enclosure/connector identity cached on the target. */
405 targ->encl_handle = 0x0;
406 targ->encl_level_valid = 0x0;
407 targ->encl_level = 0x0;
408 targ->connector_name[0] = ' ';
409 targ->connector_name[1] = ' ';
410 targ->connector_name[2] = ' ';
411 targ->connector_name[3] = ' ';
412 targ->encl_slot = 0x0;
413 targ->exp_dev_handle = 0x0;
415 targ->linkrate = 0x0;
418 targ->scsi_req_desc_type = 0;
421 mprsas_free_tm(sc, tm);
426 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume: mark the target MPRSAS_TARGET_INREMOVAL,
 * rescan so CAM notices, and issue a target-reset TM with a link-reset
 * MsgFlags.  Completion continues in mprsas_remove_volume(); no
 * SAS_OP_REMOVE_DEVICE is needed for volumes (see comment above).
 * NOTE(review): req->DevHandle is assigned without htole16() here, whereas
 * the sibling mprsas_prepare_remove() byte-swaps it — confirm against the
 * full file whether this is a latent big-endian bug.
 */
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
432 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 struct mpr_softc *sc;
434 struct mpr_command *cm;
435 struct mprsas_target *targ = NULL;
437 MPR_FUNCTRACE(sassc->sc);
440 targ = mprsas_find_target_by_handle(sassc, 0, handle);
442 /* FIXME: what is the action? */
443 /* We don't know about this device? */
444 mpr_dprint(sc, MPR_ERROR,
445 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449 targ->flags |= MPRSAS_TARGET_INREMOVAL;
451 cm = mprsas_alloc_tm(sc);
453 mpr_dprint(sc, MPR_ERROR,
454 "%s: command alloc failure\n", __func__);
458 mprsas_rescan_target(sc, targ);
460 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 req->DevHandle = targ->handle;
462 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
465 /* SAS Hard Link Reset / SATA Link Reset */
466 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
470 cm->cm_desc.HighPriority.RequestFlags =
471 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 cm->cm_complete = mprsas_remove_volume;
473 cm->cm_complete_data = (void *)(uintptr_t)handle;
474 mpr_map_command(sc, cm);
478 * The MPT2 firmware performs debounce on the link to avoid transient link
479 * errors and false removals. When it does decide that link has been lost
480 * and a device needs to go away, it expects that the host will perform a
481 * target reset and then an op remove. The reset has the side-effect of
482 * aborting any outstanding requests for the device, which is required for
483 * the op-remove to succeed. It's not clear if the host should check for
484 * the device coming back alive after the reset.
/*
 * Begin removal of a bare device, per the firmware protocol described
 * above: mark the target in-removal, rescan, then send a target-reset TM
 * (which aborts outstanding I/O).  The completion handler,
 * mprsas_remove_device(), follows up with SAS_OP_REMOVE_DEVICE.
 */
487 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
489 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
490 struct mpr_softc *sc;
491 struct mpr_command *cm;
492 struct mprsas_target *targ = NULL;
494 MPR_FUNCTRACE(sassc->sc);
498 targ = mprsas_find_target_by_handle(sassc, 0, handle);
500 /* FIXME: what is the action? */
501 /* We don't know about this device? */
502 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
507 targ->flags |= MPRSAS_TARGET_INREMOVAL;
509 cm = mprsas_alloc_tm(sc);
511 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
516 mprsas_rescan_target(sc, targ);
518 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
519 memset(req, 0, sizeof(*req));
520 req->DevHandle = htole16(targ->handle);
521 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
522 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
524 /* SAS Hard Link Reset / SATA Link Reset */
525 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
529 cm->cm_desc.HighPriority.RequestFlags =
530 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
531 cm->cm_complete = mprsas_remove_device;
532 cm->cm_complete_data = (void *)(uintptr_t)handle;
533 mpr_map_command(sc, cm);
/*
 * Completion of the target-reset TM from mprsas_prepare_remove().  After
 * validating the reply, the same command is reused to send the second step
 * of the removal protocol, MPI2_SAS_OP_REMOVE_DEVICE; completion of that
 * continues in mprsas_remove_complete().  Any commands still queued on the
 * target are completed back to CAM with CAM_DEV_NOT_THERE.
 */
537 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
539 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
540 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
541 struct mprsas_target *targ;
542 struct mpr_command *next_cm;
547 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
548 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
552 * Currently there should be no way we can hit this case. It only
553 * happens when we have a failure to allocate chain frames, and
554 * task management commands don't have S/G lists.
556 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
557 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
558 "handle %#04x! This should not happen!\n", __func__,
559 tm->cm_flags, handle);
560 mprsas_free_tm(sc, tm);
/* NULL reply: the TM was most likely wiped out by a diag reset. */
565 /* XXX retry the remove after the diag reset completes? */
566 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
567 "0x%04x\n", __func__, handle);
568 mprsas_free_tm(sc, tm);
572 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
573 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
574 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
575 mprsas_free_tm(sc, tm);
579 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
580 le32toh(reply->TerminationCount));
581 mpr_free_reply(sc, tm->cm_reply_data);
582 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
584 /* Reuse the existing command */
585 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
586 memset(req, 0, sizeof(*req));
587 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
588 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
589 req->DevHandle = htole16(handle);
591 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
592 tm->cm_complete = mprsas_remove_complete;
593 tm->cm_complete_data = (void *)(uintptr_t)handle;
595 mpr_map_command(sc, tm);
597 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
599 if (targ->encl_level_valid) {
600 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
601 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
602 targ->connector_name);
/* Fail back any I/O the reset orphaned on this target.  Note 'tm' is
 * reused as the loop cursor here; the removal command itself was already
 * handed to mpr_map_command() above. */
604 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
607 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
608 ccb = tm->cm_complete_data;
609 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
610 mprsas_scsiio_complete(sc, tm);
/*
 * Final step of device removal: completion of the SAS IO-unit control
 * SAS_OP_REMOVE_DEVICE request sent by mprsas_remove_device().  On success
 * the target's cached identity fields are cleared and its LUN list freed;
 * on failure the devname/sasaddr are deliberately left intact (see the
 * comment below) so the target id can be re-matched later.
 */
615 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
617 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
619 struct mprsas_target *targ;
620 struct mprsas_lun *lun;
624 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
625 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628 * Currently there should be no way we can hit this case. It only
629 * happens when we have a failure to allocate chain frames, and
630 * task management commands don't have S/G lists.
632 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
633 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
634 "handle %#04x! This should not happen!\n", __func__,
635 tm->cm_flags, handle);
636 mprsas_free_tm(sc, tm);
641 /* most likely a chip reset */
642 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
643 "0x%04x\n", __func__, handle);
644 mprsas_free_tm(sc, tm);
648 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
649 __func__, handle, le16toh(reply->IOCStatus));
652 * Don't clear target if remove fails because things will get confusing.
653 * Leave the devname and sasaddr intact so that we know to avoid reusing
654 * this target id if possible, and so we can assign the same target id
655 * to this device if it comes back in the future.
657 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
/* Blank out the enclosure/connector identity cached on the target. */
660 targ->encl_handle = 0x0;
661 targ->encl_level_valid = 0x0;
662 targ->encl_level = 0x0;
663 targ->connector_name[0] = ' ';
664 targ->connector_name[1] = ' ';
665 targ->connector_name[2] = ' ';
666 targ->connector_name[3] = ' ';
667 targ->encl_slot = 0x0;
668 targ->exp_dev_handle = 0x0;
670 targ->linkrate = 0x0;
673 targ->scsi_req_desc_type = 0;
/* Drain and free the per-target LUN list. */
675 while (!SLIST_EMPTY(&targ->luns)) {
676 lun = SLIST_FIRST(&targ->luns);
677 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
682 mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (SAS topology and
 * device-status events plus IR/RAID and temperature events) and register
 * mprsas_evt_handler() for them.
 * NOTE(review): the declaration of the 'events' bitmap and the return are
 * elided in this extract.
 */
686 mprsas_register_events(struct mpr_softc *sc)
691 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
692 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
693 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
694 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
695 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
696 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
697 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
698 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
699 setbit(events, MPI2_EVENT_IR_VOLUME);
700 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
701 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
702 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
704 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
705 &sc->sassc->mprsas_eh);
/*
 * CAM attachment for the SAS half of the driver: allocate the mprsas_softc
 * and target array, create the simq and SIM, start the firmware-event
 * taskqueue, register a single bus, enter startup/discovery mode with the
 * simq frozen, register for async (EEDP-related) events, and finally
 * register for firmware events.
 * NOTE(review): error-unwind paths and the return are elided in this
 * extract; presumably returns 0 on success or an errno on failure.
 */
711 mpr_attach_sas(struct mpr_softc *sc)
713 struct mprsas_softc *sassc;
719 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
721 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
727 * XXX MaxTargets could change during a reinit. since we don't
728 * resize the targets[] array during such an event, cache the value
729 * of MaxTargets here so that we don't get into trouble later. This
730 * should move into the reinit logic.
732 sassc->maxtargets = sc->facts->MaxTargets;
733 sassc->targets = malloc(sizeof(struct mprsas_target) *
734 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
735 if (!sassc->targets) {
736 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
744 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
745 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
750 unit = device_get_unit(sc->mpr_dev);
751 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
752 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
753 if (sassc->sim == NULL) {
754 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
759 TAILQ_INIT(&sassc->ev_queue);
761 /* Initialize taskqueue for Event Handling */
762 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
763 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
764 taskqueue_thread_enqueue, &sassc->ev_tq);
766 /* Run the task queue with lowest priority */
767 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
768 device_get_nameunit(sc->mpr_dev));
773 * XXX There should be a bus for every port on the adapter, but since
774 * we're just going to fake the topology for now, we'll pretend that
775 * everything is just a target on a single bus.
777 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
778 mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
785 * Assume that discovery events will start right away. Freezing
787 * Hold off boot until discovery is complete.
789 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
790 sc->sassc->startup_refcount = 0;
791 mprsas_startup_increment(sassc);
793 callout_init(&sassc->discovery_callout, 1 /*mprafe*/);
798 * Register for async events so we can determine the EEDP
799 * capabilities of devices.
801 status = xpt_create_path(&sassc->path, /*periph*/NULL,
802 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
804 if (status != CAM_REQ_CMP) {
805 mpr_printf(sc, "Error %#x creating sim path\n", status);
/* Only FreeBSD versions with AC_ADVINFO_CHANGED can learn EEDP
 * capabilities via advanced-info async events; older ones fall back to
 * AC_FOUND_DEVICE only. */
810 #if (__FreeBSD_version >= 1000006) || \
811 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
812 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
814 event = AC_FOUND_DEVICE;
818 * Prior to the CAM locking improvements, we can't call
819 * xpt_register_async() with a particular path specified.
821 * If a path isn't specified, xpt_register_async() will
822 * generate a wildcard path and acquire the XPT lock while
823 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
824 * It will then drop the XPT lock once that is done.
826 * If a path is specified for xpt_register_async(), it will
827 * not acquire and drop the XPT lock around the call to
828 * xpt_action(). xpt_action() asserts that the caller
829 * holds the SIM lock, so the SIM lock has to be held when
830 * calling xpt_register_async() when the path is specified.
832 * But xpt_register_async calls xpt_for_all_devices(),
833 * which calls xptbustraverse(), which will acquire each
834 * SIM lock. When it traverses our particular bus, it will
835 * necessarily acquire the SIM lock, which will lead to a
836 * recursive lock acquisition.
838 * The CAM locking changes fix this problem by acquiring
839 * the XPT topology lock around bus traversal in
840 * xptbustraverse(), so the caller can hold the SIM lock
841 * and it does not cause a recursive lock acquisition.
843 * These __FreeBSD_version values are approximate, especially
844 * for stable/10, which is two months later than the actual
848 #if (__FreeBSD_version < 1000703) || \
849 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
851 status = xpt_register_async(event, mprsas_async, sc,
855 status = xpt_register_async(event, mprsas_async, sc,
859 if (status != CAM_REQ_CMP) {
860 mpr_dprint(sc, MPR_ERROR,
861 "Error %#x registering async handler for "
862 "AC_ADVINFO_CHANGED events\n", status);
863 xpt_free_path(sassc->path);
867 if (status != CAM_REQ_CMP) {
869 * EEDP use is the exception, not the rule.
870 * Warn the user, but do not fail to attach.
872 mpr_printf(sc, "EEDP capabilities disabled.\n");
877 mprsas_register_events(sc);
/*
 * Tear down the SAS/CAM attachment: deregister firmware events, drain and
 * free the event taskqueue (with the lock dropped — see comment), undo the
 * async registration, release any startup simq freeze, deregister and free
 * the SIM and simq, then free the per-target LUN lists and the target
 * array.  No-op when the SAS softc was never attached.
 */
885 mpr_detach_sas(struct mpr_softc *sc)
887 struct mprsas_softc *sassc;
888 struct mprsas_lun *lun, *lun_tmp;
889 struct mprsas_target *targ;
894 if (sc->sassc == NULL)
898 mpr_deregister_events(sc, sassc->mprsas_eh);
901 * Drain and free the event handling taskqueue with the lock
902 * unheld so that any parallel processing tasks drain properly
903 * without deadlocking.
905 if (sassc->ev_tq != NULL)
906 taskqueue_free(sassc->ev_tq);
908 /* Make sure CAM doesn't wedge if we had to bail out early. */
911 /* Deregister our async handler */
912 if (sassc->path != NULL) {
/* Passing event code 0 removes the async callback for this path. */
913 xpt_register_async(0, mprsas_async, sc, sassc->path);
914 xpt_free_path(sassc->path);
918 if (sassc->flags & MPRSAS_IN_STARTUP)
919 xpt_release_simq(sassc->sim, 1);
921 if (sassc->sim != NULL) {
922 xpt_bus_deregister(cam_sim_path(sassc->sim));
923 cam_sim_free(sassc->sim, FALSE);
926 sassc->flags |= MPRSAS_SHUTDOWN;
929 if (sassc->devq != NULL)
930 cam_simq_free(sassc->devq);
932 for (i = 0; i < sassc->maxtargets; i++) {
933 targ = &sassc->targets[i];
934 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
938 free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery finishes: cancel the pending discovery
 * timeout callout, if one was armed.
 */
946 mprsas_discovery_end(struct mprsas_softc *sassc)
948 struct mpr_softc *sc = sassc->sc;
952 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
953 callout_stop(&sassc->discovery_callout);
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code: answers XPT_PATH_INQ directly, fills transport settings for
 * XPT_GET_TRAN_SETTINGS from the cached target link rate, handles geometry,
 * device reset, SCSI I/O and (on new enough kernels) SMP I/O, and fakes
 * success for abort/term-io.  Must be called with the softc mutex held.
 */
958 mprsas_action(struct cam_sim *sim, union ccb *ccb)
960 struct mprsas_softc *sassc;
962 sassc = cam_sim_softc(sim);
964 MPR_FUNCTRACE(sassc->sc);
965 mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
966 ccb->ccb_h.func_code);
967 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
969 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: report controller capabilities to CAM. */
972 struct ccb_pathinq *cpi = &ccb->cpi;
974 cpi->version_num = 1;
975 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
976 cpi->target_sprt = 0;
977 #if __FreeBSD_version >= 1000039
978 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
980 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
982 cpi->hba_eng_cnt = 0;
983 cpi->max_target = sassc->maxtargets - 1;
/* The initiator claims the highest target id on this faked bus. */
985 cpi->initiator_id = sassc->maxtargets - 1;
986 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
987 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
988 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
989 cpi->unit_number = cam_sim_unit(sim);
990 cpi->bus_id = cam_sim_bus(sim);
992 * XXXSLM-I think this needs to change based on config page or
993 * something instead of hardcoded to 150000.
995 cpi->base_transfer_speed = 150000;
996 cpi->transport = XPORT_SAS;
997 cpi->transport_version = 0;
998 cpi->protocol = PROTO_SCSI;
999 cpi->protocol_version = SCSI_REV_SPC;
1000 #if __FreeBSD_version >= 800001
1002 * XXXSLM-probably need to base this number on max SGL's and
1005 cpi->maxio = 256 * 1024;
1007 cpi->ccb_h.status = CAM_REQ_CMP;
1010 case XPT_GET_TRAN_SETTINGS:
1012 struct ccb_trans_settings *cts;
1013 struct ccb_trans_settings_sas *sas;
1014 struct ccb_trans_settings_scsi *scsi;
1015 struct mprsas_target *targ;
1018 sas = &cts->xport_specific.sas;
1019 scsi = &cts->proto_specific.scsi;
1021 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1022 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1023 cts->ccb_h.target_id));
1024 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle == 0 means the slot has no live device behind it. */
1025 if (targ->handle == 0x0) {
1026 cts->ccb_h.status = CAM_DEV_NOT_THERE;
1030 cts->protocol_version = SCSI_REV_SPC2;
1031 cts->transport = XPORT_SAS;
1032 cts->transport_version = 0;
1034 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the firmware-reported link rate to kbit/s. */
1035 switch (targ->linkrate) {
1037 sas->bitrate = 150000;
1040 sas->bitrate = 300000;
1043 sas->bitrate = 600000;
1049 cts->protocol = PROTO_SCSI;
1050 scsi->valid = CTS_SCSI_VALID_TQ;
1051 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1053 cts->ccb_h.status = CAM_REQ_CMP;
1056 case XPT_CALC_GEOMETRY:
1057 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1058 ccb->ccb_h.status = CAM_REQ_CMP;
1061 mpr_dprint(sassc->sc, MPR_XINFO,
1062 "mprsas_action XPT_RESET_DEV\n");
1063 mprsas_action_resetdev(sassc, ccb);
/* Abort / reset-bus / term-io: not supported per-command, pretend OK. */
1068 mpr_dprint(sassc->sc, MPR_XINFO,
1069 "mprsas_action faking success for abort or reset\n");
1070 ccb->ccb_h.status = CAM_REQ_CMP;
1073 mprsas_action_scsiio(sassc, ccb);
1075 #if __FreeBSD_version >= 900026
1077 mprsas_action_smpio(sassc, ccb);
/* Unknown function codes are rejected. */
1081 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Broadcast an async reset notification (e.g. AC_BUS_RESET, AC_SENT_BDR)
 * to CAM for the given target/LUN on this SIM's path.  A path-creation
 * failure is logged and the announcement is silently dropped.
 */
1089 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1090 target_id_t target_id, lun_id_t lun_id)
1092 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1093 struct cam_path *path;
1095 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1096 ac_code, target_id, (uintmax_t)lun_id);
1098 if (xpt_create_path(&path, NULL,
1099 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1100 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1105 xpt_async(ac_code, path, NULL);
1106 xpt_free_path(path);
/*
 * Diag-reset cleanup: walk every command slot (index 0 is reserved and
 * skipped) and force each outstanding command to complete with a NULL
 * reply — via its completion callback, a wakeup for sleeping waiters, or a
 * COMPLETE flag for polled commands.  A command that is neither free nor
 * completable is logged as an anomaly.  Caller must hold the softc mutex.
 */
1110 mprsas_complete_all_commands(struct mpr_softc *sc)
1112 struct mpr_command *cm;
1117 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1119 /* complete all commands with a NULL reply */
1120 for (i = 1; i < sc->num_reqs; i++) {
1121 cm = &sc->commands[i];
1122 cm->cm_reply = NULL;
1125 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1126 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1128 if (cm->cm_complete != NULL) {
1129 mprsas_log_command(cm, MPR_RECOVERY,
1130 "completing cm %p state %x ccb %p for diag reset\n",
1131 cm, cm->cm_state, cm->cm_ccb);
1132 cm->cm_complete(sc, cm);
1136 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1137 mprsas_log_command(cm, MPR_RECOVERY,
1138 "waking up cm %p state %x ccb %p for diag reset\n",
1139 cm, cm->cm_state, cm->cm_ccb);
1144 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1145 /* this should never happen, but if it does, log */
1146 mprsas_log_command(cm, MPR_RECOVERY,
1147 "cm %p state %x flags 0x%x ccb %p during diag "
1148 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: re-enter startup/discovery mode with
 * the simq frozen, announce a bus reset to CAM, force-complete everything
 * that was outstanding, and invalidate every cached target handle since
 * handles may change across the reset.  Targets are flagged
 * MPRSAS_TARGET_INDIAGRESET until rediscovered.
 */
1155 mprsas_handle_reinit(struct mpr_softc *sc)
1159 /* Go back into startup mode and freeze the simq, so that CAM
1160 * doesn't send any commands until after we've rediscovered all
1161 * targets and found the proper device handles for them.
1163 * After the reset, portenable will trigger discovery, and after all
1164 * discovery-related activities have finished, the simq will be
1167 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1168 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1169 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1170 mprsas_startup_increment(sc->sassc);
1172 /* notify CAM of a bus reset */
1173 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1176 /* complete and cleanup after all outstanding commands */
1177 mprsas_complete_all_commands(sc);
1179 mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
1180 "completion\n", __func__, sc->sassc->startup_refcount,
1181 sc->sassc->tm_count);
1183 /* zero all the target handles, since they may change after the
1184 * reset, and we have to rediscover all the targets and use the new
1187 for (i = 0; i < sc->sassc->maxtargets; i++) {
1188 if (sc->sassc->targets[i].outstanding != 0)
1189 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1190 i, sc->sassc->targets[i].outstanding);
1191 sc->sassc->targets[i].handle = 0x0;
1192 sc->sassc->targets[i].exp_dev_handle = 0x0;
1193 sc->sassc->targets[i].outstanding = 0;
1194 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout fired when a task-management command fails to complete in time.
 * Logs the timeout; recovery action (presumably a diag reset — elided in
 * this extract) follows.  Runs with the softc mutex held (callout was
 * initialized with the mutex).
 */
1198 mprsas_tm_timeout(void *data)
1200 struct mpr_command *tm = data;
1201 struct mpr_softc *sc = tm->cm_sc;
1203 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1205 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1206 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL UNIT RESET task-management request.
 * Cancels the TM timeout, then either (a) declares recovery for the LU
 * finished when no commands remain outstanding on that LUN and moves on
 * to the next timed-out command, or (b) escalates to a TARGET RESET if
 * commands for the LUN are still outstanding.
 * NOTE(review): the assignment of 'targ' (presumably targ = tm->cm_targ)
 * is in an elided line — confirm before relying on it.
 */
1211 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1212 struct mpr_command *tm)
1214 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1215 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1216 unsigned int cm_count = 0;
1217 struct mpr_command *cm;
1218 struct mprsas_target *targ;
/* The TM completed; stop the mprsas_tm_timeout callout. */
1220 callout_stop(&tm->cm_callout);
1222 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1223 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1227 * Currently there should be no way we can hit this case. It only
1228 * happens when we have a failure to allocate chain frames, and
1229 * task management commands don't have S/G lists.
1231 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1232 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1233 "This should not happen!\n", __func__, tm->cm_flags);
1234 mprsas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1238 if (reply == NULL) {
1239 mprsas_log_command(tm, MPR_RECOVERY,
1240 "NULL reset reply for tm %p\n", tm);
1241 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1242 /* this completion was due to a reset, just cleanup */
1243 targ->flags &= ~MPRSAS_TARGET_INRESET;
1245 mprsas_free_tm(sc, tm);
1248 /* we should have gotten a reply. */
1254 mprsas_log_command(tm, MPR_RECOVERY,
1255 "logical unit reset status 0x%x code 0x%x count %u\n",
1256 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1257 le32toh(reply->TerminationCount));
/* Count commands still queued on this target for the same LUN. */
1259 /* See if there are any outstanding commands for this LUN.
1260 * This could be made more efficient by using a per-LU data
1261 * structure of some sort.
1263 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1264 if (cm->cm_lun == tm->cm_lun)
1268 if (cm_count == 0) {
1269 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1270 "logical unit %u finished recovery after reset\n",
1273 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1276 /* we've finished recovery for this logical unit. check and
1277 * see if some other logical unit has a timedout command
1278 * that needs to be processed.
1280 cm = TAILQ_FIRST(&targ->timedout_commands);
/* Reuse this TM to abort the next timed-out command, if any. */
1282 mprsas_send_abort(sc, tm, cm);
1286 mprsas_free_tm(sc, tm);
/*
 * LU reset reported completion but commands remain: treat it as a
 * failure regardless of IOCStatus and escalate to a target reset.
 */
1290 /* if we still have commands for this LUN, the reset
1291 * effectively failed, regardless of the status reported.
1292 * Escalate to a target reset.
1294 mprsas_log_command(tm, MPR_RECOVERY,
1295 "logical unit reset complete for tm %p, but still have %u "
1296 "command(s)\n", tm, cm_count);
1297 mprsas_send_reset(sc, tm,
1298 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management request.
 * Cancels the TM timeout and clears the target's INRESET flag; if the
 * target has no outstanding commands, recovery is finished, otherwise
 * the reset is considered failed and recovery escalates (the escalation
 * call is in elided lines — presumably a diag reset; TODO confirm).
 * NOTE(review): the assignment of 'targ' is also elided.
 */
1303 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1305 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1306 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1307 struct mprsas_target *targ;
1309 callout_stop(&tm->cm_callout);
1311 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1312 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1316 * Currently there should be no way we can hit this case. It only
1317 * happens when we have a failure to allocate chain frames, and
1318 * task management commands don't have S/G lists.
1320 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1321 mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
1322 "This should not happen!\n", __func__, tm->cm_flags);
1323 mprsas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1327 if (reply == NULL) {
1328 mprsas_log_command(tm, MPR_RECOVERY,
1329 "NULL reset reply for tm %p\n", tm);
1330 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1331 /* this completion was due to a reset, just cleanup */
1332 targ->flags &= ~MPRSAS_TARGET_INRESET;
1334 mprsas_free_tm(sc, tm);
1337 /* we should have gotten a reply. */
1343 mprsas_log_command(tm, MPR_RECOVERY,
1344 "target reset status 0x%x code 0x%x count %u\n",
1345 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1346 le32toh(reply->TerminationCount));
/* Reset attempt has concluded either way; drop the INRESET marker. */
1348 targ->flags &= ~MPRSAS_TARGET_INRESET;
1350 if (targ->outstanding == 0) {
1351 /* we've finished recovery for this target and all
1352 * of its logical units.
1354 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1355 "recovery finished after target reset\n");
1357 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1361 mprsas_free_tm(sc, tm);
1364 /* after a target reset, if this target still has
1365 * outstanding commands, the reset effectively failed,
1366 * regardless of the status reported. escalate.
1368 mprsas_log_command(tm, MPR_RECOVERY,
1369 "target reset complete for tm %p, but still have %u "
1370 "command(s)\n", tm, targ->outstanding);
1375 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset of the given 'type'
 * (LOGICAL_UNIT_RESET or TARGET_RESET) on the command's target, arming
 * a MPR_RESET_TIMEOUT-second callout and installing the matching
 * completion handler.  A target with no device handle cannot be reset
 * (it is gone or not yet discovered); that case bails out early.
 * NOTE(review): the return statements are in elided lines; the visible
 * code suggests an int error return — confirm against the full source.
 */
1378 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1380 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1381 struct mprsas_target *target;
1384 target = tm->cm_targ;
1385 if (target->handle == 0) {
1386 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1387 __func__, target->tid);
/* Fill in the MPI task-management request (fields in LE byte order). */
1391 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1392 req->DevHandle = htole16(target->handle);
1393 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1394 req->TaskType = type;
1396 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1397 /* XXX Need to handle invalid LUNs */
1398 MPR_SET_LUN(req->LUN, tm->cm_lun);
1399 tm->cm_targ->logical_unit_resets++;
1400 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1401 "sending logical unit reset\n");
1402 tm->cm_complete = mprsas_logical_unit_reset_complete;
1404 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1406 * Target reset method =
1407 * SAS Hard Link Reset / SATA Link Reset
1409 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1410 tm->cm_targ->target_resets++;
1411 tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
1412 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1413 "sending target reset\n");
1414 tm->cm_complete = mprsas_target_reset_complete;
/* Any other task type is a programming error. */
1417 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1421 mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
1423 if (target->encl_level_valid) {
1424 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1425 "connector name (%4s)\n", target->encl_level,
1426 target->encl_slot, target->connector_name);
/* TM requests go out on the high-priority descriptor queue. */
1430 tm->cm_desc.HighPriority.RequestFlags =
1431 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1432 tm->cm_complete_data = (void *)tm;
/* Arm the timeout before handing the command to the hardware. */
1434 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1435 mprsas_tm_timeout, tm);
1437 err = mpr_map_command(sc, tm);
1439 mprsas_log_command(tm, MPR_RECOVERY,
1440 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task-management request.
 * Cancels the TM timeout, then inspects the target's timedout_commands
 * list: if empty, recovery is done; if the head of the list is a
 * different command than the one just aborted, the abort succeeded and
 * recovery continues with the next command; otherwise the abort failed
 * and recovery escalates to a logical unit reset.
 */
1448 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1450 struct mpr_command *cm;
1451 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1452 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1453 struct mprsas_target *targ;
1455 callout_stop(&tm->cm_callout);
1457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1458 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1462 * Currently there should be no way we can hit this case. It only
1463 * happens when we have a failure to allocate chain frames, and
1464 * task management commands don't have S/G lists.
1466 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1467 mprsas_log_command(tm, MPR_RECOVERY,
1468 "cm_flags = %#x for abort %p TaskMID %u!\n",
1469 tm->cm_flags, tm, le16toh(req->TaskMID));
1470 mprsas_free_tm(sc, tm);
/* A NULL reply normally means the TM was flushed by a diag reset. */
1474 if (reply == NULL) {
1475 mprsas_log_command(tm, MPR_RECOVERY,
1476 "NULL abort reply for tm %p TaskMID %u\n",
1477 tm, le16toh(req->TaskMID));
1478 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1479 /* this completion was due to a reset, just cleanup */
1481 mprsas_free_tm(sc, tm);
1484 /* we should have gotten a reply. */
1490 mprsas_log_command(tm, MPR_RECOVERY,
1491 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1492 le16toh(req->TaskMID),
1493 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1494 le32toh(reply->TerminationCount));
/*
 * The decision below keys off the head of the timed-out list rather
 * than the TM reply status: what matters is whether the aborted
 * command actually left the list.
 */
1496 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1498 /* if there are no more timedout commands, we're done with
1499 * error recovery for this target.
1501 mprsas_log_command(tm, MPR_RECOVERY,
1502 "finished recovery after aborting TaskMID %u\n",
1503 le16toh(req->TaskMID));
1506 mprsas_free_tm(sc, tm);
1508 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1509 /* abort success, but we have more timedout commands to abort */
1510 mprsas_log_command(tm, MPR_RECOVERY,
1511 "continuing recovery after aborting TaskMID %u\n",
1512 le16toh(req->TaskMID));
1514 mprsas_send_abort(sc, tm, cm);
1517 /* we didn't get a command completion, so the abort
1518 * failed as far as we're concerned. escalate.
1520 mprsas_log_command(tm, MPR_RECOVERY,
1521 "abort failed for TaskMID %u tm %p\n",
1522 le16toh(req->TaskMID), tm);
1524 mprsas_send_reset(sc, tm,
1525 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1529 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK task-management request for the
 * timed-out command 'cm', using 'tm' as the TM carrier command.
 * Copies target/LUN context from 'cm' into 'tm', installs
 * mprsas_abort_complete, and arms a MPR_ABORT_TIMEOUT-second callout.
 * NOTE(review): the assignment of 'targ' (presumably from cm->cm_targ)
 * and the return statements are in elided lines — confirm.
 */
1532 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1533 struct mpr_command *cm)
1535 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1536 struct mprsas_target *targ;
/* No device handle means the target is gone; nothing to abort against. */
1540 if (targ->handle == 0) {
1541 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1542 __func__, cm->cm_ccb->ccb_h.target_id);
1546 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1547 "Aborting command %p\n", cm);
1549 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1550 req->DevHandle = htole16(targ->handle);
1551 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1552 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1554 /* XXX Need to handle invalid LUNs */
1555 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the specific request to abort by its SMID. */
1557 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests go out on the high-priority descriptor queue. */
1560 tm->cm_desc.HighPriority.RequestFlags =
1561 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1562 tm->cm_complete = mprsas_abort_complete;
1563 tm->cm_complete_data = (void *)tm;
1564 tm->cm_targ = cm->cm_targ;
1565 tm->cm_lun = cm->cm_lun;
1567 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1568 mprsas_tm_timeout, tm);
1572 err = mpr_map_command(sc, tm);
1574 mprsas_log_command(tm, MPR_RECOVERY,
1575 "error %d sending abort for cm %p SMID %u\n",
1576 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O command.  First polls the
 * interrupt handler in case the completion is merely pending; if the
 * command really is stuck, marks it TIMEDOUT, queues it on the target's
 * timedout_commands list, and starts recovery by sending an ABORT TASK
 * (allocating a TM command if the target isn't already in recovery).
 */
1582 mprsas_scsiio_timeout(void *data)
1584 struct mpr_softc *sc;
1585 struct mpr_command *cm;
1586 struct mprsas_target *targ;
1588 cm = (struct mpr_command *)data;
1592 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1594 mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1597 * Run the interrupt handler to make sure it's not pending. This
1598 * isn't perfect because the command could have already completed
1599 * and been re-used, though this is unlikely.
1601 mpr_intr_locked(sc);
/* If the poll completed it, the command is back on the free list. */
1602 if (cm->cm_state == MPR_CM_STATE_FREE) {
1603 mprsas_log_command(cm, MPR_XINFO,
1604 "SCSI command %p almost timed out\n", cm);
1608 if (cm->cm_ccb == NULL) {
1609 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1616 mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
1617 "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
1619 if (targ->encl_level_valid) {
1620 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1621 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1622 targ->connector_name);
1625 /* XXX first, check the firmware state, to see if it's still
1626 * operational. if not, do a diag reset.
/* Mark the command timed out and hand it to the recovery machinery. */
1629 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1630 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1631 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1633 if (targ->tm != NULL) {
1634 /* target already in recovery, just queue up another
1635 * timedout command to be processed later.
1637 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1638 "processing by tm %p\n", cm, targ->tm);
1640 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1641 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1644 /* start recovery by aborting the first timedout command */
1645 mprsas_send_abort(sc, targ->tm, cm);
1648 /* XXX queue this target up for recovery once a TM becomes
1649 * available. The firmware only has a limited number of
1650 * HighPriority credits for the high priority requests used
1651 * for task management, and we ran out.
1653 * Isilon: don't worry about this for now, since we have
1654 * more credits than disks in an enclosure, and limit
1655 * ourselves to one TM per target for recovery.
1657 mpr_dprint(sc, MPR_RECOVERY,
1658 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler: validate the target, allocate a
 * driver command, translate the CCB into an MPI2 SCSI IO request
 * (direction, tagging, LUN, CDB, optional EEDP/protection setup and
 * fast-path descriptor), arm the per-command timeout, and submit it.
 * Must be called with the softc mutex held.
 * NOTE(review): this listing is elided — several statements (returns,
 * xpt_done calls, some case labels) between the embedded line numbers
 * are not shown.
 */
1663 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1665 MPI2_SCSI_IO_REQUEST *req;
1666 struct ccb_scsiio *csio;
1667 struct mpr_softc *sc;
1668 struct mprsas_target *targ;
1669 struct mprsas_lun *lun;
1670 struct mpr_command *cm;
1671 uint8_t i, lba_byte, *ref_tag_addr;
1672 uint16_t eedp_flags;
1673 uint32_t mpi_control;
1677 mtx_assert(&sc->mpr_mtx, MA_OWNED);
/* A zero handle means the target isn't (or is no longer) attached. */
1680 targ = &sassc->targets[csio->ccb_h.target_id];
1681 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1682 if (targ->handle == 0x0) {
1683 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1684 __func__, csio->ccb_h.target_id);
1685 csio->ccb_h.status = CAM_DEV_NOT_THERE;
/* RAID member disks are addressed via the volume, not directly. */
1689 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1690 mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
1691 "supported %u\n", __func__, csio->ccb_h.target_id);
1692 csio->ccb_h.status = CAM_DEV_NOT_THERE;
1697 * Sometimes, it is possible to get a command that is not "In
1698 * Progress" and was actually aborted by the upper layer. Check for
1699 * this here and complete the command without error.
1701 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1702 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1703 "target %u\n", __func__, csio->ccb_h.target_id);
1708 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1709 * that the volume has timed out. We want volumes to be enumerated
1710 * until they are deleted/removed, not just failed.
1712 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1713 if (targ->devinfo == 0)
1714 csio->ccb_h.status = CAM_REQ_CMP;
1716 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1721 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1722 mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
1723 csio->ccb_h.status = CAM_DEV_NOT_THERE;
/*
 * Out of commands (or mid diag reset): freeze the simq and ask CAM
 * to requeue so the I/O retries once resources return.
 */
1728 cm = mpr_alloc_command(sc);
1729 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1731 mpr_free_command(sc, cm);
1733 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1734 xpt_freeze_simq(sassc->sim, 1);
1735 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1737 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1738 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI IO request (multi-byte fields little-endian). */
1743 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1744 bzero(req, sizeof(*req));
1745 req->DevHandle = htole16(targ->handle);
1746 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1748 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1749 req->SenseBufferLength = MPR_SENSE_LEN;
1751 req->ChainOffset = 0;
1752 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1757 req->DataLength = htole32(csio->dxfer_len);
1758 req->BidirectionalDataLength = 0;
1759 req->IoFlags = htole16(csio->cdb_len);
1762 /* Note: BiDirectional transfers are not supported */
1763 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1765 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1766 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1769 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1770 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1774 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field set (4 dwords). */
1778 if (csio->cdb_len == 32)
1779 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1781 * It looks like the hardware doesn't require an explicit tag
1782 * number for each transaction. SAM Task Management not supported
1785 switch (csio->tag_action) {
1786 case MSG_HEAD_OF_Q_TAG:
1787 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1789 case MSG_ORDERED_Q_TAG:
1790 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1793 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1795 case CAM_TAG_ACTION_NONE:
1796 case MSG_SIMPLE_Q_TAG:
1798 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Per-target TLR bits come from the device mapping table. */
1801 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1802 req->Control = htole32(mpi_control);
1804 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1805 mpr_free_command(sc, cm);
1806 ccb->ccb_h.status = CAM_LUN_INVALID;
/* CDB can arrive either by pointer or inline in the CCB. */
1811 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1812 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1814 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1815 req->IoFlags = htole16(csio->cdb_len);
1818 * Check if EEDP is supported and enabled. If it is then check if the
1819 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1820 * is formatted for EEDP support. If all of this is true, set CDB up
1821 * for EEDP transfer.
1823 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1824 if (sc->eedp_enabled && eedp_flags) {
1825 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1826 if (lun->lun_id == csio->ccb_h.target_lun) {
1831 if ((lun != NULL) && (lun->eedp_formatted)) {
1832 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1833 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1834 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1835 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1836 req->EEDPFlags = htole16(eedp_flags);
1839 * If CDB less than 32, fill in Primary Ref Tag with
1840 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1841 * already there. Also, set protection bit. FreeBSD
1842 * currently does not support CDBs bigger than 16, but
1843 * the code doesn't hurt, and will be here for the
1846 if (csio->cdb_len != 32) {
1847 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1848 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1849 PrimaryReferenceTag;
1850 for (i = 0; i < 4; i++) {
1852 req->CDB.CDB32[lba_byte + i];
1855 req->CDB.EEDP32.PrimaryReferenceTag =
1857 CDB.EEDP32.PrimaryReferenceTag);
1858 req->CDB.EEDP32.PrimaryApplicationTagMask =
1860 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1864 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1865 req->EEDPFlags = htole16(eedp_flags);
1866 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Hook up data buffer, completion callback, and LUN on the command. */
1872 cm->cm_length = csio->dxfer_len;
1873 if (cm->cm_length != 0) {
1875 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
1879 cm->cm_sge = &req->SGL;
1880 cm->cm_sglsize = (32 - 24) * 4;
1881 cm->cm_complete = mprsas_scsiio_complete;
1882 cm->cm_complete_data = ccb;
1884 cm->cm_lun = csio->ccb_h.target_lun;
1887 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1888 * and set descriptor type.
1890 if (targ->scsi_req_desc_type ==
1891 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1892 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1893 cm->cm_desc.FastPathSCSIIO.RequestFlags =
1894 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1895 cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1897 cm->cm_desc.SCSIIO.RequestFlags =
1898 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1899 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* CCB timeout is in milliseconds; callout wants ticks. */
1902 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1903 mprsas_scsiio_timeout, cm);
1906 targ->outstanding++;
1907 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1908 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1910 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1911 __func__, cm, ccb, targ->outstanding);
1913 mpr_map_command(sc, cm);
/*
 * Log a human-readable description of an MPI2 task-management response
 * code at MPR_XINFO debug level.  Unknown codes fall through to a
 * description set in an elided default case.
 */
1918 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1922 switch (response_code) {
1923 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1924 desc = "task management request completed";
1926 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1927 desc = "invalid frame";
1929 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1930 desc = "task management request not supported";
1932 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1933 desc = "task management request failed";
1935 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1936 desc = "task management request succeeded";
1938 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1939 desc = "invalid lun";
1942 desc = "overlapped tag attempted";
1944 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1945 desc = "task queued, however not sent to target";
1951 mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1956 * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
/*
 * Decode and log the details of a failed SCSI_IO reply: IOCStatus,
 * SCSIStatus, and SCSIState are each translated into descriptive text
 * and printed at MPR_XINFO level, together with enclosure location,
 * autosense data (when valid and XINFO debugging is on), and the SAS
 * response code (when response info is valid).
 */
1959 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1960 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1964 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1965 MPI2_IOCSTATUS_MASK;
1966 u8 scsi_state = mpi_reply->SCSIState;
1967 u8 scsi_status = mpi_reply->SCSIStatus;
1968 char *desc_ioc_state = NULL;
1969 char *desc_scsi_status = NULL;
/* scsi_state description is built up in the softc scratch buffer. */
1970 char *desc_scsi_state = sc->tmp_string;
1971 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is special-cased; the action taken is in an elided line. */
1973 if (log_info == 0x31170000)
1976 switch (ioc_status) {
1977 case MPI2_IOCSTATUS_SUCCESS:
1978 desc_ioc_state = "success";
1980 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1981 desc_ioc_state = "invalid function";
1983 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1984 desc_ioc_state = "scsi recovered error";
1986 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1987 desc_ioc_state = "scsi invalid dev handle";
1989 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1990 desc_ioc_state = "scsi device not there";
1992 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1993 desc_ioc_state = "scsi data overrun";
1995 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1996 desc_ioc_state = "scsi data underrun";
1998 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1999 desc_ioc_state = "scsi io data error";
2001 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2002 desc_ioc_state = "scsi protocol error";
2004 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2005 desc_ioc_state = "scsi task terminated";
2007 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2008 desc_ioc_state = "scsi residual mismatch";
2010 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2011 desc_ioc_state = "scsi task mgmt failed";
2013 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2014 desc_ioc_state = "scsi ioc terminated";
2016 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2017 desc_ioc_state = "scsi ext terminated";
2019 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2020 desc_ioc_state = "eedp guard error";
2022 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2023 desc_ioc_state = "eedp ref tag error";
2025 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2026 desc_ioc_state = "eedp app tag error";
2029 desc_ioc_state = "unknown";
2033 switch (scsi_status) {
2034 case MPI2_SCSI_STATUS_GOOD:
2035 desc_scsi_status = "good";
2037 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2038 desc_scsi_status = "check condition";
2040 case MPI2_SCSI_STATUS_CONDITION_MET:
2041 desc_scsi_status = "condition met";
2043 case MPI2_SCSI_STATUS_BUSY:
2044 desc_scsi_status = "busy";
2046 case MPI2_SCSI_STATUS_INTERMEDIATE:
2047 desc_scsi_status = "intermediate";
2049 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2050 desc_scsi_status = "intermediate condmet";
2052 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2053 desc_scsi_status = "reservation conflict";
2055 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2056 desc_scsi_status = "command terminated";
2058 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2059 desc_scsi_status = "task set full";
2061 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2062 desc_scsi_status = "aca active";
2064 case MPI2_SCSI_STATUS_TASK_ABORTED:
2065 desc_scsi_status = "task aborted";
2068 desc_scsi_status = "unknown";
/* scsi_state is a bitmask; append one word per set bit. */
2072 desc_scsi_state[0] = '\0';
2074 desc_scsi_state = " ";
2075 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2076 strcat(desc_scsi_state, "response info ");
2077 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2078 strcat(desc_scsi_state, "state terminated ");
2079 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2080 strcat(desc_scsi_state, "no status ");
2081 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2082 strcat(desc_scsi_state, "autosense failed ");
2083 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2084 strcat(desc_scsi_state, "autosense valid ");
2086 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2087 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2088 if (targ->encl_level_valid) {
2089 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2090 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2091 targ->connector_name);
2093 /* We can add more detail about underflow data here
2096 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2097 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2098 desc_scsi_state, scsi_state);
2100 if (sc->mpr_debug & MPR_XINFO &&
2101 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2102 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2103 scsi_sense_print(csio);
2104 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
/* SAS response code lives in the first byte of ResponseInfo. */
2107 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2108 response_info = le32toh(mpi_reply->ResponseInfo);
2109 response_bytes = (u8 *)&response_info;
2110 mpr_response_code(sc,response_bytes[0]);
2115 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2117 MPI2_SCSI_IO_REPLY *rep;
2119 struct ccb_scsiio *csio;
2120 struct mprsas_softc *sassc;
2121 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2122 u8 *TLR_bits, TLR_on;
2127 mpr_dprint(sc, MPR_TRACE,
2128 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2129 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2130 cm->cm_targ->outstanding);
2132 callout_stop(&cm->cm_callout);
2133 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2136 ccb = cm->cm_complete_data;
2138 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2140 * XXX KDM if the chain allocation fails, does it matter if we do
2141 * the sync and unload here? It is simpler to do it in every case,
2142 * assuming it doesn't cause problems.
2144 if (cm->cm_data != NULL) {
2145 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2146 dir = BUS_DMASYNC_POSTREAD;
2147 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2148 dir = BUS_DMASYNC_POSTWRITE;
2149 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2150 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2153 cm->cm_targ->completed++;
2154 cm->cm_targ->outstanding--;
2155 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2156 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2158 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2159 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2160 if (cm->cm_reply != NULL)
2161 mprsas_log_command(cm, MPR_RECOVERY,
2162 "completed timedout cm %p ccb %p during recovery "
2163 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2164 le16toh(rep->IOCStatus), rep->SCSIStatus,
2165 rep->SCSIState, le32toh(rep->TransferCount));
2167 mprsas_log_command(cm, MPR_RECOVERY,
2168 "completed timedout cm %p ccb %p during recovery\n",
2170 } else if (cm->cm_targ->tm != NULL) {
2171 if (cm->cm_reply != NULL)
2172 mprsas_log_command(cm, MPR_RECOVERY,
2173 "completed cm %p ccb %p during recovery "
2174 "ioc %x scsi %x state %x xfer %u\n",
2175 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2176 rep->SCSIStatus, rep->SCSIState,
2177 le32toh(rep->TransferCount));
2179 mprsas_log_command(cm, MPR_RECOVERY,
2180 "completed cm %p ccb %p during recovery\n",
2182 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2183 mprsas_log_command(cm, MPR_RECOVERY,
2184 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2187 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2189 * We ran into an error after we tried to map the command,
2190 * so we're getting a callback without queueing the command
2191 * to the hardware. So we set the status here, and it will
2192 * be retained below. We'll go through the "fast path",
2193 * because there can be no reply when we haven't actually
2194 * gone out to the hardware.
2196 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2199 * Currently the only error included in the mask is
2200 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2201 * chain frames. We need to freeze the queue until we get
2202 * a command that completed without this error, which will
2203 * hopefully have some chain frames attached that we can
2204 * use. If we wanted to get smarter about it, we would
2205 * only unfreeze the queue in this condition when we're
2206 * sure that we're getting some chain frames back. That's
2207 * probably unnecessary.
2209 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2210 xpt_freeze_simq(sassc->sim, 1);
2211 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2212 mpr_dprint(sc, MPR_INFO, "Error sending command, "
2213 "freezing SIM queue\n");
2218 * If this is a Start Stop Unit command and it was issued by the driver
2219 * during shutdown, decrement the refcount to account for all of the
2220 * commands that were sent. All SSU commands should be completed before
2221 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2224 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2225 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2229 /* Take the fast path to completion */
2230 if (cm->cm_reply == NULL) {
2231 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2232 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2233 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2235 ccb->ccb_h.status = CAM_REQ_CMP;
2236 ccb->csio.scsi_status = SCSI_STATUS_OK;
2238 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2239 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2240 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2241 mpr_dprint(sc, MPR_XINFO,
2242 "Unfreezing SIM queue\n");
2247 * There are two scenarios where the status won't be
2248 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2249 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2251 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2253 * Freeze the dev queue so that commands are
2254 * executed in the correct order with after error
2257 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2258 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2260 mpr_free_command(sc, cm);
2265 mprsas_log_command(cm, MPR_XINFO,
2266 "ioc %x scsi %x state %x xfer %u\n",
2267 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2268 le32toh(rep->TransferCount));
2270 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2271 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2272 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2274 case MPI2_IOCSTATUS_SUCCESS:
2275 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2277 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2278 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2279 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2281 /* Completion failed at the transport level. */
2282 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2283 MPI2_SCSI_STATE_TERMINATED)) {
2284 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2288 /* In a modern packetized environment, an autosense failure
2289 * implies that there's not much else that can be done to
2290 * recover the command.
2292 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2293 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2298 * CAM doesn't care about SAS Response Info data, but if this is
2299 * the state check if TLR should be done. If not, clear the
2300 * TLR_bits for the target.
2302 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2303 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2304 == MPR_SCSI_RI_INVALID_FRAME)) {
2305 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2306 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2310 * Intentionally override the normal SCSI status reporting
2311 * for these two cases. These are likely to happen in a
2312 * multi-initiator environment, and we want to make sure that
2313 * CAM retries these commands rather than fail them.
2315 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2316 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2317 ccb->ccb_h.status = CAM_REQ_ABORTED;
2321 /* Handle normal status and sense */
2322 csio->scsi_status = rep->SCSIStatus;
2323 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2324 ccb->ccb_h.status = CAM_REQ_CMP;
2326 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2328 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2329 int sense_len, returned_sense_len;
2331 returned_sense_len = min(le32toh(rep->SenseCount),
2332 sizeof(struct scsi_sense_data));
2333 if (returned_sense_len < csio->sense_len)
2334 csio->sense_resid = csio->sense_len -
2337 csio->sense_resid = 0;
2339 sense_len = min(returned_sense_len,
2340 csio->sense_len - csio->sense_resid);
2341 bzero(&csio->sense_data, sizeof(csio->sense_data));
2342 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2343 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2347 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2348 * and it's page code 0 (Supported Page List), and there is
2349 * inquiry data, and this is for a sequential access device, and
2350 * the device is an SSP target, and TLR is supported by the
2351 * controller, turn the TLR_bits value ON if page 0x90 is
2354 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2355 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2356 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2357 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2358 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2359 T_SEQUENTIAL) && (sc->control_TLR) &&
2360 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2361 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2362 vpd_list = (struct scsi_vpd_supported_page_list *)
2364 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2366 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2367 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2368 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2369 csio->cdb_io.cdb_bytes[4];
2370 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2371 if (vpd_list->list[i] == 0x90) {
2378 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2379 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2381 * If devinfo is 0 this will be a volume. In that case don't
2382 * tell CAM that the volume is not there. We want volumes to
2383 * be enumerated until they are deleted/removed, not just
2386 if (cm->cm_targ->devinfo == 0)
2387 ccb->ccb_h.status = CAM_REQ_CMP;
2389 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2391 case MPI2_IOCSTATUS_INVALID_SGL:
2392 mpr_print_scsiio_cmd(sc, cm);
2393 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2395 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2397 * This is one of the responses that comes back when an I/O
2398 * has been aborted. If it is because of a timeout that we
2399 * initiated, just set the status to CAM_CMD_TIMEOUT.
2400 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2401 * command is the same (it gets retried, subject to the
2402 * retry counter), the only difference is what gets printed
2405 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2406 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2408 ccb->ccb_h.status = CAM_REQ_ABORTED;
2410 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2411 /* resid is ignored for this condition */
2413 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2415 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2416 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2418 * Since these are generally external (i.e. hopefully
2419 * transient transport-related) errors, retry these without
2420 * decrementing the retry count.
2422 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2423 mprsas_log_command(cm, MPR_INFO,
2424 "terminated ioc %x scsi %x state %x xfer %u\n",
2425 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2426 le32toh(rep->TransferCount));
2428 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2429 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2430 case MPI2_IOCSTATUS_INVALID_VPID:
2431 case MPI2_IOCSTATUS_INVALID_FIELD:
2432 case MPI2_IOCSTATUS_INVALID_STATE:
2433 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2434 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2435 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2436 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2437 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2439 mprsas_log_command(cm, MPR_XINFO,
2440 "completed ioc %x scsi %x state %x xfer %u\n",
2441 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2442 le32toh(rep->TransferCount));
2443 csio->resid = cm->cm_length;
2444 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2448 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2450 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2451 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2452 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2453 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2457 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2458 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2459 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2462 mpr_free_command(sc, cm);
2466 #if __FreeBSD_version >= 900026
/*
 * Completion callback for an SMP passthrough command.  Recovers the CCB
 * from cm->cm_complete_data, maps the firmware reply (IOCStatus/SASStatus
 * and the SMP frame result byte) onto a CAM status, then syncs and unloads
 * the data DMA map and frees the command.
 *
 * NOTE(review): this extract has interior lines elided (return type,
 * several braces and early-return paths are not visible); comments below
 * describe only the logic that is visible here.
 */
2468 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2470 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2471 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2475 ccb = cm->cm_complete_data;
2478 * Currently there should be no way we can hit this case.  It only
2479 * happens when we have a failure to allocate chain frames, and SMP
2480 * commands require two S/G elements only. That should be handled
2481 * in the standard request size.
2483 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2484 mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2485 __func__, cm->cm_flags);
2486 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2490 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2492 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2493 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Rebuild the 64-bit SAS address from the two little-endian request words
 * (used only for the debug messages below). */
2497 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2498 sasaddr = le32toh(req->SASAddress.Low);
2499 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2501 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2502 MPI2_IOCSTATUS_SUCCESS ||
2503 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2504 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2505 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2506 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2510 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
2511 "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2513 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2514 ccb->ccb_h.status = CAM_REQ_CMP;
2516 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2520 * We sync in both directions because we had DMAs in the S/G list
2521 * in both directions.
2523 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2524 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2525 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2526 mpr_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the given SAS address.
 * Resolves the request/response buffers from the CCB (virtual addresses
 * only -- physical addresses are explicitly rejected), allocates a
 * command, fills in the MPI2 SMP passthrough frame, and maps it with a
 * two-element uio (one iovec for the request, one for the response).
 * Completion is handled by mprsas_smpio_complete().
 *
 * NOTE(review): interior lines are elided in this extract; the error
 * unwinding between the mpr_map_command() failure check and the final
 * cleanup is not fully visible.
 */
2531 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
2534 struct mpr_command *cm;
2535 uint8_t *request, *response;
2536 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2537 struct mpr_softc *sc;
/* Newer FreeBSD: data-location flags are an enumeration in CAM_DATA_MASK. */
2545 #if (__FreeBSD_version >= 1000028) || \
2546 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2547 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2548 case CAM_DATA_PADDR:
2549 case CAM_DATA_SG_PADDR:
2551 * XXX We don't yet support physical addresses here.
2553 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2554 "supported\n", __func__);
2555 ccb->ccb_h.status = CAM_REQ_INVALID;
2560 * The chip does not support more than one buffer for the
2561 * request or response.
2563 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2564 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2565 mpr_dprint(sc, MPR_ERROR,
2566 "%s: multiple request or response buffer segments "
2567 "not supported for SMP\n", __func__);
2568 ccb->ccb_h.status = CAM_REQ_INVALID;
2574 * The CAM_SCATTER_VALID flag was originally implemented
2575 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2576 * We have two. So, just take that flag to mean that we
2577 * might have S/G lists, and look at the S/G segment count
2578 * to figure out whether that is the case for each individual
2581 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2582 bus_dma_segment_t *req_sg;
2584 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2585 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2587 request = ccb->smpio.smp_request;
2589 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2590 bus_dma_segment_t *rsp_sg;
2592 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2593 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2595 response = ccb->smpio.smp_response;
2597 case CAM_DATA_VADDR:
2598 request = ccb->smpio.smp_request;
2599 response = ccb->smpio.smp_response;
2602 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Older FreeBSD: data-location information is individual flag bits. */
2606 #else /* __FreeBSD_version < 1000028 */
2608 * XXX We don't yet support physical addresses here.
2610 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2611 mpr_printf(sc, "%s: physical addresses not supported\n",
2613 ccb->ccb_h.status = CAM_REQ_INVALID;
2619 * If the user wants to send an S/G list, check to make sure they
2620 * have single buffers.
2622 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2624 * The chip does not support more than one buffer for the
2625 * request or response.
2627 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2628 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2629 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2630 "response buffer segments not supported for SMP\n",
2632 ccb->ccb_h.status = CAM_REQ_INVALID;
2638 * The CAM_SCATTER_VALID flag was originally implemented
2639 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2640 * We have two. So, just take that flag to mean that we
2641 * might have S/G lists, and look at the S/G segment count
2642 * to figure out whether that is the case for each individual
2645 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2646 bus_dma_segment_t *req_sg;
2648 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2649 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2651 request = ccb->smpio.smp_request;
2653 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2654 bus_dma_segment_t *rsp_sg;
2656 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2657 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2659 response = ccb->smpio.smp_response;
2661 request = ccb->smpio.smp_request;
2662 response = ccb->smpio.smp_response;
2664 #endif /* __FreeBSD_version < 1000028 */
2666 cm = mpr_alloc_command(sc);
2668 mpr_dprint(sc, MPR_ERROR,
2669 "%s: cannot allocate command\n", __func__);
2670 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2675 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2676 bzero(req, sizeof(*req));
2677 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2679 /* Allow the chip to use any route to this SAS address. */
2680 req->PhysicalPort = 0xff;
2682 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2684 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2686 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2687 "%#jx\n", __func__, (uintmax_t)sasaddr);
2689 mpr_init_sge(cm, req, &req->SGL);
2692 * Set up a uio to pass into mpr_map_command(). This allows us to
2693 * do one map command, and one busdma call in there.
2695 cm->cm_uio.uio_iov = cm->cm_iovec;
2696 cm->cm_uio.uio_iovcnt = 2;
2697 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2700 * The read/write flag isn't used by busdma, but set it just in
2701 * case. This isn't exactly accurate, either, since we're going in
2704 cm->cm_uio.uio_rw = UIO_WRITE;
2706 cm->cm_iovec[0].iov_base = request;
2707 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2708 cm->cm_iovec[1].iov_base = response;
2709 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2711 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2712 cm->cm_iovec[1].iov_len;
2715 * Trigger a warning message in mpr_data_cb() for the user if we
2716 * wind up exceeding two S/G segments. The chip expects one
2717 * segment for the request and another for the response.
2719 cm->cm_max_segs = 2;
2721 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2722 cm->cm_complete = mprsas_smpio_complete;
2723 cm->cm_complete_data = ccb;
2726 * Tell the mapping code that we're using a uio, and that this is
2727 * an SMP passthrough request. There is a little special-case
2728 * logic there (in mpr_data_cb()) to handle the bidirectional
2731 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2732 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2734 /* The chip data format is little endian. */
2735 req->SASAddress.High = htole32(sasaddr >> 32);
2736 req->SASAddress.Low = htole32(sasaddr);
2739 * XXX Note that we don't have a timeout/abort mechanism here.
2740 * From the manual, it looks like task management requests only
2741 * work for SCSI IO and SATA passthrough requests. We may need to
2742 * have a mechanism to retry requests in the event of a chip reset
2743 * at least. Hopefully the chip will insure that any errors short
2744 * of that are relayed back to the driver.
2746 error = mpr_map_command(sc, cm);
2747 if ((error != 0) && (error != EINPROGRESS)) {
2748 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2749 "mpr_map_command()\n", __func__, error);
2756 mpr_free_command(sc, cm);
2757 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * Handle an XPT_SMP_IO CCB: figure out which SAS address the SMP request
 * should be routed to, then hand off to mprsas_send_smpcmd().  The address
 * is taken from the target itself if it embeds an SMP target; otherwise it
 * falls back to the target's parent device (normally the expander),
 * validating that the parent is an SMP target with a usable SAS address.
 */
2763 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2765 struct mpr_softc *sc;
2766 struct mprsas_target *targ;
2767 uint64_t sasaddr = 0;
2772 * Make sure the target exists.
2774 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2775 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2776 targ = &sassc->targets[ccb->ccb_h.target_id];
2777 if (targ->handle == 0x0) {
2778 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2779 __func__, ccb->ccb_h.target_id);
2780 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2786 * If this device has an embedded SMP target, we'll talk to it
2788 * figure out what the expander's address is.
2790 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2791 sasaddr = targ->sasaddr;
2794 * If we don't have a SAS address for the expander yet, try
2795 * grabbing it from the page 0x83 information cached in the
2796 * transport layer for this target. LSI expanders report the
2797 * expander SAS address as the port-associated SAS address in
2798 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2801 * XXX KDM disable this for now, but leave it commented out so that
2802 * it is obvious that this is another possible way to get the SAS
2805 * The parent handle method below is a little more reliable, and
2806 * the other benefit is that it works for devices other than SES
2807 * devices. So you can send a SMP request to a da(4) device and it
2808 * will get routed to the expander that device is attached to.
2809 * (Assuming the da(4) device doesn't contain an SMP target...)
2813 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2817 * If we still don't have a SAS address for the expander, look for
2818 * the parent device of this device, which is probably the expander.
2821 #ifdef OLD_MPR_PROBE
2822 struct mprsas_target *parent_target;
2825 if (targ->parent_handle == 0x0) {
2826 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2827 "a valid parent handle!\n", __func__, targ->handle);
2828 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
/* Old probe path: look the parent up by handle in the target table. */
2831 #ifdef OLD_MPR_PROBE
2832 parent_target = mprsas_find_target_by_handle(sassc, 0,
2833 targ->parent_handle);
2835 if (parent_target == NULL) {
2836 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2837 "a valid parent target!\n", __func__, targ->handle);
2838 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2842 if ((parent_target->devinfo &
2843 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2844 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2845 "does not have an SMP target!\n", __func__,
2846 targ->handle, parent_target->handle);
2847 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2852 sasaddr = parent_target->sasaddr;
/* New probe path: the parent's devinfo/sasaddr are cached on the target. */
2853 #else /* OLD_MPR_PROBE */
2854 if ((targ->parent_devinfo &
2855 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2856 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2857 "does not have an SMP target!\n", __func__,
2858 targ->handle, targ->parent_handle);
2859 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2863 if (targ->parent_sasaddr == 0x0) {
2864 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2865 "%d does not have a valid SAS address!\n", __func__,
2866 targ->handle, targ->parent_handle);
2867 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2871 sasaddr = targ->parent_sasaddr;
2872 #endif /* OLD_MPR_PROBE */
2877 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2878 "handle %d\n", __func__, targ->handle);
2879 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2882 mprsas_send_smpcmd(sassc, ccb, sasaddr);
2890 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command, build
 * an MPI2 TARGET_RESET request (with the link-reset message flag) for the
 * target's device handle, and dispatch it as a high-priority request.
 * The CCB is completed later by mprsas_resetdev_complete().
 * Called with the driver mutex held (asserted below).
 */
2893 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2895 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2896 struct mpr_softc *sc;
2897 struct mpr_command *tm;
2898 struct mprsas_target *targ;
2900 MPR_FUNCTRACE(sassc->sc);
2901 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2903 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2904 ("Target %d out of bounds in XPT_RESET_DEV\n",
2905 ccb->ccb_h.target_id));
2907 tm = mpr_alloc_command(sc);
2909 mpr_dprint(sc, MPR_ERROR,
2910 "command alloc failure in mprsas_action_resetdev\n");
2911 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
2916 targ = &sassc->targets[ccb->ccb_h.target_id];
2917 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2918 req->DevHandle = htole16(targ->handle);
2919 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2920 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2922 /* SAS Hard Link Reset / SATA Link Reset */
2923 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go out on the high-priority queue. */
2926 tm->cm_desc.HighPriority.RequestFlags =
2927 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2928 tm->cm_complete = mprsas_resetdev_complete;
2929 tm->cm_complete_data = ccb;
2931 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset task-management command issued
 * by mprsas_action_resetdev().  Translates the task-management reply into
 * a CAM status on the stashed CCB; on success also announces a
 * bus-device-reset (AC_SENT_BDR) to CAM.  Frees the tm command last.
 * Called with the driver mutex held (asserted below).
 */
2935 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2937 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2941 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2943 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2944 ccb = tm->cm_complete_data;
2947 * Currently there should be no way we can hit this case. It only
2948 * happens when we have a failure to allocate chain frames, and
2949 * task management commands don't have S/G lists.
2951 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2952 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2954 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2956 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
2957 "handle %#04x! This should not happen!\n", __func__,
2958 tm->cm_flags, req->DevHandle);
2959 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2963 mpr_dprint(sc, MPR_XINFO,
2964 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2965 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
2967 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2968 ccb->ccb_h.status = CAM_REQ_CMP;
2969 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2973 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2977 mprsas_free_tm(sc, tm);
/*
 * CAM SIM poll routine: service the controller interrupt path directly
 * (used e.g. while dumping/panicking, when interrupts are unavailable).
 * MPR_TRACE debugging is disabled on the first poll because per-command
 * trace messages would slow a panic-time dump down too much.
 */
2982 mprsas_poll(struct cam_sim *sim)
2984 struct mprsas_softc *sassc;
2986 sassc = cam_sim_softc(sim);
2988 if (sassc->sc->mpr_debug & MPR_TRACE) {
2989 /* frequent debug messages during a panic just slow
2990 * everything down too much.
2992 mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
2993 sassc->sc->mpr_debug &= ~MPR_TRACE;
2996 mpr_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  Two events are handled here:
 *  - AC_ADVINFO_CHANGED: on a read-capacity (RCAPLONG) advinfo change,
 *    re-query the device via XPT_DEV_ADVINFO and update the per-LUN EEDP
 *    (protection information) state, creating the LUN record if needed.
 *  - AC_FOUND_DEVICE: prepare shutdown StartStopUnit handling for the new
 *    device and, on older FreeBSD versions, kick off the EEDP check.
 * On FreeBSD versions that deliver events for all SIMs, events for other
 * SIMs are filtered out by path id.
 */
3000 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3003 struct mpr_softc *sc;
3005 sc = (struct mpr_softc *)callback_arg;
3008 #if (__FreeBSD_version >= 1000006) || \
3009 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3010 case AC_ADVINFO_CHANGED: {
3011 struct mprsas_target *target;
3012 struct mprsas_softc *sassc;
3013 struct scsi_read_capacity_data_long rcap_buf;
3014 struct ccb_dev_advinfo cdai;
3015 struct mprsas_lun *lun;
3020 buftype = (uintptr_t)arg;
3026 * We're only interested in read capacity data changes.
3028 if (buftype != CDAI_TYPE_RCAPLONG)
3032 * See the comment in mpr_attach_sas() for a detailed
3033 * explanation. In these versions of FreeBSD we register
3034 * for all events and filter out the events that don't
3037 #if (__FreeBSD_version < 1000703) || \
3038 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3039 if (xpt_path_path_id(path) != sassc->sim->path_id)
3044 * We should have a handle for this, but check to make sure.
3046 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3047 ("Target %d out of bounds in mprsas_async\n",
3048 xpt_path_target_id(path)));
3049 target = &sassc->targets[xpt_path_target_id(path)];
3050 if (target->handle == 0)
/* Find (or create) the per-LUN record for this path's LUN. */
3053 lunid = xpt_path_lun_id(path);
3055 SLIST_FOREACH(lun, &target->luns, lun_link) {
3056 if (lun->lun_id == lunid) {
3062 if (found_lun == 0) {
3063 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3066 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3067 "LUN for EEDP support.\n");
3070 lun->lun_id = lunid;
3071 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data through XPT_DEV_ADVINFO. */
3074 bzero(&rcap_buf, sizeof(rcap_buf));
3075 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3076 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3077 cdai.ccb_h.flags = CAM_DIR_IN;
3078 cdai.buftype = CDAI_TYPE_RCAPLONG;
3080 cdai.bufsiz = sizeof(rcap_buf);
3081 cdai.buf = (uint8_t *)&rcap_buf;
3082 xpt_action((union ccb *)&cdai);
3083 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3084 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
3086 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3087 && (rcap_buf.prot & SRC16_PROT_EN)) {
3088 lun->eedp_formatted = TRUE;
3089 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3091 lun->eedp_formatted = FALSE;
3092 lun->eedp_block_size = 0;
3097 case AC_FOUND_DEVICE: {
3098 struct ccb_getdev *cgd;
3101 * See the comment in mpr_attach_sas() for a detailed
3102 * explanation. In these versions of FreeBSD we register
3103 * for all events and filter out the events that don't
3106 #if (__FreeBSD_version < 1000703) || \
3107 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3108 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3113 mprsas_prepare_ssu(sc, path, cgd);
3115 #if (__FreeBSD_version < 901503) || \
3116 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3117 mprsas_check_eedp(sc, path, cgd);
/*
 * For a newly found device, record whether a SCSI StartStopUnit should be
 * sent to it at driver shutdown.  Finds (or creates) the per-LUN record
 * for the path, then sets lun->stop_at_shutdown for SATA direct-access
 * end devices.
 */
3127 mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
3128 struct ccb_getdev *cgd)
3130 struct mprsas_softc *sassc = sc->sassc;
3132 target_id_t targetid;
3134 struct mprsas_target *target;
3135 struct mprsas_lun *lun;
3139 pathid = cam_sim_path(sassc->sim);
3140 targetid = xpt_path_target_id(path);
3141 lunid = xpt_path_lun_id(path);
3143 KASSERT(targetid < sassc->maxtargets,
3144 ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid));
3145 target = &sassc->targets[targetid];
3146 if (target->handle == 0x0)
3150 * If LUN is already in list, don't create a new one.
3153 SLIST_FOREACH(lun, &target->luns, lun_link) {
3154 if (lun->lun_id == lunid) {
3160 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3163 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3164 "preparing SSU.\n");
3167 lun->lun_id = lunid;
3168 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3172 * If this is a SATA direct-access end device, mark it so that a SCSI
3173 * StartStopUnit command will be sent to it when the driver is being
3176 if (((cgd->inq_data.device & 0x1F) == T_DIRECT) &&
3177 (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3178 ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3179 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3180 lun->stop_at_shutdown = TRUE;
3184 #if (__FreeBSD_version < 901503) || \
3185 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Older-FreeBSD path for EEDP detection: if the device's inquiry data
 * advertises protection information (SPC3_SID_PROTECT), issue an
 * asynchronous READ CAPACITY(16) to the LUN.  mprsas_read_cap_done()
 * consumes the result and records the LUN's EEDP state.  A per-LUN
 * record is created here if one does not already exist.
 */
3187 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3188 struct ccb_getdev *cgd)
3190 struct mprsas_softc *sassc = sc->sassc;
3191 struct ccb_scsiio *csio;
3192 struct scsi_read_capacity_16 *scsi_cmd;
3193 struct scsi_read_capacity_eedp *rcap_buf;
3195 target_id_t targetid;
3198 struct cam_path *local_path;
3199 struct mprsas_target *target;
3200 struct mprsas_lun *lun;
3205 pathid = cam_sim_path(sassc->sim);
3206 targetid = xpt_path_target_id(path);
3207 lunid = xpt_path_lun_id(path);
3209 KASSERT(targetid < sassc->maxtargets,
3210 ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3211 target = &sassc->targets[targetid];
3212 if (target->handle == 0x0)
3216 * Determine if the device is EEDP capable.
3218 * If this flag is set in the inquiry data, the device supports
3219 * protection information, and must support the 16 byte read capacity
3220 * command, otherwise continue without sending read cap 16
3222 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3226 * Issue a READ CAPACITY 16 command. This info is used to determine if
3227 * the LUN is formatted for EEDP support.
3229 ccb = xpt_alloc_ccb_nowait();
3231 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3236 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3238 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3245 * If LUN is already in list, don't create a new one.
3248 SLIST_FOREACH(lun, &target->luns, lun_link) {
3249 if (lun->lun_id == lunid) {
3255 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3258 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3260 xpt_free_path(local_path);
3264 lun->lun_id = lunid;
3265 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3268 xpt_path_string(local_path, path_str, sizeof(path_str));
3269 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3270 path_str, target->handle);
3273 * Issue a READ CAPACITY 16 command for the LUN. The
3274 * mprsas_read_cap_done function will load the read cap info into the
3277 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3279 if (rcap_buf == NULL) {
3280 mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity "
3281 "buffer for EEDP support.\n");
3282 xpt_free_path(ccb->ccb_h.path);
3286 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3288 csio->ccb_h.func_code = XPT_SCSI_IO;
3289 csio->ccb_h.flags = CAM_DIR_IN;
3290 csio->ccb_h.retry_count = 4;
3291 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3292 csio->ccb_h.timeout = 60000;
3293 csio->data_ptr = (uint8_t *)rcap_buf;
3294 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3295 csio->sense_len = MPR_SENSE_LEN;
3296 csio->cdb_len = sizeof(*scsi_cmd);
3297 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the READ CAPACITY(16) CDB (SERVICE ACTION IN, opcode 0x9E);
 * byte 13 is the allocation length. */
3299 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3300 bzero(scsi_cmd, sizeof(*scsi_cmd));
3301 scsi_cmd->opcode = 0x9E;
3302 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3303 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3305 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the driver-internal READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Looks up the LUN record matching the completed
 * CCB's path, records whether the LUN is formatted with protection
 * information (EEDP) and its logical block size, then frees the data
 * buffer, the path, and the CCB.
 */
3310 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3312 struct mprsas_softc *sassc;
3313 struct mprsas_target *target;
3314 struct mprsas_lun *lun;
3315 struct scsi_read_capacity_eedp *rcap_buf;
3317 if (done_ccb == NULL)
3320 /* The driver must release the devq itself because this SCSI command
3321 * was generated internally by the driver rather than by a peripheral.
3322 * Currently this is the only place where the driver issues a SCSI
3323 * command internally. If more internal SCSI commands are added in the
3324 * future, they will also need to release the devq here, since such
3325 * commands never go back to the CAM periph layer for completion.
3328 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3329 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3330 xpt_release_devq(done_ccb->ccb_h.path,
3331 /*count*/ 1, /*run_queue*/TRUE);
3334 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3337 * Get the LUN ID for the path and look it up in the LUN list for the
3340 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3341 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3342 ("Target %d out of bounds in mprsas_read_cap_done\n",
3343 done_ccb->ccb_h.target_id));
3344 target = &sassc->targets[done_ccb->ccb_h.target_id];
3345 SLIST_FOREACH(lun, &target->luns, lun_link) {
3346 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3350 * Got the LUN in the target's LUN list. Fill it in with EEDP
3351 * info. If the READ CAP 16 command had some SCSI error (common
3352 * if command is not supported), mark the lun as not supporting
3353 * EEDP and set the block size to 0.
3355 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3356 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3357 lun->eedp_formatted = FALSE;
3358 lun->eedp_block_size = 0;
/* PROT_EN bit (bit 0 of the protect byte) => LUN formatted with EEDP. */
3362 if (rcap_buf->protect & 0x01) {
3363 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for "
3364 "target ID %d is formatted for EEDP "
3365 "support.\n", done_ccb->ccb_h.target_lun,
3366 done_ccb->ccb_h.target_id);
3367 lun->eedp_formatted = TRUE;
3368 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3373 // Finished with this CCB and path.
3374 free(rcap_buf, M_MPR);
3375 xpt_free_path(done_ccb->ccb_h.path);
3376 xpt_free_ccb(done_ccb);
3378 #endif /* (__FreeBSD_version < 901503) || \
3379 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Kick off SAS discovery: send the PORT_ENABLE request and raise
 * wait_for_port_enable so the simq stays frozen until discovery settles.
 */
3382 mprsas_startup(struct mpr_softc *sc)
3385 * Send the port enable message and set the wait_for_port_enable flag.
3386 * This flag helps to keep the simq frozen until all discovery events
3389 sc->wait_for_port_enable = 1;
3390 mprsas_send_portenable(sc);
/*
 * Build and submit the MPI2 PORT_ENABLE request that starts controller
 * discovery.  Completion is handled by mprsas_portenable_complete().
 * NOTE(review): the return path for a failed mpr_alloc_command() is on an
 * elided line in this extract; only the early bail-out is visible.
 */
3395 mprsas_send_portenable(struct mpr_softc *sc)
3397 MPI2_PORT_ENABLE_REQUEST *request;
3398 struct mpr_command *cm;
3402 if ((cm = mpr_alloc_command(sc)) == NULL)
3404 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3405 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3406 request->MsgFlags = 0;
3408 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3409 cm->cm_complete = mprsas_portenable_complete;
3413 mpr_map_command(sc, cm);
3414 mpr_dprint(sc, MPR_XINFO,
3415 "mpr_send_portenable finished cm %p req %p complete %p\n",
3416 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT_ENABLE request sent by
 * mprsas_send_portenable().  Logs failure (NULL reply or bad IOCStatus),
 * frees the command, tears down the config intrhook if still armed, and
 * clears/signals the port-enable wait state so discovery can proceed.
 */
3421 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3423 MPI2_PORT_ENABLE_REPLY *reply;
3424 struct mprsas_softc *sassc;
3430 * Currently there should be no way we can hit this case. It only
3431 * happens when we have a failure to allocate chain frames, and
3432 * port enable commands don't have S/G lists.
3434 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3435 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3436 "This should not happen!\n", __func__, cm->cm_flags);
3439 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3441 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/* Byte-swap IOCStatus BEFORE masking: the field is little-endian on the
 * wire, so masking the raw value would test the wrong byte on big-endian
 * hosts.  This matches every other IOCStatus check in this file. */
3442 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3443 MPI2_IOCSTATUS_SUCCESS)
3444 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3446 mpr_free_command(sc, cm);
3447 if (sc->mpr_ich.ich_arg != NULL) {
3448 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3449 config_intrhook_disestablish(&sc->mpr_ich);
3450 sc->mpr_ich.ich_arg = NULL;
3454 * Done waiting for port enable to complete. Decrement the refcount.
3455 * If refcount is 0, discovery is complete and a rescan of the bus can
3458 sc->wait_for_port_enable = 0;
3459 sc->port_enable_complete = 1;
3460 wakeup(&sc->port_enable_complete);
3461 mprsas_startup_decrement(sassc);
3465 mprsas_check_id(struct mprsas_softc *sassc, int id)
3467 struct mpr_softc *sc = sassc->sc;
3471 ids = &sc->exclude_ids[0];
3472 while((name = strsep(&ids, ",")) != NULL) {
3473 if (name[0] == '\0')
3475 if (strtol(name, NULL, 0) == (long)id)