2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2014 LSI Corp.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 /* Communications core for LSI MPT2 */
33 /* TODO Move headers to mprvar */
34 #include <sys/types.h>
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/selinfo.h>
39 #include <sys/module.h>
43 #include <sys/malloc.h>
45 #include <sys/sysctl.h>
46 #include <sys/endian.h>
47 #include <sys/queue.h>
48 #include <sys/kthread.h>
49 #include <sys/taskqueue.h>
52 #include <machine/bus.h>
53 #include <machine/resource.h>
56 #include <machine/stdarg.h>
59 #include <cam/cam_ccb.h>
60 #include <cam/cam_debug.h>
61 #include <cam/cam_sim.h>
62 #include <cam/cam_xpt_sim.h>
63 #include <cam/cam_xpt_periph.h>
64 #include <cam/cam_periph.h>
65 #include <cam/scsi/scsi_all.h>
66 #include <cam/scsi/scsi_message.h>
67 #if __FreeBSD_version >= 900026
68 #include <cam/scsi/smp_all.h>
71 #include <dev/mpr/mpi/mpi2_type.h>
72 #include <dev/mpr/mpi/mpi2.h>
73 #include <dev/mpr/mpi/mpi2_ioc.h>
74 #include <dev/mpr/mpi/mpi2_sas.h>
75 #include <dev/mpr/mpi/mpi2_cnfg.h>
76 #include <dev/mpr/mpi/mpi2_init.h>
77 #include <dev/mpr/mpi/mpi2_tool.h>
78 #include <dev/mpr/mpr_ioctl.h>
79 #include <dev/mpr/mprvar.h>
80 #include <dev/mpr/mpr_table.h>
81 #include <dev/mpr/mpr_sas.h>
83 #define MPRSAS_DISCOVERY_TIMEOUT 20
84 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
87 * static array to check SCSI OpCode for EEDP protection bits
89 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
90 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
91 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * EEDP protection-flag lookup table indexed by SCSI CDB opcode (0x00-0xFF).
 * Non-zero entries (PRO_R = check/remove, PRO_W/PRO_V = insert) mark the
 * read-, write- and verify-class opcodes that need MPI2 EEDP flags set in
 * the SCSI IO request; every other opcode gets 0 (no EEDP processing).
 * NOTE(review): the closing brace of this initializer is not visible in
 * this chunk of the file.
 */
92 static uint8_t op_code_prot[256] = {
93 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
96 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
111 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
113 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
114 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
115 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
116 static void mprsas_poll(struct cam_sim *sim);
117 static void mprsas_scsiio_timeout(void *data);
118 static void mprsas_abort_complete(struct mpr_softc *sc,
119 struct mpr_command *cm);
120 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
121 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
123 static void mprsas_resetdev_complete(struct mpr_softc *,
124 struct mpr_command *);
125 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
126 struct mpr_command *cm);
127 static int mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm,
129 static void mprsas_async(void *callback_arg, uint32_t code,
130 struct cam_path *path, void *arg);
131 static void mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
132 struct ccb_getdev *cgd);
133 #if (__FreeBSD_version < 901503) || \
134 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136 struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138 union ccb *done_ccb);
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142 struct mpr_command *cm);
144 #if __FreeBSD_version >= 900026
146 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148 union ccb *ccb, uint64_t sasaddr);
150 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
/*
 * Linear search of the target array, starting at index 'start', for the
 * target whose firmware device handle matches 'handle'.
 * NOTE(review): the match/no-match return statements are not visible in
 * this view; presumably returns the target pointer on match and NULL
 * otherwise — confirm against the full source.
 */
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
157 struct mprsas_target *target;
160 for (i = start; i < sassc->maxtargets; i++) {
161 target = &sassc->targets[i];
162 if (target->handle == handle)
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170 * commands before device handles have been found by discovery. Since
171 * discovery involves reading config pages and possibly sending commands,
172 * discovery actions may continue even after we receive the end of discovery
173 * event, so refcount discovery actions instead of assuming we can unfreeze
174 * the simq when we get the event.
/*
 * Bump the discovery refcount while in startup; the first increment
 * (refcount 0 -> 1) freezes the SIM queue so CAM sends no commands
 * before discovery has populated the device handles.
 */
177 mprsas_startup_increment(struct mprsas_softc *sassc)
179 MPR_FUNCTRACE(sassc->sc);
181 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 if (sassc->startup_refcount++ == 0) {
183 /* just starting, freeze the simq */
184 mpr_dprint(sassc->sc, MPR_INIT,
185 "%s freezing simq\n", __func__);
/* NOTE(review): the pre-1000039 alternate freeze path is not visible here. */
186 #if __FreeBSD_version >= 1000039
189 xpt_freeze_simq(sassc->sim, 1);
191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 sassc->startup_refcount);
/*
 * If the SIM queue was frozen (MPRSAS_QUEUE_FROZEN), clear the flag and
 * release it.  Used during reinit paths to make sure CAM is unblocked.
 */
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 xpt_release_simq(sassc->sim, 1);
202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop the discovery refcount; when it reaches zero all discovery
 * related activity is finished, so leave startup mode, release the
 * SIM queue and (on newer CAM) rescan for the final topology.
 */
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 MPR_FUNCTRACE(sassc->sc);
211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 if (--sassc->startup_refcount == 0) {
213 /* finished all discovery-related actions, release
214 * the simq and rescan for the latest topology.
216 mpr_dprint(sassc->sc, MPR_INIT,
217 "%s releasing simq\n", __func__);
218 sassc->flags &= ~MPRSAS_IN_STARTUP;
219 xpt_release_simq(sassc->sim, 1);
/* NOTE(review): the pre-1000039 branch of this #if is not visible here. */
220 #if __FreeBSD_version >= 1000039
223 mprsas_rescan_target(sassc->sc, NULL);
226 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
227 sassc->startup_refcount);
231 /* LSI's firmware requires us to stop sending commands when we're doing task
232 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command for task management.  The first
 * outstanding TM (tm_count 0 -> 1) freezes the SIM queue, since the
 * firmware requires that normal I/O stop during task management.
 * NOTE(review): the NULL-check / return of 'tm' is not visible in this
 * view — confirm the allocation-failure path in the full source.
 */
236 mprsas_alloc_tm(struct mpr_softc *sc)
238 struct mpr_command *tm;
241 tm = mpr_alloc_high_priority_command(sc);
243 if (sc->sassc->tm_count++ == 0) {
244 mpr_dprint(sc, MPR_RECOVERY,
245 "%s freezing simq\n", __func__);
246 xpt_freeze_simq(sc->sassc->sim, 1);
248 mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
249 sc->sassc->tm_count);
/*
 * Release a task-management command.  When the last outstanding TM
 * completes (tm_count reaches 0) the SIM queue is released; the driver
 * keeps its own TM refcount so diag-reset cleanup stays simple.
 */
255 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
257 mpr_dprint(sc, MPR_TRACE, "%s", __func__);
261 /* if there are no TMs in use, we can release the simq. We use our
262 * own refcount so that it's easier for a diag reset to cleanup and
265 if (--sc->sassc->tm_count == 0) {
266 mpr_dprint(sc, MPR_RECOVERY, "%s releasing simq\n", __func__);
267 xpt_release_simq(sc->sassc->sim, 1);
269 mpr_dprint(sc, MPR_RECOVERY, "%s tm_count %u\n", __func__,
270 sc->sassc->tm_count);
272 mpr_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when
 * 'targ' is NULL (target id becomes CAM_TARGET_WILDCARD).  Allocates
 * a CCB, builds a wildcard-LUN path to the target, and issues either
 * XPT_SCAN_BUS or XPT_SCAN_TGT.
 * NOTE(review): the xpt_rescan()/error-cleanup calls are not visible
 * in this view.
 */
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
278 struct mprsas_softc *sassc = sc->sassc;
280 target_id_t targetid;
284 pathid = cam_sim_path(sassc->sim);
286 targetid = CAM_TARGET_WILDCARD;
/* Target index is its offset within the sassc->targets array. */
288 targetid = targ - sassc->targets;
291 * Allocate a CCB and schedule a rescan.
293 ccb = xpt_alloc_ccb_nowait();
295 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
299 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
300 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
306 if (targetid == CAM_TARGET_WILDCARD)
307 ccb->ccb_h.func_code = XPT_SCAN_BUS;
309 ccb->ccb_h.func_code = XPT_SCAN_TGT;
311 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logger for a command.  Builds one line in an sbuf:
 * the CAM path (or a "noperiph" sim:bus:target tuple when no CCB is
 * attached), the SCSI CDB and transfer length for XPT_SCSI_IO, the
 * SMID, and finally the caller's format string, then emits it at the
 * given debug 'level'.  Returns early when that level is not enabled.
 */
316 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
326 /* No need to be in here if debugging isn't enabled */
327 if ((cm->cm_sc->mpr_debug & level) == 0)
330 sbuf_new(&sb, str, sizeof(str), 0);
334 if (cm->cm_ccb != NULL) {
335 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
337 sbuf_cat(&sb, path_str);
338 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
339 scsi_command_string(&cm->cm_ccb->csio, &sb);
340 sbuf_printf(&sb, "length %d ",
341 cm->cm_ccb->csio.dxfer_len);
/* No CCB: identify the command by sim unit/bus and target id. */
344 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
345 cam_sim_name(cm->cm_sc->sassc->sim),
346 cam_sim_unit(cm->cm_sc->sassc->sim),
347 cam_sim_bus(cm->cm_sc->sassc->sim),
348 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
352 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
353 sbuf_vprintf(&sb, fmt, ap);
355 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  A NULL reply (diag reset) or a
 * non-SUCCESS IOCStatus just frees the TM; on success the target's
 * enclosure/link state is cleared — but devname/sasaddr are kept so
 * the same target id can be re-used if the volume returns.  Unlike
 * bare-drive removal, no SAS_OP_REMOVE_DEVICE follow-up is needed.
 */
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
363 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 struct mprsas_target *targ;
369 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
374 /* XXX retry the remove after the diag reset completes? */
375 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 "0x%04x\n", __func__, handle);
377 mprsas_free_tm(sc, tm);
381 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
382 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
383 "device 0x%x\n", reply->IOCStatus, handle);
384 mprsas_free_tm(sc, tm);
388 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
389 reply->TerminationCount);
/* Free the reply frame now and NULL it so mprsas_free_tm() won't re-free. */
390 mpr_free_reply(sc, tm->cm_reply_data);
391 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
393 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
397 * Don't clear target if remove fails because things will get confusing.
398 * Leave the devname and sasaddr intact so that we know to avoid reusing
399 * this target id if possible, and so we can assign the same target id
400 * to this device if it comes back in the future.
402 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
405 targ->encl_handle = 0x0;
406 targ->encl_level_valid = 0x0;
407 targ->encl_level = 0x0;
/* Blank (not NUL) the 4-char connector name. */
408 targ->connector_name[0] = ' ';
409 targ->connector_name[1] = ' ';
410 targ->connector_name[2] = ' ';
411 targ->connector_name[3] = ' ';
412 targ->encl_slot = 0x0;
413 targ->exp_dev_handle = 0x0;
415 targ->linkrate = 0x0;
418 targ->scsi_req_desc_type = 0;
421 mprsas_free_tm(sc, tm);
426 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Start removal of a RAID volume identified by firmware 'handle':
 * mark the target INREMOVAL, rescan so CAM notices, then send a
 * high-priority target-reset TM whose completion routine
 * (mprsas_remove_volume) finishes the teardown.  Volume delete needs
 * no SAS_OP_REMOVE_DEVICE, unlike bare-drive removal.
 */
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
432 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 struct mpr_softc *sc;
434 struct mpr_command *cm;
435 struct mprsas_target *targ = NULL;
437 MPR_FUNCTRACE(sassc->sc);
440 targ = mprsas_find_target_by_handle(sassc, 0, handle);
442 /* FIXME: what is the action? */
443 /* We don't know about this device? */
444 mpr_dprint(sc, MPR_ERROR,
445 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449 targ->flags |= MPRSAS_TARGET_INREMOVAL;
451 cm = mprsas_alloc_tm(sc);
453 mpr_dprint(sc, MPR_ERROR,
454 "%s: command alloc failure\n", __func__);
458 mprsas_rescan_target(sc, targ);
460 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 req->DevHandle = targ->handle;
462 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
465 /* SAS Hard Link Reset / SATA Link Reset */
466 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* The raw handle rides along in cm_complete_data for the completion. */
470 cm->cm_desc.HighPriority.RequestFlags =
471 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 cm->cm_complete = mprsas_remove_volume;
473 cm->cm_complete_data = (void *)(uintptr_t)handle;
474 mpr_map_command(sc, cm);
478 * The MPT2 firmware performs debounce on the link to avoid transient link
479 * errors and false removals. When it does decide that link has been lost
480 * and a device needs to go away, it expects that the host will perform a
481 * target reset and then an op remove. The reset has the side-effect of
482 * aborting any outstanding requests for the device, which is required for
483 * the op-remove to succeed. It's not clear if the host should check for
484 * the device coming back alive after the reset.
/*
 * Start removal of a (non-volume) device: mark the target INREMOVAL,
 * rescan, then send a high-priority target-reset TM.  The firmware
 * expects reset-then-op-remove; the completion routine
 * (mprsas_remove_device) issues the SAS_OP_REMOVE_DEVICE step.
 * Note: here DevHandle is byte-swapped with htole16(), whereas the
 * volume variant above assigns it directly.
 */
487 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
489 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
490 struct mpr_softc *sc;
491 struct mpr_command *cm;
492 struct mprsas_target *targ = NULL;
494 MPR_FUNCTRACE(sassc->sc);
498 targ = mprsas_find_target_by_handle(sassc, 0, handle);
500 /* FIXME: what is the action? */
501 /* We don't know about this device? */
502 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
507 targ->flags |= MPRSAS_TARGET_INREMOVAL;
509 cm = mprsas_alloc_tm(sc);
511 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
516 mprsas_rescan_target(sc, targ);
518 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
519 memset(req, 0, sizeof(*req));
520 req->DevHandle = htole16(targ->handle);
521 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
522 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
524 /* SAS Hard Link Reset / SATA Link Reset */
525 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
529 cm->cm_desc.HighPriority.RequestFlags =
530 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
531 cm->cm_complete = mprsas_remove_device;
532 cm->cm_complete_data = (void *)(uintptr_t)handle;
533 mpr_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  After validating the reply it re-uses the
 * same command frame to issue the second phase, a SAS IO-unit-control
 * SAS_OP_REMOVE_DEVICE, completing into mprsas_remove_complete().
 * Finally it terminates any commands still queued on the target with
 * CAM_DEV_NOT_THERE.
 */
537 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
539 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
540 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
541 struct mprsas_target *targ;
542 struct mpr_command *next_cm;
547 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
548 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
552 * Currently there should be no way we can hit this case. It only
553 * happens when we have a failure to allocate chain frames, and
554 * task management commands don't have S/G lists.
556 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
557 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
558 "handle %#04x! This should not happen!\n", __func__,
559 tm->cm_flags, handle);
560 mprsas_free_tm(sc, tm);
/* NULL reply: most likely a diag reset raced this TM; just bail. */
565 /* XXX retry the remove after the diag reset completes? */
566 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
567 "0x%04x\n", __func__, handle);
568 mprsas_free_tm(sc, tm);
572 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
573 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
574 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
575 mprsas_free_tm(sc, tm);
579 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
580 le32toh(reply->TerminationCount));
/* Free the reply frame now and NULL it so it isn't freed twice later. */
581 mpr_free_reply(sc, tm->cm_reply_data);
582 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
584 /* Reuse the existing command */
585 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
586 memset(req, 0, sizeof(*req));
587 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
588 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
589 req->DevHandle = htole16(handle);
591 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
592 tm->cm_complete = mprsas_remove_complete;
593 tm->cm_complete_data = (void *)(uintptr_t)handle;
595 mpr_map_command(sc, tm);
597 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
599 if (targ->encl_level_valid) {
600 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
601 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
602 targ->connector_name);
/* Fail any remaining queued commands for this now-gone target. */
604 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
607 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
608 ccb = tm->cm_complete_data;
609 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
610 mprsas_scsiio_complete(sc, tm);
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE step.  On success,
 * clears the target's handle/enclosure/link state (devname and sasaddr
 * are deliberately kept, see comment below) and frees any per-LUN
 * records hanging off the target; finally releases the TM command.
 */
615 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
617 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
619 struct mprsas_target *targ;
620 struct mprsas_lun *lun;
624 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
625 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
628 * Currently there should be no way we can hit this case. It only
629 * happens when we have a failure to allocate chain frames, and
630 * task management commands don't have S/G lists.
632 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
633 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
634 "handle %#04x! This should not happen!\n", __func__,
635 tm->cm_flags, handle);
636 mprsas_free_tm(sc, tm);
641 /* most likely a chip reset */
642 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
643 "0x%04x\n", __func__, handle);
644 mprsas_free_tm(sc, tm);
648 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
649 __func__, handle, le16toh(reply->IOCStatus));
652 * Don't clear target if remove fails because things will get confusing.
653 * Leave the devname and sasaddr intact so that we know to avoid reusing
654 * this target id if possible, and so we can assign the same target id
655 * to this device if it comes back in the future.
657 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
660 targ->encl_handle = 0x0;
661 targ->encl_level_valid = 0x0;
662 targ->encl_level = 0x0;
/* Blank (not NUL) the 4-char connector name. */
663 targ->connector_name[0] = ' ';
664 targ->connector_name[1] = ' ';
665 targ->connector_name[2] = ' ';
666 targ->connector_name[3] = ' ';
667 targ->encl_slot = 0x0;
668 targ->exp_dev_handle = 0x0;
670 targ->linkrate = 0x0;
673 targ->scsi_req_desc_type = 0;
/* Tear down the per-LUN list; the free() itself is not visible here. */
675 while (!SLIST_EMPTY(&targ->luns)) {
676 lun = SLIST_FIRST(&targ->luns);
677 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
682 mprsas_free_tm(sc, tm);
/*
 * Build the bitmask of firmware events this driver cares about (SAS
 * device/topology/discovery, IR volume/physical-disk, temperature
 * threshold) and register mprsas_evt_handler for them.
 */
686 mprsas_register_events(struct mpr_softc *sc)
691 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
692 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
693 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
694 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
695 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
696 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
697 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
698 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
699 setbit(events, MPI2_EVENT_IR_VOLUME);
700 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
701 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
702 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
704 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
705 &sc->sassc->mprsas_eh);
/*
 * CAM-side attach: allocate the sassc and target array, create the
 * SIMQ/SIM, spin up the firmware-event taskqueue, register the bus
 * with CAM, enter startup mode (simq frozen until discovery ends),
 * register async handlers for EEDP probing, and finally register for
 * firmware events.  Error-unwind labels are not visible in this view.
 */
711 mpr_attach_sas(struct mpr_softc *sc)
713 struct mprsas_softc *sassc;
/* NOTE(review): with M_WAITOK this malloc cannot return NULL, so the
 * failure printf below looks unreachable — legacy defensive check. */
719 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
721 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
727 * XXX MaxTargets could change during a reinit. since we don't
728 * resize the targets[] array during such an event, cache the value
729 * of MaxTargets here so that we don't get into trouble later. This
730 * should move into the reinit logic.
732 sassc->maxtargets = sc->facts->MaxTargets;
733 sassc->targets = malloc(sizeof(struct mprsas_target) *
734 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
735 if (!sassc->targets) {
736 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
/* One SIMQ slot per firmware request so CAM can't over-commit. */
744 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
745 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
750 unit = device_get_unit(sc->mpr_dev);
751 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
752 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
753 if (sassc->sim == NULL) {
754 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
759 TAILQ_INIT(&sassc->ev_queue);
761 /* Initialize taskqueue for Event Handling */
762 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
763 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
764 taskqueue_thread_enqueue, &sassc->ev_tq);
766 /* Run the task queue with lowest priority */
767 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
768 device_get_nameunit(sc->mpr_dev));
773 * XXX There should be a bus for every port on the adapter, but since
774 * we're just going to fake the topology for now, we'll pretend that
775 * everything is just a target on a single bus.
777 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
778 mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
785 * Assume that discovery events will start right away. Freezing
787 * Hold off boot until discovery is complete.
789 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
790 sc->sassc->startup_refcount = 0;
791 mprsas_startup_increment(sassc);
793 callout_init(&sassc->discovery_callout, 1 /*mprafe*/);
798 * Register for async events so we can determine the EEDP
799 * capabilities of devices.
801 status = xpt_create_path(&sassc->path, /*periph*/NULL,
802 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
804 if (status != CAM_REQ_CMP) {
805 mpr_printf(sc, "Error %#x creating sim path\n", status);
/* AC_ADVINFO_CHANGED only exists on FreeBSD versions that support it. */
810 #if (__FreeBSD_version >= 1000006) || \
811 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
812 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
814 event = AC_FOUND_DEVICE;
816 status = xpt_register_async(event, mprsas_async, sc,
818 if (status != CAM_REQ_CMP) {
819 mpr_dprint(sc, MPR_ERROR,
820 "Error %#x registering async handler for "
821 "AC_ADVINFO_CHANGED events\n", status);
822 xpt_free_path(sassc->path);
/* EEDP probing is best-effort: warn and continue rather than fail attach. */
826 if (status != CAM_REQ_CMP) {
828 * EEDP use is the exception, not the rule.
829 * Warn the user, but do not fail to attach.
831 mpr_printf(sc, "EEDP capabilities disabled.\n");
836 mprsas_register_events(sc);
/*
 * CAM-side detach: deregister firmware events, drain/free the event
 * taskqueue (with the lock dropped to avoid deadlock), unhook the
 * async handler and path, release a still-frozen simq, deregister and
 * free the SIM and SIMQ, then free per-target LUN records and the
 * target array itself.
 */
844 mpr_detach_sas(struct mpr_softc *sc)
846 struct mprsas_softc *sassc;
847 struct mprsas_lun *lun, *lun_tmp;
848 struct mprsas_target *targ;
/* Nothing to do if SAS attach never completed. */
853 if (sc->sassc == NULL)
857 mpr_deregister_events(sc, sassc->mprsas_eh);
860 * Drain and free the event handling taskqueue with the lock
861 * unheld so that any parallel processing tasks drain properly
862 * without deadlocking.
864 if (sassc->ev_tq != NULL)
865 taskqueue_free(sassc->ev_tq);
867 /* Make sure CAM doesn't wedge if we had to bail out early. */
870 /* Deregister our async handler */
871 if (sassc->path != NULL) {
/* Registering with event mask 0 removes the previous registration. */
872 xpt_register_async(0, mprsas_async, sc, sassc->path);
873 xpt_free_path(sassc->path);
877 if (sassc->flags & MPRSAS_IN_STARTUP)
878 xpt_release_simq(sassc->sim, 1);
880 if (sassc->sim != NULL) {
881 xpt_bus_deregister(cam_sim_path(sassc->sim));
882 cam_sim_free(sassc->sim, FALSE);
885 sassc->flags |= MPRSAS_SHUTDOWN;
888 if (sassc->devq != NULL)
889 cam_simq_free(sassc->devq);
891 for (i = 0; i < sassc->maxtargets; i++) {
892 targ = &sassc->targets[i];
893 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
897 free(sassc->targets, M_MPR);
/*
 * Discovery has finished: cancel the pending discovery-timeout
 * callout, if one was armed.
 */
905 mprsas_discovery_end(struct mprsas_softc *sassc)
907 struct mpr_softc *sc = sassc->sc;
911 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
912 callout_stop(&sassc->discovery_callout);
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB
 * function code: path inquiry, transfer-settings query, geometry,
 * device reset, SCSI I/O, SMP I/O, and fakes success for bus-level
 * reset/abort/terminate requests.  Several case labels and the
 * xpt_done() calls are not visible in this chunk.
 */
917 mprsas_action(struct cam_sim *sim, union ccb *ccb)
919 struct mprsas_softc *sassc;
921 sassc = cam_sim_softc(sim);
923 MPR_FUNCTRACE(sassc->sc);
924 mpr_dprint(sassc->sc, MPR_TRACE, "%s func 0x%x\n", __func__,
925 ccb->ccb_h.func_code);
926 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
928 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: report HBA capabilities (case label not visible here). */
931 struct ccb_pathinq *cpi = &ccb->cpi;
933 cpi->version_num = 1;
934 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
935 cpi->target_sprt = 0;
936 #if __FreeBSD_version >= 1000039
937 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
939 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
941 cpi->hba_eng_cnt = 0;
942 cpi->max_target = sassc->maxtargets - 1;
/* The initiator claims the highest target id for itself. */
944 cpi->initiator_id = sassc->maxtargets - 1;
945 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
946 strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
947 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
948 cpi->unit_number = cam_sim_unit(sim);
949 cpi->bus_id = cam_sim_bus(sim);
951 * XXXSLM-I think this needs to change based on config page or
952 * something instead of hardcoded to 150000.
954 cpi->base_transfer_speed = 150000;
955 cpi->transport = XPORT_SAS;
956 cpi->transport_version = 0;
957 cpi->protocol = PROTO_SCSI;
958 cpi->protocol_version = SCSI_REV_SPC;
959 #if __FreeBSD_version >= 800001
961 * XXXSLM-probably need to base this number on max SGL's and
964 cpi->maxio = 256 * 1024;
966 cpi->ccb_h.status = CAM_REQ_CMP;
969 case XPT_GET_TRAN_SETTINGS:
971 struct ccb_trans_settings *cts;
972 struct ccb_trans_settings_sas *sas;
973 struct ccb_trans_settings_scsi *scsi;
974 struct mprsas_target *targ;
977 sas = &cts->xport_specific.sas;
978 scsi = &cts->proto_specific.scsi;
980 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
981 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
982 cts->ccb_h.target_id));
983 targ = &sassc->targets[cts->ccb_h.target_id];
/* Handle 0 means no device currently at this target id. */
984 if (targ->handle == 0x0) {
985 cts->ccb_h.status = CAM_DEV_NOT_THERE;
989 cts->protocol_version = SCSI_REV_SPC2;
990 cts->transport = XPORT_SAS;
991 cts->transport_version = 0;
993 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the negotiated PHY link rate to a bitrate in Kb/s. */
994 switch (targ->linkrate) {
996 sas->bitrate = 150000;
999 sas->bitrate = 300000;
1002 sas->bitrate = 600000;
1008 cts->protocol = PROTO_SCSI;
1009 scsi->valid = CTS_SCSI_VALID_TQ;
1010 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1012 cts->ccb_h.status = CAM_REQ_CMP;
1015 case XPT_CALC_GEOMETRY:
1016 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1017 ccb->ccb_h.status = CAM_REQ_CMP;
/* XPT_RESET_DEV (case label not visible in this view). */
1020 mpr_dprint(sassc->sc, MPR_XINFO,
1021 "mprsas_action XPT_RESET_DEV\n");
1022 mprsas_action_resetdev(sassc, ccb);
/* Bus reset/abort/terminate: unsupported, fake success per PIM_NOBUSRESET. */
1027 mpr_dprint(sassc->sc, MPR_XINFO,
1028 "mprsas_action faking success for abort or reset\n");
1029 ccb->ccb_h.status = CAM_REQ_CMP;
1032 mprsas_action_scsiio(sassc, ccb);
1034 #if __FreeBSD_version >= 900026
1036 mprsas_action_smpio(sassc, ccb);
/* default: function not supported. */
1040 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/*
 * Broadcast an async reset notification (e.g. AC_BUS_RESET or
 * AC_SENT_BDR) to CAM for the given target/LUN by building a
 * temporary path, calling xpt_async(), and freeing the path.
 */
1048 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1049 target_id_t target_id, lun_id_t lun_id)
1051 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1052 struct cam_path *path;
1054 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1055 ac_code, target_id, (uintmax_t)lun_id);
1057 if (xpt_create_path(&path, NULL,
1058 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1059 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1064 xpt_async(ac_code, path, NULL);
1065 xpt_free_path(path);
/*
 * Diag-reset cleanup: walk every firmware command slot (SMID 0 is
 * reserved, hence i starts at 1), NULL the reply, and complete or wake
 * whatever is waiting on it.  Polled commands are flagged COMPLETE,
 * commands with a completion callback get it invoked, sleeping waiters
 * get woken, and anything not in the FREE state that was not handled
 * is logged as an anomaly.
 */
1069 mprsas_complete_all_commands(struct mpr_softc *sc)
1071 struct mpr_command *cm;
1076 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1078 /* complete all commands with a NULL reply */
1079 for (i = 1; i < sc->num_reqs; i++) {
1080 cm = &sc->commands[i];
1081 cm->cm_reply = NULL;
1084 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1085 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1087 if (cm->cm_complete != NULL) {
1088 mprsas_log_command(cm, MPR_RECOVERY,
1089 "completing cm %p state %x ccb %p for diag reset\n",
1090 cm, cm->cm_state, cm->cm_ccb);
1091 cm->cm_complete(sc, cm);
/* Sleeping submitter: wake it (the wakeup() call is not visible here). */
1095 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1096 mprsas_log_command(cm, MPR_RECOVERY,
1097 "waking up cm %p state %x ccb %p for diag reset\n",
1098 cm, cm->cm_state, cm->cm_ccb);
1103 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1104 /* this should never happen, but if it does, log */
1105 mprsas_log_command(cm, MPR_RECOVERY,
1106 "cm %p state %x flags 0x%x ccb %p during diag "
1107 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinit: re-enter startup mode (freezing the simq so
 * CAM holds off until rediscovery), announce a bus reset to CAM,
 * complete all outstanding commands, then zero every target's handles
 * and counters — device handles are not stable across a reset — and
 * flag each target INDIAGRESET.
 */
1114 mprsas_handle_reinit(struct mpr_softc *sc)
1118 /* Go back into startup mode and freeze the simq, so that CAM
1119 * doesn't send any commands until after we've rediscovered all
1120 * targets and found the proper device handles for them.
1122 * After the reset, portenable will trigger discovery, and after all
1123 * discovery-related activities have finished, the simq will be
1126 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1127 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1128 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1129 mprsas_startup_increment(sc->sassc);
1131 /* notify CAM of a bus reset */
1132 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1135 /* complete and cleanup after all outstanding commands */
1136 mprsas_complete_all_commands(sc);
1138 mpr_dprint(sc, MPR_INIT, "%s startup %u tm %u after command "
1139 "completion\n", __func__, sc->sassc->startup_refcount,
1140 sc->sassc->tm_count);
1142 /* zero all the target handles, since they may change after the
1143 * reset, and we have to rediscover all the targets and use the new
1146 for (i = 0; i < sc->sassc->maxtargets; i++) {
/* A non-zero outstanding count here means commands were leaked/lost. */
1147 if (sc->sassc->targets[i].outstanding != 0)
1148 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1149 i, sc->sassc->targets[i].outstanding);
1150 sc->sassc->targets[i].handle = 0x0;
1151 sc->sassc->targets[i].exp_dev_handle = 0x0;
1152 sc->sassc->targets[i].outstanding = 0;
1153 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management command times out.
 * Logs the stuck TM; the recovery action taken afterwards (typically a
 * diag reset) is not visible in this chunk.
 */
1157 mprsas_tm_timeout(void *data)
1159 struct mpr_command *tm = data;
1160 struct mpr_softc *sc = tm->cm_sc;
1162 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1164 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1165 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LUN-reset TM.  Stops the TM timeout
 * callout, validates the reply, then counts commands still queued for
 * the same LUN: zero means recovery for this LUN is done (announce
 * AC_SENT_BDR and move on to the next timed-out command, if any);
 * non-zero means the reset effectively failed, so escalate to a full
 * target reset.
 */
1170 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1171 struct mpr_command *tm)
1173 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1174 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1175 unsigned int cm_count = 0;
1176 struct mpr_command *cm;
1177 struct mprsas_target *targ;
/* Cancel the TM timeout now that a completion arrived. */
1179 callout_stop(&tm->cm_callout);
1181 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1182 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1186 * Currently there should be no way we can hit this case. It only
1187 * happens when we have a failure to allocate chain frames, and
1188 * task management commands don't have S/G lists.
1190 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1191 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1192 "This should not happen!\n", __func__, tm->cm_flags);
1193 mprsas_free_tm(sc, tm);
1197 if (reply == NULL) {
1198 mprsas_log_command(tm, MPR_RECOVERY,
1199 "NULL reset reply for tm %p\n", tm);
1200 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1201 /* this completion was due to a reset, just cleanup */
1202 targ->flags &= ~MPRSAS_TARGET_INRESET;
1204 mprsas_free_tm(sc, tm);
1207 /* we should have gotten a reply. */
1213 mprsas_log_command(tm, MPR_RECOVERY,
1214 "logical unit reset status 0x%x code 0x%x count %u\n",
1215 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1216 le32toh(reply->TerminationCount));
1218 /* See if there are any outstanding commands for this LUN.
1219 * This could be made more efficient by using a per-LU data
1220 * structure of some sort.
1222 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1223 if (cm->cm_lun == tm->cm_lun)
1227 if (cm_count == 0) {
1228 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1229 "logical unit %u finished recovery after reset\n",
1232 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1235 /* we've finished recovery for this logical unit. check and
1236 * see if some other logical unit has a timedout command
1237 * that needs to be processed.
1239 cm = TAILQ_FIRST(&targ->timedout_commands);
1241 mprsas_send_abort(sc, tm, cm);
1245 mprsas_free_tm(sc, tm);
1249 /* if we still have commands for this LUN, the reset
1250 * effectively failed, regardless of the status reported.
1251 * Escalate to a target reset.
1253 mprsas_log_command(tm, MPR_RECOVERY,
1254 "logical unit reset complete for tm %p, but still have %u "
1255 "command(s)\n", tm, cm_count);
1256 mprsas_send_reset(sc, tm,
1257 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * mprsas_target_reset_complete -- completion callback for a TARGET_RESET
 * task-management request (installed as cm_complete in mprsas_send_reset()).
 * Stops the TM timeout, clears the target's INRESET flag, and declares
 * recovery complete when no commands remain outstanding on the target;
 * otherwise the reset is treated as failed and recovery escalates further
 * (the escalation call itself is on lines elided from this excerpt).
 * NOTE(review): excerpt with elided lines -- 'targ' is used without a
 * visible assignment (presumably targ = tm->cm_targ; elided), and returns/
 * braces between branches are missing from this view.
 */
1262 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1264 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1265 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1266 struct mprsas_target *targ;
/* Cancel the mprsas_tm_timeout() callout armed for this TM. */
1268 callout_stop(&tm->cm_callout);
1270 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1271 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1275 * Currently there should be no way we can hit this case. It only
1276 * happens when we have a failure to allocate chain frames, and
1277 * task management commands don't have S/G lists.
1279 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1280 mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x for target reset! "
1281 "This should not happen!\n", __func__, tm->cm_flags);
1282 mprsas_free_tm(sc, tm);
/* NULL reply frame: expected only when a diag reset flushed the TM. */
1286 if (reply == NULL) {
1287 mprsas_log_command(tm, MPR_RECOVERY,
1288 "NULL reset reply for tm %p\n", tm);
1289 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1290 /* this completion was due to a reset, just cleanup */
1291 targ->flags &= ~MPRSAS_TARGET_INRESET;
1293 mprsas_free_tm(sc, tm);
1296 /* we should have gotten a reply. */
/* Reply frames are little-endian per MPI2; byte-swap before logging. */
1302 mprsas_log_command(tm, MPR_RECOVERY,
1303 "target reset status 0x%x code 0x%x count %u\n",
1304 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1305 le32toh(reply->TerminationCount));
/* The reset finished (whatever the outcome); the target is out of reset. */
1307 targ->flags &= ~MPRSAS_TARGET_INRESET;
1309 if (targ->outstanding == 0) {
1310 /* we've finished recovery for this target and all
1311 * of its logical units.
1313 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1314 "recovery finished after target reset\n");
1316 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1320 mprsas_free_tm(sc, tm);
1323 /* after a target reset, if this target still has
1324 * outstanding commands, the reset effectively failed,
1325 * regardless of the status reported. escalate.
1327 mprsas_log_command(tm, MPR_RECOVERY,
1328 "target reset complete for tm %p, but still have %u "
1329 "command(s)\n", tm, targ->outstanding);
/* TM reset timeout, in seconds (scaled by hz for callout_reset below). */
1334 #define MPR_RESET_TIMEOUT 30
/*
 * mprsas_send_reset -- build and issue a SCSI task-management reset of the
 * given 'type' (LOGICAL_UNIT_RESET or TARGET_RESET) for the target attached
 * to TM command 'tm'.  Installs the matching completion callback, arms the
 * MPR_RESET_TIMEOUT callout, and maps/queues the request to the IOC.
 * NOTE(review): excerpt with elided lines -- the declaration of 'err', the
 * early return for a zero devhandle, the final return, and several braces
 * are on lines missing from this view.
 */
1337 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1339 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1340 struct mprsas_target *target;
1343 target = tm->cm_targ;
/* A devhandle of 0 means the firmware no longer knows this device. */
1344 if (target->handle == 0) {
1345 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1346 __func__, target->tid);
/* Request fields are little-endian per MPI2; htole16 the devhandle. */
1350 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1351 req->DevHandle = htole16(target->handle);
1352 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1353 req->TaskType = type;
1355 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1356 /* XXX Need to handle invalid LUNs */
1357 MPR_SET_LUN(req->LUN, tm->cm_lun);
1358 tm->cm_targ->logical_unit_resets++;
1359 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1360 "sending logical unit reset\n");
1361 tm->cm_complete = mprsas_logical_unit_reset_complete;
1363 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1365 * Target reset method =
1366 * SAS Hard Link Reset / SATA Link Reset
1368 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1369 tm->cm_targ->target_resets++;
1370 tm->cm_targ->flags |= MPRSAS_TARGET_INRESET;
1371 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1372 "sending target reset\n");
1373 tm->cm_complete = mprsas_target_reset_complete;
/* Any other task type is a caller bug; log and (on elided lines) bail. */
1376 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1380 mpr_dprint(sc, MPR_XINFO, "to target %u handle 0x%04x\n", target->tid,
1382 if (target->encl_level_valid) {
1383 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1384 "connector name (%4s)\n", target->encl_level,
1385 target->encl_slot, target->connector_name);
/* TMs go out on the high-priority queue (limited HighPriority credits). */
1389 tm->cm_desc.HighPriority.RequestFlags =
1390 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1391 tm->cm_complete_data = (void *)tm;
/* Arm the recovery watchdog before handing the TM to the hardware. */
1393 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1394 mprsas_tm_timeout, tm);
1396 err = mpr_map_command(sc, tm);
1398 mprsas_log_command(tm, MPR_RECOVERY,
1399 "error %d sending reset type %u\n",
/*
 * mprsas_abort_complete -- completion callback for an ABORT_TASK
 * task-management request (installed as cm_complete in mprsas_send_abort()).
 * Stops the TM timeout, validates the reply, then inspects the target's
 * timedout_commands queue: empty means recovery is done; a different SMID at
 * the head means the abort worked and the next timed-out command is aborted;
 * the same SMID still queued means the abort failed and recovery escalates
 * to a logical unit reset.
 * NOTE(review): excerpt with elided lines -- 'targ' is declared but its
 * assignment and uses are on missing lines, as are returns and braces.
 */
1407 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1409 struct mpr_command *cm;
1410 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1411 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1412 struct mprsas_target *targ;
/* Cancel the mprsas_tm_timeout() callout armed for this TM. */
1414 callout_stop(&tm->cm_callout);
1416 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1417 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1421 * Currently there should be no way we can hit this case. It only
1422 * happens when we have a failure to allocate chain frames, and
1423 * task management commands don't have S/G lists.
1425 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1426 mprsas_log_command(tm, MPR_RECOVERY,
1427 "cm_flags = %#x for abort %p TaskMID %u!\n",
1428 tm->cm_flags, tm, le16toh(req->TaskMID));
1429 mprsas_free_tm(sc, tm);
/* NULL reply frame: expected only when a diag reset flushed the TM. */
1433 if (reply == NULL) {
1434 mprsas_log_command(tm, MPR_RECOVERY,
1435 "NULL abort reply for tm %p TaskMID %u\n",
1436 tm, le16toh(req->TaskMID));
1437 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1438 /* this completion was due to a reset, just cleanup */
1440 mprsas_free_tm(sc, tm);
1443 /* we should have gotten a reply. */
/* Reply frames are little-endian per MPI2; byte-swap before logging. */
1449 mprsas_log_command(tm, MPR_RECOVERY,
1450 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1451 le16toh(req->TaskMID),
1452 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1453 le32toh(reply->TerminationCount));
/* Peek at the next timed-out command to decide how to proceed. */
1455 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1457 /* if there are no more timedout commands, we're done with
1458 * error recovery for this target.
1460 mprsas_log_command(tm, MPR_RECOVERY,
1461 "finished recovery after aborting TaskMID %u\n",
1462 le16toh(req->TaskMID));
1465 mprsas_free_tm(sc, tm);
1467 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1468 /* abort success, but we have more timedout commands to abort */
1469 mprsas_log_command(tm, MPR_RECOVERY,
1470 "continuing recovery after aborting TaskMID %u\n",
1471 le16toh(req->TaskMID));
1473 mprsas_send_abort(sc, tm, cm);
1476 /* we didn't get a command completion, so the abort
1477 * failed as far as we're concerned. escalate.
1479 mprsas_log_command(tm, MPR_RECOVERY,
1480 "abort failed for TaskMID %u tm %p\n",
1481 le16toh(req->TaskMID), tm);
1483 mprsas_send_reset(sc, tm,
1484 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* ABORT_TASK TM timeout, in seconds (scaled by hz for callout_reset). */
1488 #define MPR_ABORT_TIMEOUT 5
/*
 * mprsas_send_abort -- build and issue an ABORT_TASK task-management request
 * for timed-out SCSI command 'cm', using TM command 'tm' as the carrier.
 * Installs mprsas_abort_complete() as the completion callback, arms the
 * MPR_ABORT_TIMEOUT callout, and maps/queues the request to the IOC.
 * NOTE(review): excerpt with elided lines -- the assignment of 'targ'
 * (presumably targ = cm->cm_targ;), the declaration of 'err', the early
 * return for a zero devhandle, and the final return are not visible here.
 */
1491 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1492 struct mpr_command *cm)
1494 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1495 struct mprsas_target *targ;
/* A devhandle of 0 means the firmware no longer knows this device. */
1499 if (targ->handle == 0) {
1500 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1501 __func__, cm->cm_ccb->ccb_h.target_id);
1505 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1506 "Aborting command %p\n", cm);
/* Request fields are little-endian per MPI2. */
1508 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1509 req->DevHandle = htole16(targ->handle);
1510 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1511 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1513 /* XXX Need to handle invalid LUNs */
1514 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* Identify the victim command to the firmware by its SMID. */
1516 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TMs go out on the high-priority queue. */
1519 tm->cm_desc.HighPriority.RequestFlags =
1520 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1521 tm->cm_complete = mprsas_abort_complete;
1522 tm->cm_complete_data = (void *)tm;
1523 tm->cm_targ = cm->cm_targ;
1524 tm->cm_lun = cm->cm_lun;
/* Arm the recovery watchdog before handing the TM to the hardware. */
1526 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1527 mprsas_tm_timeout, tm);
1531 err = mpr_map_command(sc, tm);
1533 mprsas_log_command(tm, MPR_RECOVERY,
/* NOTE(review): req->TaskMID is logged here without le16toh(), unlike the
 * other TaskMID logs above -- on big-endian hosts this would print the
 * byte-swapped value; confirm against the full source / upstream. */
1534 "error %d sending abort for cm %p SMID %u\n",
1535 err, cm, req->TaskMID);
/*
 * mprsas_scsiio_timeout -- callout handler fired when a normal SCSI I/O
 * command exceeds its CCB timeout (armed in mprsas_action_scsiio()).  Runs
 * the interrupt handler once to catch a just-completed command, then marks
 * the command TIMEDOUT, queues it on the target's timedout_commands list,
 * and starts (or joins) per-target recovery by sending an ABORT_TASK TM.
 * NOTE(review): excerpt with elided lines -- the assignments of 'sc' and
 * 'targ', early returns after the FREE/NULL-ccb checks, and closing braces
 * are on lines missing from this view.
 */
1541 mprsas_scsiio_timeout(void *data)
1543 struct mpr_softc *sc;
1544 struct mpr_command *cm;
1545 struct mprsas_target *targ;
1547 cm = (struct mpr_command *)data;
1551 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1553 mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1556 * Run the interrupt handler to make sure it's not pending. This
1557 * isn't perfect because the command could have already completed
1558 * and been re-used, though this is unlikely.
1560 mpr_intr_locked(sc);
/* If the interrupt pass completed (and freed) the command, no recovery. */
1561 if (cm->cm_state == MPR_CM_STATE_FREE) {
1562 mprsas_log_command(cm, MPR_XINFO,
1563 "SCSI command %p almost timed out\n", cm);
1567 if (cm->cm_ccb == NULL) {
1568 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1575 mprsas_log_command(cm, MPR_XINFO, "command timeout cm %p ccb %p "
1576 "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
1578 if (targ->encl_level_valid) {
1579 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
1580 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1581 targ->connector_name);
1584 /* XXX first, check the firmware state, to see if it's still
1585 * operational. if not, do a diag reset.
1588 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
1589 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
/* Queue for recovery; the abort/reset completion path drains this list. */
1590 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1592 if (targ->tm != NULL) {
1593 /* target already in recovery, just queue up another
1594 * timedout command to be processed later.
1596 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1597 "processing by tm %p\n", cm, targ->tm);
1599 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1600 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1603 /* start recovery by aborting the first timedout command */
1604 mprsas_send_abort(sc, targ->tm, cm);
1607 /* XXX queue this target up for recovery once a TM becomes
1608 * available. The firmware only has a limited number of
1609 * HighPriority credits for the high priority requests used
1610 * for task management, and we ran out.
1612 * Isilon: don't worry about this for now, since we have
1613 * more credits than disks in an enclosure, and limit
1614 * ourselves to one TM per target for recovery.
1616 mpr_dprint(sc, MPR_RECOVERY,
1617 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * mprsas_action_scsiio -- translate a CAM XPT_SCSI_IO CCB into an MPI2/2.5
 * SCSI IO request and queue it to the IOC.  Validates the target (handle,
 * RAID-component, in-removal, shutdown states), allocates a driver command,
 * fills in the request frame (direction, tagging, LUN, CDB, optional EEDP
 * protection fields), arms the per-command timeout, and maps the command
 * for DMA.  Called with the softc mutex held (asserted below).
 * NOTE(review): excerpt with elided lines -- the assignments of 'sc' and
 * 'csio', the xpt_done()/return pairs after each early-status assignment,
 * 'break;' statements in both switches, and several closing braces are on
 * lines missing from this view.
 */
1622 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1624 MPI2_SCSI_IO_REQUEST *req;
1625 struct ccb_scsiio *csio;
1626 struct mpr_softc *sc;
1627 struct mprsas_target *targ;
1628 struct mprsas_lun *lun;
1629 struct mpr_command *cm;
1630 uint8_t i, lba_byte, *ref_tag_addr;
1631 uint16_t eedp_flags;
1632 uint32_t mpi_control;
1636 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1639 targ = &sassc->targets[csio->ccb_h.target_id];
1640 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0 means the firmware has no device at this target id. */
1641 if (targ->handle == 0x0) {
1642 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1643 __func__, csio->ccb_h.target_id);
1644 csio->ccb_h.status = CAM_DEV_NOT_THERE;
/* RAID members are addressed via the volume, never directly. */
1648 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1649 mpr_dprint(sc, MPR_TRACE, "%s Raid component no SCSI IO "
1650 "supported %u\n", __func__, csio->ccb_h.target_id);
1651 csio->ccb_h.status = CAM_DEV_NOT_THERE;
1656 * Sometimes, it is possible to get a command that is not "In
1657 * Progress" and was actually aborted by the upper layer. Check for
1658 * this here and complete the command without error.
1660 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
1661 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1662 "target %u\n", __func__, csio->ccb_h.target_id);
1667 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1668 * that the volume has timed out. We want volumes to be enumerated
1669 * until they are deleted/removed, not just failed.
1671 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1672 if (targ->devinfo == 0)
1673 csio->ccb_h.status = CAM_REQ_CMP;
1675 csio->ccb_h.status = CAM_SEL_TIMEOUT;
1680 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1681 mpr_dprint(sc, MPR_TRACE, "%s shutting down\n", __func__);
1682 csio->ccb_h.status = CAM_DEV_NOT_THERE;
1687 cm = mpr_alloc_command(sc);
/* Out of commands (or mid diag-reset): freeze the SIM and requeue. */
1688 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1690 mpr_free_command(sc, cm);
1692 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1693 xpt_freeze_simq(sassc->sim, 1);
1694 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1696 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1697 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI IO request; multi-byte fields are little-endian. */
1702 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1703 bzero(req, sizeof(*req));
1704 req->DevHandle = htole16(targ->handle);
1705 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1707 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1708 req->SenseBufferLength = MPR_SENSE_LEN;
1710 req->ChainOffset = 0;
1711 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1716 req->DataLength = htole32(csio->dxfer_len);
1717 req->BidirectionalDataLength = 0;
1718 req->IoFlags = htole16(csio->cdb_len);
1721 /* Note: BiDirectional transfers are not supported */
1722 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1724 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1725 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1728 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1729 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1733 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field (in 4-byte units). */
1737 if (csio->cdb_len == 32)
1738 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1740 * It looks like the hardware doesn't require an explicit tag
1741 * number for each transaction. SAM Task Management not supported
1744 switch (csio->tag_action) {
1745 case MSG_HEAD_OF_Q_TAG:
1746 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1748 case MSG_ORDERED_Q_TAG:
1749 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1752 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1754 case CAM_TAG_ACTION_NONE:
1755 case MSG_SIMPLE_Q_TAG:
1757 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Merge in per-target Transport Layer Retries setting. */
1760 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1761 req->Control = htole32(mpi_control);
1763 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1764 mpr_free_command(sc, cm);
1765 ccb->ccb_h.status = CAM_LUN_INVALID;
/* CAM supplies the CDB either by pointer or inline in the CCB. */
1770 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1771 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1773 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1774 req->IoFlags = htole16(csio->cdb_len);
1777 * Check if EEDP is supported and enabled. If it is then check if the
1778 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1779 * is formatted for EEDP support. If all of this is true, set CDB up
1780 * for EEDP transfer.
1782 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1783 if (sc->eedp_enabled && eedp_flags) {
1784 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1785 if (lun->lun_id == csio->ccb_h.target_lun) {
1790 if ((lun != NULL) && (lun->eedp_formatted)) {
1791 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1792 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1793 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1794 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1795 req->EEDPFlags = htole16(eedp_flags);
1798 * If CDB less than 32, fill in Primary Ref Tag with
1799 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1800 * already there. Also, set protection bit. FreeBSD
1801 * currently does not support CDBs bigger than 16, but
1802 * the code doesn't hurt, and will be here for the
1805 if (csio->cdb_len != 32) {
1806 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1807 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1808 PrimaryReferenceTag;
1810 for (i = 0; i < 4; i++) {
1811 req->CDB.CDB32[lba_byte + i];
1814 req->CDB.EEDP32.PrimaryReferenceTag =
1816 CDB.EEDP32.PrimaryReferenceTag);
1817 req->CDB.EEDP32.PrimaryApplicationTagMask =
1819 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1823 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1824 req->EEDPFlags = htole16(eedp_flags);
1825 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Hand the data segment to the generic mapping code via the command. */
1831 cm->cm_length = csio->dxfer_len;
1832 if (cm->cm_length != 0) {
1834 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
1838 cm->cm_sge = &req->SGL;
1839 cm->cm_sglsize = (32 - 24) * 4;
1840 cm->cm_complete = mprsas_scsiio_complete;
1841 cm->cm_complete_data = ccb;
1843 cm->cm_lun = csio->ccb_h.target_lun;
1846 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1847 * and set descriptor type.
1849 if (targ->scsi_req_desc_type ==
1850 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1851 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1852 cm->cm_desc.FastPathSCSIIO.RequestFlags =
1853 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1854 cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1856 cm->cm_desc.SCSIIO.RequestFlags =
1857 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1858 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* CCB timeout is in milliseconds; convert to ticks for the callout. */
1861 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1862 mprsas_scsiio_timeout, cm);
1865 targ->outstanding++;
1866 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1867 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1869 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1870 __func__, cm, ccb, targ->outstanding);
1872 mpr_map_command(sc, cm);
/*
 * mpr_response_code -- log a human-readable description of an MPI2
 * task-management ResponseCode value at MPR_XINFO debug level.
 * NOTE(review): excerpt with elided lines -- the declaration of 'desc',
 * the 'break;' after each case, the OVERLAPPED_TAG case label, the
 * default case label, and the closing brace are on lines missing from
 * this view.
 */
1877 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1881 switch (response_code) {
1882 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1883 desc = "task management request completed";
1885 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1886 desc = "invalid frame";
1888 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1889 desc = "task management request not supported";
1891 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1892 desc = "task management request failed";
1894 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1895 desc = "task management request succeeded";
1897 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1898 desc = "invalid lun";
1901 desc = "overlapped tag attempted";
1903 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1904 desc = "task queued, however not sent to target";
1910 mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1915 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Decode a failed SCSI IO reply into readable strings (IOC status, SCSI
 * status, SCSI state bits) and log them, including enclosure location and,
 * when autosense data is valid and XINFO debugging is on, the sense buffer.
 * NOTE(review): excerpt with elided lines -- declarations of 'response_info'
 * and 'response_bytes', 'break;' statements after every case, the early
 * return for the 0x31170000 log_info, and closing braces are not visible
 * in this view.
 */
1918 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1919 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1923 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1924 MPI2_IOCSTATUS_MASK;
1925 u8 scsi_state = mpi_reply->SCSIState;
1926 u8 scsi_status = mpi_reply->SCSIStatus;
1927 char *desc_ioc_state = NULL;
1928 char *desc_scsi_status = NULL;
/* Scratch buffer in the softc used to accumulate the state-bit string. */
1929 char *desc_scsi_state = sc->tmp_string;
1930 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* NOTE(review): 0x31170000 is special-cased here; its meaning is not
 * established by the visible code -- consult the MPI2 log-info codes. */
1932 if (log_info == 0x31170000)
1935 switch (ioc_status) {
1936 case MPI2_IOCSTATUS_SUCCESS:
1937 desc_ioc_state = "success";
1939 case MPI2_IOCSTATUS_INVALID_FUNCTION:
1940 desc_ioc_state = "invalid function";
1942 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1943 desc_ioc_state = "scsi recovered error";
1945 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1946 desc_ioc_state = "scsi invalid dev handle";
1948 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1949 desc_ioc_state = "scsi device not there";
1951 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1952 desc_ioc_state = "scsi data overrun";
1954 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1955 desc_ioc_state = "scsi data underrun";
1957 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1958 desc_ioc_state = "scsi io data error";
1960 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1961 desc_ioc_state = "scsi protocol error";
1963 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1964 desc_ioc_state = "scsi task terminated";
1966 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1967 desc_ioc_state = "scsi residual mismatch";
1969 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1970 desc_ioc_state = "scsi task mgmt failed";
1972 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1973 desc_ioc_state = "scsi ioc terminated";
1975 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1976 desc_ioc_state = "scsi ext terminated";
1978 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1979 desc_ioc_state = "eedp guard error";
1981 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1982 desc_ioc_state = "eedp ref tag error";
1984 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1985 desc_ioc_state = "eedp app tag error";
1988 desc_ioc_state = "unknown";
1992 switch (scsi_status) {
1993 case MPI2_SCSI_STATUS_GOOD:
1994 desc_scsi_status = "good";
1996 case MPI2_SCSI_STATUS_CHECK_CONDITION:
1997 desc_scsi_status = "check condition";
1999 case MPI2_SCSI_STATUS_CONDITION_MET:
2000 desc_scsi_status = "condition met";
2002 case MPI2_SCSI_STATUS_BUSY:
2003 desc_scsi_status = "busy";
2005 case MPI2_SCSI_STATUS_INTERMEDIATE:
2006 desc_scsi_status = "intermediate";
2008 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2009 desc_scsi_status = "intermediate condmet";
2011 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2012 desc_scsi_status = "reservation conflict";
2014 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2015 desc_scsi_status = "command terminated";
2017 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2018 desc_scsi_status = "task set full";
2020 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2021 desc_scsi_status = "aca active";
2023 case MPI2_SCSI_STATUS_TASK_ABORTED:
2024 desc_scsi_status = "task aborted";
2027 desc_scsi_status = "unknown";
/* Build the scsi_state description by appending one token per set bit.
 * NOTE(review): the strcat() chain writes into sc->tmp_string with no
 * visible bounds check -- safe only if tmp_string is sized for the
 * worst-case concatenation; confirm against the softc definition. */
2031 desc_scsi_state[0] = '\0';
2033 desc_scsi_state = " ";
2034 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2035 strcat(desc_scsi_state, "response info ");
2036 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2037 strcat(desc_scsi_state, "state terminated ");
2038 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2039 strcat(desc_scsi_state, "no status ");
2040 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2041 strcat(desc_scsi_state, "autosense failed ");
2042 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2043 strcat(desc_scsi_state, "autosense valid ");
2045 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2046 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2047 if (targ->encl_level_valid) {
2048 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2049 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2050 targ->connector_name);
2052 /* We can add more detail about underflow data here
2055 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2056 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2057 desc_scsi_state, scsi_state);
2059 if (sc->mpr_debug & MPR_XINFO &&
2060 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2061 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2062 scsi_sense_print(csio);
2063 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
/* On a valid response-info, decode its first byte (the reason code). */
2066 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2067 response_info = le32toh(mpi_reply->ResponseInfo);
2068 response_bytes = (u8 *)&response_info;
2069 mpr_response_code(sc,response_bytes[0]);
2074 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2076 MPI2_SCSI_IO_REPLY *rep;
2078 struct ccb_scsiio *csio;
2079 struct mprsas_softc *sassc;
2080 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2081 u8 *TLR_bits, TLR_on;
2086 mpr_dprint(sc, MPR_TRACE,
2087 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2088 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2089 cm->cm_targ->outstanding);
2091 callout_stop(&cm->cm_callout);
2092 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2095 ccb = cm->cm_complete_data;
2097 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2099 * XXX KDM if the chain allocation fails, does it matter if we do
2100 * the sync and unload here? It is simpler to do it in every case,
2101 * assuming it doesn't cause problems.
2103 if (cm->cm_data != NULL) {
2104 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2105 dir = BUS_DMASYNC_POSTREAD;
2106 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2107 dir = BUS_DMASYNC_POSTWRITE;
2108 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2109 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2112 cm->cm_targ->completed++;
2113 cm->cm_targ->outstanding--;
2114 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2115 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2117 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2118 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2119 if (cm->cm_reply != NULL)
2120 mprsas_log_command(cm, MPR_RECOVERY,
2121 "completed timedout cm %p ccb %p during recovery "
2122 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2123 le16toh(rep->IOCStatus), rep->SCSIStatus,
2124 rep->SCSIState, le32toh(rep->TransferCount));
2126 mprsas_log_command(cm, MPR_RECOVERY,
2127 "completed timedout cm %p ccb %p during recovery\n",
2129 } else if (cm->cm_targ->tm != NULL) {
2130 if (cm->cm_reply != NULL)
2131 mprsas_log_command(cm, MPR_RECOVERY,
2132 "completed cm %p ccb %p during recovery "
2133 "ioc %x scsi %x state %x xfer %u\n",
2134 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2135 rep->SCSIStatus, rep->SCSIState,
2136 le32toh(rep->TransferCount));
2138 mprsas_log_command(cm, MPR_RECOVERY,
2139 "completed cm %p ccb %p during recovery\n",
2141 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2142 mprsas_log_command(cm, MPR_RECOVERY,
2143 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2146 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2148 * We ran into an error after we tried to map the command,
2149 * so we're getting a callback without queueing the command
2150 * to the hardware. So we set the status here, and it will
2151 * be retained below. We'll go through the "fast path",
2152 * because there can be no reply when we haven't actually
2153 * gone out to the hardware.
2155 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2158 * Currently the only error included in the mask is
2159 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2160 * chain frames. We need to freeze the queue until we get
2161 * a command that completed without this error, which will
2162 * hopefully have some chain frames attached that we can
2163 * use. If we wanted to get smarter about it, we would
2164 * only unfreeze the queue in this condition when we're
2165 * sure that we're getting some chain frames back. That's
2166 * probably unnecessary.
2168 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2169 xpt_freeze_simq(sassc->sim, 1);
2170 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2171 mpr_dprint(sc, MPR_INFO, "Error sending command, "
2172 "freezing SIM queue\n");
2177 * If this is a Start Stop Unit command and it was issued by the driver
2178 * during shutdown, decrement the refcount to account for all of the
2179 * commands that were sent. All SSU commands should be completed before
2180 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2183 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2184 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2188 /* Take the fast path to completion */
2189 if (cm->cm_reply == NULL) {
2190 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2191 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2192 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2194 ccb->ccb_h.status = CAM_REQ_CMP;
2195 ccb->csio.scsi_status = SCSI_STATUS_OK;
2197 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2198 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2199 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2200 mpr_dprint(sc, MPR_XINFO,
2201 "Unfreezing SIM queue\n");
2206 * There are two scenarios where the status won't be
2207 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2208 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2210 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2212 * Freeze the dev queue so that commands are
2213 * executed in the correct order with after error
2216 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2217 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2219 mpr_free_command(sc, cm);
2224 mprsas_log_command(cm, MPR_XINFO,
2225 "ioc %x scsi %x state %x xfer %u\n",
2226 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2227 le32toh(rep->TransferCount));
2229 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2230 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2231 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2233 case MPI2_IOCSTATUS_SUCCESS:
2234 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2236 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2237 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2238 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2240 /* Completion failed at the transport level. */
2241 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2242 MPI2_SCSI_STATE_TERMINATED)) {
2243 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2247 /* In a modern packetized environment, an autosense failure
2248 * implies that there's not much else that can be done to
2249 * recover the command.
2251 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2252 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
2257 * CAM doesn't care about SAS Response Info data, but if this is
2258 * the state check if TLR should be done. If not, clear the
2259 * TLR_bits for the target.
2261 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2262 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2263 == MPR_SCSI_RI_INVALID_FRAME)) {
2264 sc->mapping_table[csio->ccb_h.target_id].TLR_bits =
2265 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2269 * Intentionally override the normal SCSI status reporting
2270 * for these two cases. These are likely to happen in a
2271 * multi-initiator environment, and we want to make sure that
2272 * CAM retries these commands rather than fail them.
2274 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2275 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2276 ccb->ccb_h.status = CAM_REQ_ABORTED;
2280 /* Handle normal status and sense */
2281 csio->scsi_status = rep->SCSIStatus;
2282 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2283 ccb->ccb_h.status = CAM_REQ_CMP;
2285 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
2287 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2288 int sense_len, returned_sense_len;
2290 returned_sense_len = min(le32toh(rep->SenseCount),
2291 sizeof(struct scsi_sense_data));
2292 if (returned_sense_len < csio->sense_len)
2293 csio->sense_resid = csio->sense_len -
2296 csio->sense_resid = 0;
2298 sense_len = min(returned_sense_len,
2299 csio->sense_len - csio->sense_resid);
2300 bzero(&csio->sense_data, sizeof(csio->sense_data));
2301 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2302 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2306 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2307 * and it's page code 0 (Supported Page List), and there is
2308 * inquiry data, and this is for a sequential access device, and
2309 * the device is an SSP target, and TLR is supported by the
2310 * controller, turn the TLR_bits value ON if page 0x90 is
2313 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2314 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2315 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2316 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2317 (csio->data_ptr != NULL) && (((uint8_t *)cm->cm_data)[0] ==
2318 T_SEQUENTIAL) && (sc->control_TLR) &&
2319 (sc->mapping_table[csio->ccb_h.target_id].device_info &
2320 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2321 vpd_list = (struct scsi_vpd_supported_page_list *)
2323 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id].
2325 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2326 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2327 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2328 csio->cdb_io.cdb_bytes[4];
2329 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2330 if (vpd_list->list[i] == 0x90) {
2337 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2338 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2340 * If devinfo is 0 this will be a volume. In that case don't
2341 * tell CAM that the volume is not there. We want volumes to
2342 * be enumerated until they are deleted/removed, not just
2345 if (cm->cm_targ->devinfo == 0)
2346 ccb->ccb_h.status = CAM_REQ_CMP;
2348 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2350 case MPI2_IOCSTATUS_INVALID_SGL:
2351 mpr_print_scsiio_cmd(sc, cm);
2352 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
2354 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2356 * This is one of the responses that comes back when an I/O
2357 * has been aborted. If it is because of a timeout that we
2358 * initiated, just set the status to CAM_CMD_TIMEOUT.
2359 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2360 * command is the same (it gets retried, subject to the
2361 * retry counter), the only difference is what gets printed
2364 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2365 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
2367 ccb->ccb_h.status = CAM_REQ_ABORTED;
2369 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2370 /* resid is ignored for this condition */
2372 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
2374 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2375 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2377 * Since these are generally external (i.e. hopefully
2378 * transient transport-related) errors, retry these without
2379 * decrementing the retry count.
2381 ccb->ccb_h.status = CAM_REQUEUE_REQ;
2382 mprsas_log_command(cm, MPR_INFO,
2383 "terminated ioc %x scsi %x state %x xfer %u\n",
2384 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2385 le32toh(rep->TransferCount));
2387 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2388 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2389 case MPI2_IOCSTATUS_INVALID_VPID:
2390 case MPI2_IOCSTATUS_INVALID_FIELD:
2391 case MPI2_IOCSTATUS_INVALID_STATE:
2392 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2393 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2394 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2395 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2396 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2398 mprsas_log_command(cm, MPR_XINFO,
2399 "completed ioc %x scsi %x state %x xfer %u\n",
2400 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2401 le32toh(rep->TransferCount));
2402 csio->resid = cm->cm_length;
2403 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2407 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2409 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2410 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2411 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2412 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2416 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2417 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2418 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2421 mpr_free_command(sc, cm);
2425 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands built by
 * mprsas_send_smpcmd().  Translates the MPI2 SMP reply into a CAM CCB
 * status, then (via the common exit path) syncs and unloads the
 * bidirectional DMA map, frees the command, and completes the CCB.
 * NOTE(review): this listing is a lossy extraction -- braces, blank
 * lines and the bailout label are missing from view.
 */
2427 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2429 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2430 MPI2_SMP_PASSTHROUGH_REQUEST *req;
/* The CCB to complete was stashed on the command by the submit path. */
2434 ccb = cm->cm_complete_data;
2437 * Currently there should be no way we can hit this case. It only
2438 * happens when we have a failure to allocate chain frames, and SMP
2439 * commands require two S/G elements only. That should be handled
2440 * in the standard request size.
2442 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2443 mpr_dprint(sc, MPR_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2444 __func__, cm->cm_flags);
2445 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* No reply frame from the firmware: fail the CCB. */
2449 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2451 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2452 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Reassemble the 64-bit SAS address from the little-endian request. */
2456 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2457 sasaddr = le32toh(req->SASAddress.Low);
2458 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
/* Byte-swap first, then mask -- the correct order for IOCStatus. */
2460 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2461 MPI2_IOCSTATUS_SUCCESS ||
2462 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2463 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2464 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2465 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2469 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address "
2470 "%#jx completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the SMP function result code. */
2472 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2473 ccb->ccb_h.status = CAM_REQ_CMP;
2475 ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;
2479 * We sync in both directions because we had DMAs in the S/G list
2480 * in both directions.
2482 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2483 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2484 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2485 mpr_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * expander at 'sasaddr'.  Both request and response buffers are mapped
 * in a single busdma operation via a two-element uio.  On any setup
 * failure the CCB is completed immediately with an error status;
 * otherwise completion happens in mprsas_smpio_complete().
 */
2490 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
2493 struct mpr_command *cm;
2494 uint8_t *request, *response;
2495 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2496 struct mpr_softc *sc;
/*
 * Newer CAM encodes the data-transfer style in CAM_DATA_MASK; dispatch
 * on it.  Physical addresses are rejected in both code paths below.
 */
2504 #if __FreeBSD_version >= 1000029
2505 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2506 case CAM_DATA_PADDR:
2507 case CAM_DATA_SG_PADDR:
2509 * XXX We don't yet support physical addresses here.
2511 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2512 "supported\n", __func__);
2513 ccb->ccb_h.status = CAM_REQ_INVALID;
2518 * The chip does not support more than one buffer for the
2519 * request or response.
2521 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2522 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2523 mpr_dprint(sc, MPR_ERROR,
2524 "%s: multiple request or response buffer segments "
2525 "not supported for SMP\n", __func__);
2526 ccb->ccb_h.status = CAM_REQ_INVALID;
2532 * The CAM_SCATTER_VALID flag was originally implemented
2533 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2534 * We have two. So, just take that flag to mean that we
2535 * might have S/G lists, and look at the S/G segment count
2536 * to figure out whether that is the case for each individual
2539 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2540 bus_dma_segment_t *req_sg;
2542 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2543 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2545 request = ccb->smpio.smp_request;
2547 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2548 bus_dma_segment_t *rsp_sg;
2550 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2551 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2553 response = ccb->smpio.smp_response;
2555 case CAM_DATA_VADDR:
2556 request = ccb->smpio.smp_request;
2557 response = ccb->smpio.smp_response;
2560 ccb->ccb_h.status = CAM_REQ_INVALID;
2564 #else //__FreeBSD_version < 1000029
2566 * XXX We don't yet support physical addresses here.
2568 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2569 mpr_printf(sc, "%s: physical addresses not supported\n",
2571 ccb->ccb_h.status = CAM_REQ_INVALID;
2577 * If the user wants to send an S/G list, check to make sure they
2578 * have single buffers.
2580 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2582 * The chip does not support more than one buffer for the
2583 * request or response.
2585 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2586 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2587 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2588 "response buffer segments not supported for SMP\n",
2590 ccb->ccb_h.status = CAM_REQ_INVALID;
2596 * The CAM_SCATTER_VALID flag was originally implemented
2597 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2598 * We have two. So, just take that flag to mean that we
2599 * might have S/G lists, and look at the S/G segment count
2600 * to figure out whether that is the case for each individual
2603 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2604 bus_dma_segment_t *req_sg;
2606 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2607 request = (uint8_t *)req_sg[0].ds_addr;
2609 request = ccb->smpio.smp_request;
2611 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2612 bus_dma_segment_t *rsp_sg;
2614 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2615 response = (uint8_t *)rsp_sg[0].ds_addr;
2617 response = ccb->smpio.smp_response;
2619 request = ccb->smpio.smp_request;
2620 response = ccb->smpio.smp_response;
2622 #endif //__FreeBSD_version >= 1000029
2624 cm = mpr_alloc_command(sc);
2626 mpr_dprint(sc, MPR_ERROR,
2627 "%s: cannot allocate command\n", __func__);
2628 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Build the MPI2 SMP passthrough request frame. */
2633 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2634 bzero(req, sizeof(*req));
2635 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2637 /* Allow the chip to use any route to this SAS address. */
2638 req->PhysicalPort = 0xff;
2640 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2642 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2644 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2645 "%#jx\n", __func__, (uintmax_t)sasaddr);
2647 mpr_init_sge(cm, req, &req->SGL);
2650 * Set up a uio to pass into mpr_map_command(). This allows us to
2651 * do one map command, and one busdma call in there.
2653 cm->cm_uio.uio_iov = cm->cm_iovec;
2654 cm->cm_uio.uio_iovcnt = 2;
2655 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2658 * The read/write flag isn't used by busdma, but set it just in
2659 * case. This isn't exactly accurate, either, since we're going in
2662 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = request frame, iovec[1] = response buffer. */
2664 cm->cm_iovec[0].iov_base = request;
2665 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2666 cm->cm_iovec[1].iov_base = response;
2667 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2669 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2670 cm->cm_iovec[1].iov_len;
2673 * Trigger a warning message in mpr_data_cb() for the user if we
2674 * wind up exceeding two S/G segments. The chip expects one
2675 * segment for the request and another for the response.
2677 cm->cm_max_segs = 2;
2679 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2680 cm->cm_complete = mprsas_smpio_complete;
2681 cm->cm_complete_data = ccb;
2684 * Tell the mapping code that we're using a uio, and that this is
2685 * an SMP passthrough request. There is a little special-case
2686 * logic there (in mpr_data_cb()) to handle the bidirectional
2689 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2690 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2692 /* The chip data format is little endian. */
2693 req->SASAddress.High = htole32(sasaddr >> 32);
2694 req->SASAddress.Low = htole32(sasaddr);
2697 * XXX Note that we don't have a timeout/abort mechanism here.
2698 * From the manual, it looks like task management requests only
2699 * work for SCSI IO and SATA passthrough requests. We may need to
2700 * have a mechanism to retry requests in the event of a chip reset
2701 * at least. Hopefully the chip will insure that any errors short
2702 * of that are relayed back to the driver.
2704 error = mpr_map_command(sc, cm);
2705 if ((error != 0) && (error != EINPROGRESS)) {
2706 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2707 "mpr_map_command()\n", __func__, error);
/* Error path: release the command and fail the CCB. */
2714 mpr_free_command(sc, cm);
2715 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/*
 * XPT_SMP_IO dispatcher: resolve the SAS address of the SMP target for
 * this CCB and hand off to mprsas_send_smpcmd().  The address comes
 * either from the target itself (embedded SMP target) or from its
 * parent expander; if neither yields one, the CCB fails with
 * CAM_DEV_NOT_THERE.
 */
2721 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2723 struct mpr_softc *sc;
2724 struct mprsas_target *targ;
2725 uint64_t sasaddr = 0;
2730 * Make sure the target exists.
2732 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2733 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2734 targ = &sassc->targets[ccb->ccb_h.target_id];
2735 if (targ->handle == 0x0) {
2736 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2737 __func__, ccb->ccb_h.target_id);
2738 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2744 * If this device has an embedded SMP target, we'll talk to it
2746 * figure out what the expander's address is.
2748 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2749 sasaddr = targ->sasaddr;
2752 * If we don't have a SAS address for the expander yet, try
2753 * grabbing it from the page 0x83 information cached in the
2754 * transport layer for this target. LSI expanders report the
2755 * expander SAS address as the port-associated SAS address in
2756 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2759 * XXX KDM disable this for now, but leave it commented out so that
2760 * it is obvious that this is another possible way to get the SAS
2763 * The parent handle method below is a little more reliable, and
2764 * the other benefit is that it works for devices other than SES
2765 * devices. So you can send a SMP request to a da(4) device and it
2766 * will get routed to the expander that device is attached to.
2767 * (Assuming the da(4) device doesn't contain an SMP target...)
2771 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2775 * If we still don't have a SAS address for the expander, look for
2776 * the parent device of this device, which is probably the expander.
2779 #ifdef OLD_MPR_PROBE
2780 struct mprsas_target *parent_target;
2783 if (targ->parent_handle == 0x0) {
2784 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2785 "a valid parent handle!\n", __func__, targ->handle);
2786 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
/* Legacy probe path: look the parent target up by handle. */
2789 #ifdef OLD_MPR_PROBE
2790 parent_target = mprsas_find_target_by_handle(sassc, 0,
2791 targ->parent_handle);
2793 if (parent_target == NULL) {
2794 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2795 "a valid parent target!\n", __func__, targ->handle);
2796 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2800 if ((parent_target->devinfo &
2801 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2802 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2803 "does not have an SMP target!\n", __func__,
2804 targ->handle, parent_target->handle);
2805 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2810 sasaddr = parent_target->sasaddr;
2811 #else /* OLD_MPR_PROBE */
2812 if ((targ->parent_devinfo &
2813 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2814 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2815 "does not have an SMP target!\n", __func__,
2816 targ->handle, targ->parent_handle);
2817 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2821 if (targ->parent_sasaddr == 0x0) {
2822 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2823 "%d does not have a valid SAS address!\n", __func__,
2824 targ->handle, targ->parent_handle);
2825 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2829 sasaddr = targ->parent_sasaddr;
2830 #endif /* OLD_MPR_PROBE */
/* Still no address: nothing to route the SMP request to. */
2835 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2836 "handle %d\n", __func__, targ->handle);
2837 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2840 mprsas_send_smpcmd(sassc, ccb, sasaddr);
2848 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV handler: issue an MPI2 Target Reset task-management
 * request (with link-reset message flags) to the target addressed by
 * the CCB.  Completion is handled by mprsas_resetdev_complete().
 * Must be called with the driver mutex held.
 * NOTE(review): the elided lines presumably assign sc = sassc->sc
 * before tm = mpr_alloc_command(sc) -- confirm against full source.
 */
2851 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2853 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2854 struct mpr_softc *sc;
2855 struct mpr_command *tm;
2856 struct mprsas_target *targ;
2858 MPR_FUNCTRACE(sassc->sc);
2859 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2861 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2862 ("Target %d out of bounds in XPT_RESET_DEV\n",
2863 ccb->ccb_h.target_id));
2865 tm = mpr_alloc_command(sc);
2867 mpr_dprint(sc, MPR_ERROR,
2868 "command alloc failure in mprsas_action_resetdev\n");
2869 ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
/* Build the task-management frame; DevHandle is little-endian. */
2874 targ = &sassc->targets[ccb->ccb_h.target_id];
2875 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2876 req->DevHandle = htole16(targ->handle);
2877 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2878 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2880 /* SAS Hard Link Reset / SATA Link Reset */
2881 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management goes on the high-priority descriptor queue. */
2884 tm->cm_desc.HighPriority.RequestFlags =
2885 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2886 tm->cm_complete = mprsas_resetdev_complete;
2887 tm->cm_complete_data = ccb;
2889 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset task-management request sent
 * by mprsas_action_resetdev().  Maps the TM response code onto the CCB
 * status, announces the reset to CAM on success, frees the TM command,
 * and completes the CCB.
 */
2893 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2895 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2899 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2901 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2902 ccb = tm->cm_complete_data;
2905 * Currently there should be no way we can hit this case. It only
2906 * happens when we have a failure to allocate chain frames, and
2907 * task management commands don't have S/G lists.
2909 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2910 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2912 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2914 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
2915 "handle %#04x! This should not happen!\n", __func__,
2916 tm->cm_flags, req->DevHandle);
2917 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/*
 * NOTE(review): resp is dereferenced here; the elided lines presumably
 * contain a NULL-reply guard -- confirm against the full source.
 */
2921 mpr_dprint(sc, MPR_XINFO,
2922 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
2923 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
2925 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
2926 ccb->ccb_h.status = CAM_REQ_CMP;
2927 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
2931 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2935 mprsas_free_tm(sc, tm);
2940 mprsas_poll(struct cam_sim *sim)
2942 struct mprsas_softc *sassc;
2944 sassc = cam_sim_softc(sim);
2946 if (sassc->sc->mpr_debug & MPR_TRACE) {
2947 /* frequent debug messages during a panic just slow
2948 * everything down too much.
2950 mpr_printf(sassc->sc, "%s clearing MPR_TRACE\n", __func__);
2951 sassc->sc->mpr_debug &= ~MPR_TRACE;
2954 mpr_intr_locked(sassc->sc);
/*
 * CAM async event callback.  Handles AC_ADVINFO_CHANGED (re-read long
 * read-capacity data to refresh per-LUN EEDP state) and AC_FOUND_DEVICE
 * (prepare shutdown StartStopUnit and, on older CAM, probe for EEDP).
 */
2958 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
2961 struct mpr_softc *sc;
2963 sc = (struct mpr_softc *)callback_arg;
2966 #if (__FreeBSD_version >= 1000006) || \
2967 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
2968 case AC_ADVINFO_CHANGED: {
2969 struct mprsas_target *target;
2970 struct mprsas_softc *sassc;
2971 struct scsi_read_capacity_data_long rcap_buf;
2972 struct ccb_dev_advinfo cdai;
2973 struct mprsas_lun *lun;
/* The async argument encodes which advinfo buffer type changed. */
2978 buftype = (uintptr_t)arg;
2984 * We're only interested in read capacity data changes.
2986 if (buftype != CDAI_TYPE_RCAPLONG)
2990 * We should have a handle for this, but check to make sure.
2992 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
2993 ("Target %d out of bounds in mprsas_async\n",
2994 xpt_path_target_id(path)));
2995 target = &sassc->targets[xpt_path_target_id(path)];
2996 if (target->handle == 0)
/* Find (or create) the per-target LUN record for this path. */
2999 lunid = xpt_path_lun_id(path);
3001 SLIST_FOREACH(lun, &target->luns, lun_link) {
3002 if (lun->lun_id == lunid) {
3008 if (found_lun == 0) {
3009 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3012 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3013 "LUN for EEDP support.\n");
3016 lun->lun_id = lunid;
3017 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
3020 bzero(&rcap_buf, sizeof(rcap_buf));
3021 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3022 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3023 cdai.ccb_h.flags = CAM_DIR_IN;
3024 cdai.buftype = CDAI_TYPE_RCAPLONG;
3026 cdai.bufsiz = sizeof(rcap_buf);
3027 cdai.buf = (uint8_t *)&rcap_buf;
3028 xpt_action((union ccb *)&cdai);
3029 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3030 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* SRC16_PROT_EN set means the LUN is formatted with protection info. */
3032 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
3033 && (rcap_buf.prot & SRC16_PROT_EN)) {
3034 lun->eedp_formatted = TRUE;
3035 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3037 lun->eedp_formatted = FALSE;
3038 lun->eedp_block_size = 0;
3043 case AC_FOUND_DEVICE: {
3044 struct ccb_getdev *cgd;
3047 mprsas_prepare_ssu(sc, path, cgd);
/* Older CAM lacks AC_ADVINFO_CHANGED; probe EEDP directly instead. */
3048 #if (__FreeBSD_version < 901503) || \
3049 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3050 mprsas_check_eedp(sc, path, cgd);
/*
 * On device discovery, record whether the device should receive a SCSI
 * StartStopUnit at driver shutdown.  Creates a per-target LUN record if
 * one does not already exist and sets stop_at_shutdown for SATA
 * direct-access end devices.
 */
3060 mprsas_prepare_ssu(struct mpr_softc *sc, struct cam_path *path,
3061 struct ccb_getdev *cgd)
3063 struct mprsas_softc *sassc = sc->sassc;
3065 target_id_t targetid;
3067 struct mprsas_target *target;
3068 struct mprsas_lun *lun;
3072 pathid = cam_sim_path(sassc->sim);
3073 targetid = xpt_path_target_id(path);
3074 lunid = xpt_path_lun_id(path);
3076 KASSERT(targetid < sassc->maxtargets,
3077 ("Target %d out of bounds in mprsas_prepare_ssu\n", targetid));
3078 target = &sassc->targets[targetid];
3079 if (target->handle == 0x0)
3083 * If LUN is already in list, don't create a new one.
3086 SLIST_FOREACH(lun, &target->luns, lun_link) {
3087 if (lun->lun_id == lunid) {
3093 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3096 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3097 "preparing SSU.\n");
3100 lun->lun_id = lunid;
3101 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3105 * If this is a SATA direct-access end device, mark it so that a SCSI
3106 * StartStopUnit command will be sent to it when the driver is being
3109 if (((cgd->inq_data.device & 0x1F) == T_DIRECT) &&
3110 (target->devinfo & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
3111 ((target->devinfo & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
3112 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
3113 lun->stop_at_shutdown = TRUE;
3117 #if (__FreeBSD_version < 901503) || \
3118 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Older-CAM EEDP probe: if the newly discovered device advertises
 * protection support in its inquiry data, allocate a LUN record and
 * issue an internally generated READ CAPACITY(16) whose completion
 * (mprsas_read_cap_done) records the EEDP formatting state.
 */
3120 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3121 struct ccb_getdev *cgd)
3123 struct mprsas_softc *sassc = sc->sassc;
3124 struct ccb_scsiio *csio;
3125 struct scsi_read_capacity_16 *scsi_cmd;
3126 struct scsi_read_capacity_eedp *rcap_buf;
3128 target_id_t targetid;
3131 struct cam_path *local_path;
3132 struct mprsas_target *target;
3133 struct mprsas_lun *lun;
3138 pathid = cam_sim_path(sassc->sim);
3139 targetid = xpt_path_target_id(path);
3140 lunid = xpt_path_lun_id(path);
3142 KASSERT(targetid < sassc->maxtargets,
3143 ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3144 target = &sassc->targets[targetid];
3145 if (target->handle == 0x0)
3149 * Determine if the device is EEDP capable.
3151 * If this flag is set in the inquiry data, the device supports
3152 * protection information, and must support the 16 byte read capacity
3153 * command, otherwise continue without sending read cap 16
3155 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3159 * Issue a READ CAPACITY 16 command. This info is used to determine if
3160 * the LUN is formatted for EEDP support.
3162 ccb = xpt_alloc_ccb_nowait();
3164 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3169 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3171 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3178 * If LUN is already in list, don't create a new one.
3181 SLIST_FOREACH(lun, &target->luns, lun_link) {
3182 if (lun->lun_id == lunid) {
3188 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3191 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3193 xpt_free_path(local_path);
3197 lun->lun_id = lunid;
3198 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3201 xpt_path_string(local_path, path_str, sizeof(path_str));
3202 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3203 path_str, target->handle);
3206 * Issue a READ CAPACITY 16 command for the LUN. The
3207 * mprsas_read_cap_done function will load the read cap info into the
3210 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3212 if (rcap_buf == NULL) {
3213 mpr_dprint(sc, MPR_FAULT, "Unable to alloc read capacity "
3214 "buffer for EEDP support.\n");
3215 xpt_free_path(ccb->ccb_h.path);
/* Hand-build the READ CAPACITY(16) CCB; completion frees rcap_buf. */
3219 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3221 csio->ccb_h.func_code = XPT_SCSI_IO;
3222 csio->ccb_h.flags = CAM_DIR_IN;
3223 csio->ccb_h.retry_count = 4;
3224 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3225 csio->ccb_h.timeout = 60000;
3226 csio->data_ptr = (uint8_t *)rcap_buf;
3227 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3228 csio->sense_len = MPR_SENSE_LEN;
3229 csio->cdb_len = sizeof(*scsi_cmd);
3230 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E/0x10 = SERVICE ACTION IN / READ CAPACITY(16). */
3232 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3233 bzero(scsi_cmd, sizeof(*scsi_cmd));
3234 scsi_cmd->opcode = 0x9E;
3235 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3236 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3238 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internally issued READ CAPACITY(16) from
 * mprsas_check_eedp().  Records per-LUN EEDP formatting state, then
 * frees the capacity buffer, the path and the CCB.
 */
3243 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3245 struct mprsas_softc *sassc;
3246 struct mprsas_target *target;
3247 struct mprsas_lun *lun;
3248 struct scsi_read_capacity_eedp *rcap_buf;
3250 if (done_ccb == NULL)
3253 /* Driver need to release devq, it Scsi command is
3254 * generated by driver internally.
3255 * Currently there is a single place where driver
3256 * calls scsi command internally. In future if driver
3257 * calls more scsi command internally, it needs to release
3258 * devq internally, since those command will not go back to
3261 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3262 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3263 xpt_release_devq(done_ccb->ccb_h.path,
3264 /*count*/ 1, /*run_queue*/TRUE);
3267 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3270 * Get the LUN ID for the path and look it up in the LUN list for the
3273 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3274 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3275 ("Target %d out of bounds in mprsas_read_cap_done\n",
3276 done_ccb->ccb_h.target_id));
3277 target = &sassc->targets[done_ccb->ccb_h.target_id];
3278 SLIST_FOREACH(lun, &target->luns, lun_link) {
3279 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3283 * Got the LUN in the target's LUN list. Fill it in with EEDP
3284 * info. If the READ CAP 16 command had some SCSI error (common
3285 * if command is not supported), mark the lun as not supporting
3286 * EEDP and set the block size to 0.
3288 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
3289 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3290 lun->eedp_formatted = FALSE;
3291 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte = PROT_EN (protection enabled). */
3295 if (rcap_buf->protect & 0x01) {
3296 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for "
3297 "target ID %d is formatted for EEDP "
3298 "support.\n", done_ccb->ccb_h.target_lun,
3299 done_ccb->ccb_h.target_id);
3300 lun->eedp_formatted = TRUE;
3301 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3306 // Finished with this CCB and path.
3307 free(rcap_buf, M_MPR);
3308 xpt_free_path(done_ccb->ccb_h.path);
3309 xpt_free_ccb(done_ccb);
3311 #endif /* (__FreeBSD_version < 901503) || \
3312 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3315 mprsas_startup(struct mpr_softc *sc)
3318 * Send the port enable message and set the wait_for_port_enable flag.
3319 * This flag helps to keep the simq frozen until all discovery events
3322 sc->wait_for_port_enable = 1;
3323 mprsas_send_portenable(sc);
3328 mprsas_send_portenable(struct mpr_softc *sc)
3330 MPI2_PORT_ENABLE_REQUEST *request;
3331 struct mpr_command *cm;
3335 if ((cm = mpr_alloc_command(sc)) == NULL)
3337 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3338 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3339 request->MsgFlags = 0;
3341 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3342 cm->cm_complete = mprsas_portenable_complete;
3346 mpr_map_command(sc, cm);
3347 mpr_dprint(sc, MPR_XINFO,
3348 "mpr_send_portenable finished cm %p req %p complete %p\n",
3349 cm, cm->cm_req, cm->cm_complete);
3354 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3356 MPI2_PORT_ENABLE_REPLY *reply;
3357 struct mprsas_softc *sassc;
3363 * Currently there should be no way we can hit this case. It only
3364 * happens when we have a failure to allocate chain frames, and
3365 * port enable commands don't have S/G lists.
3367 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3368 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3369 "This should not happen!\n", __func__, cm->cm_flags);
3372 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3374 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3375 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3376 MPI2_IOCSTATUS_SUCCESS)
3377 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3379 mpr_free_command(sc, cm);
3380 if (sc->mpr_ich.ich_arg != NULL) {
3381 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3382 config_intrhook_disestablish(&sc->mpr_ich);
3383 sc->mpr_ich.ich_arg = NULL;
3387 * Done waiting for port enable to complete. Decrement the refcount.
3388 * If refcount is 0, discovery is complete and a rescan of the bus can
3391 sc->wait_for_port_enable = 0;
3392 sc->port_enable_complete = 1;
3393 wakeup(&sc->port_enable_complete);
3394 mprsas_startup_decrement(sassc);
3398 mprsas_check_id(struct mprsas_softc *sassc, int id)
3400 struct mpr_softc *sc = sassc->sc;
3404 ids = &sc->exclude_ids[0];
3405 while((name = strsep(&ids, ",")) != NULL) {
3406 if (name[0] == '\0')
3408 if (strtol(name, NULL, 0) == (long)id)