2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2015 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for Avago Technologies (LSI) MPT3 */
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
75 #include <dev/mpr/mpi/mpi2_type.h>
76 #include <dev/mpr/mpi/mpi2.h>
77 #include <dev/mpr/mpi/mpi2_ioc.h>
78 #include <dev/mpr/mpi/mpi2_sas.h>
79 #include <dev/mpr/mpi/mpi2_cnfg.h>
80 #include <dev/mpr/mpi/mpi2_init.h>
81 #include <dev/mpr/mpi/mpi2_tool.h>
82 #include <dev/mpr/mpr_ioctl.h>
83 #include <dev/mpr/mprvar.h>
84 #include <dev/mpr/mpr_table.h>
85 #include <dev/mpr/mpr_sas.h>
/*
 * Discovery watchdog tuning: seconds per timeout tick and the number of
 * consecutive ticks tolerated before discovery is declared failed.
 */
87 #define MPRSAS_DISCOVERY_TIMEOUT 20
88 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
91 * static array to check SCSI OpCode for EEDP protection bits
/* EEDP flag shorthands: R = check+remove (reads), W/V = insert (writes). */
93 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
94 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
95 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI opcode (0x00-0xff).  A nonzero entry gives
 * the EEDP flags to apply for that command; the populated slots correspond
 * to READ/WRITE/WRITE-AND-VERIFY/VERIFY variants (e.g. 0x28 READ(10),
 * 0x2A WRITE(10)).  NOTE(review): the table's closing brace is outside this
 * view.
 */
96 static uint8_t op_code_prot[256] = {
97 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
106 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Private malloc(9) type for allocations owned by the SAS transport layer. */
115 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
/* Forward declarations for this file's static CAM/TM helpers. */
117 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
120 static void mprsas_poll(struct cam_sim *sim);
121 static void mprsas_scsiio_timeout(void *data);
122 static void mprsas_abort_complete(struct mpr_softc *sc,
123 struct mpr_command *cm);
124 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
125 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
126 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
127 static void mprsas_resetdev_complete(struct mpr_softc *,
128 struct mpr_command *);
129 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130 struct mpr_command *cm);
131 static void mprsas_async(void *callback_arg, uint32_t code,
132 struct cam_path *path, void *arg);
/* EEDP probing via READ CAPACITY is only needed on older CAM stacks. */
133 #if (__FreeBSD_version < 901503) || \
134 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136 struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138 union ccb *done_ccb);
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142 struct mpr_command *cm);
/* SMP passthrough support first appeared in CAM at 900026. */
144 #if __FreeBSD_version >= 900026
145 static void mprsas_smpio_complete(struct mpr_softc *sc,
146 struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148 union ccb *ccb, uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
/*
 * Linear search of sassc->targets[] for the entry whose firmware device
 * handle equals 'handle', beginning at index 'start'.  Returns the matching
 * target.  NOTE(review): the match/not-found return paths are outside this
 * view; presumably NULL is returned when no entry matches.
 */
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
156 struct mprsas_target *target;
159 for (i = start; i < sassc->maxtargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery. Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
/*
 * Bump the discovery refcount; the 0 -> 1 transition freezes the SIM queue.
 * Only meaningful while MPRSAS_IN_STARTUP is set.  Caller must hold the
 * softc mutex (all callers in this file do).
 */
176 mprsas_startup_increment(struct mprsas_softc *sassc)
178 MPR_FUNCTRACE(sassc->sc);
180 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 if (sassc->startup_refcount++ == 0) {
182 /* just starting, freeze the simq */
183 mpr_dprint(sassc->sc, MPR_INIT,
184 "%s freezing simq\n", __func__);
/* PIM_NOSCAN-capable CAM versions take a different freeze path here. */
185 #if (__FreeBSD_version >= 1000039) || \
186 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
189 xpt_freeze_simq(sassc->sim, 1);
191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 sassc->startup_refcount);
/*
 * Release the SIM queue if this driver froze it (MPRSAS_QUEUE_FROZEN),
 * clearing the flag so the release happens at most once.
 */
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 xpt_release_simq(sassc->sim, 1);
202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop the discovery refcount; the 1 -> 0 transition ends startup mode,
 * releases the SIM queue, and (on newer CAM) triggers a full-bus rescan.
 * Pairs with mprsas_startup_increment().
 */
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 MPR_FUNCTRACE(sassc->sc);
211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 if (--sassc->startup_refcount == 0) {
213 /* finished all discovery-related actions, release
214 * the simq and rescan for the latest topology.
216 mpr_dprint(sassc->sc, MPR_INIT,
217 "%s releasing simq\n", __func__);
218 sassc->flags &= ~MPRSAS_IN_STARTUP;
219 xpt_release_simq(sassc->sim, 1);
/* NULL target means wildcard: rescan the entire bus. */
220 #if (__FreeBSD_version >= 1000039) || \
221 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
224 mprsas_rescan_target(sassc->sc, NULL);
227 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
228 sassc->startup_refcount);
232 /* The firmware requires us to stop sending commands when we're doing task
233 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a high-priority command frame for a task-management request.
 * NOTE(review): the NULL-check/return tail of this function is outside this
 * view.
 */
237 mprsas_alloc_tm(struct mpr_softc *sc)
239 struct mpr_command *tm;
242 tm = mpr_alloc_high_priority_command(sc);
/*
 * Release a task-management command.  If the TM carried a CCB, the devq was
 * frozen for the target: undo the freeze, free the path/CCB used to freeze
 * it, and clear INRESET so normal SCSI I/O can resume.
 */
247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
254 * For TM's the devq is frozen for the device. Unfreeze it here and
255 * free the resources used for freezing the devq. Must clear the
256 * INRESET flag as well or scsi I/O will not work.
258 if (tm->cm_targ != NULL) {
259 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
262 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
264 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
265 xpt_free_path(tm->cm_ccb->ccb_h.path);
266 xpt_free_ccb(tm->cm_ccb);
/* Return the frame to the high-priority free pool. */
269 mpr_free_high_priority_command(sc, tm);
/*
 * Schedule an asynchronous CAM rescan of one target, or of the whole bus
 * when 'targ' is NULL (wildcard target id).  Allocation failures are logged
 * and the rescan is silently dropped.
 */
273 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
275 struct mprsas_softc *sassc = sc->sassc;
277 target_id_t targetid;
281 pathid = cam_sim_path(sassc->sim);
283 targetid = CAM_TARGET_WILDCARD;
/* Target id is the index of the entry within sassc->targets[]. */
285 targetid = targ - sassc->targets;
288 * Allocate a CCB and schedule a rescan.
290 ccb = xpt_alloc_ccb_nowait();
292 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
296 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
297 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
298 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
/* Wildcard target => full bus scan; otherwise scan just that target. */
303 if (targetid == CAM_TARGET_WILDCARD)
304 ccb->ccb_h.func_code = XPT_SCAN_BUS;
306 ccb->ccb_h.func_code = XPT_SCAN_TGT;
308 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logger for a command: prefixes the message with the
 * CCB's CAM path (or a synthesized "noperiph" path when there is no CCB),
 * the SCSI CDB for XPT_SCSI_IO requests, and the firmware SMID.  Emits
 * nothing unless 'level' is enabled in mpr_debug.
 */
313 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
323 /* No need to be in here if debugging isn't enabled */
324 if ((cm->cm_sc->mpr_debug & level) == 0)
327 sbuf_new(&sb, str, sizeof(str), 0);
331 if (cm->cm_ccb != NULL) {
332 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
334 sbuf_cat(&sb, path_str);
335 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
336 scsi_command_string(&cm->cm_ccb->csio, &sb);
337 sbuf_printf(&sb, "length %d ",
338 cm->cm_ccb->csio.dxfer_len);
/* No CCB: fall back to sim name/unit/bus and the target id (if known). */
341 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
342 cam_sim_name(cm->cm_sc->sassc->sim),
343 cam_sim_unit(cm->cm_sc->sassc->sim),
344 cam_sim_bus(cm->cm_sc->sassc->sim),
345 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
349 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
350 sbuf_vprintf(&sb, fmt, ap);
352 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  Volume removal needs no follow-up
 * SAS_OP_REMOVE_DEVICE; on success the target entry is cleared here.
 *
 * Fix: reply fields (IOCStatus, TerminationCount) are little-endian on the
 * wire; convert with le16toh()/le32toh() before use, matching
 * mprsas_remove_device() and mprsas_remove_complete().  Previously they
 * were read raw, which is wrong on big-endian hosts.
 */
358 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
360 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
361 struct mprsas_target *targ;
366 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
367 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
/* A NULL reply most likely means a diag reset tore the command down. */
371 /* XXX retry the remove after the diag reset completes? */
372 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
373 "0x%04x\n", __func__, handle);
374 mprsas_free_tm(sc, tm);
378 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
379 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
380 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
381 mprsas_free_tm(sc, tm);
385 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
386 le32toh(reply->TerminationCount));
387 mpr_free_reply(sc, tm->cm_reply_data);
388 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
390 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
394 * Don't clear target if remove fails because things will get confusing.
395 * Leave the devname and sasaddr intact so that we know to avoid reusing
396 * this target id if possible, and so we can assign the same target id
397 * to this device if it comes back in the future.
399 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
402 targ->encl_handle = 0x0;
403 targ->encl_level_valid = 0x0;
404 targ->encl_level = 0x0;
/* Blank (not NUL) padding matches how the name is printed with %4s. */
405 targ->connector_name[0] = ' ';
406 targ->connector_name[1] = ' ';
407 targ->connector_name[2] = ' ';
408 targ->connector_name[3] = ' ';
409 targ->encl_slot = 0x0;
410 targ->exp_dev_handle = 0x0;
412 targ->linkrate = 0x0;
415 targ->scsi_req_desc_type = 0;
418 mprsas_free_tm(sc, tm);
423 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
424 * Otherwise Volume Delete is same as Bare Drive Removal.
427 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
429 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
430 struct mpr_softc *sc;
431 struct mpr_command *cm;
432 struct mprsas_target *targ = NULL;
434 MPR_FUNCTRACE(sassc->sc);
437 targ = mprsas_find_target_by_handle(sassc, 0, handle);
439 /* FIXME: what is the action? */
440 /* We don't know about this device? */
441 mpr_dprint(sc, MPR_ERROR,
442 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 targ->flags |= MPRSAS_TARGET_INREMOVAL;
448 cm = mprsas_alloc_tm(sc);
450 mpr_dprint(sc, MPR_ERROR,
451 "%s: command alloc failure\n", __func__);
455 mprsas_rescan_target(sc, targ);
457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
458 req->DevHandle = targ->handle;
459 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
460 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
462 /* SAS Hard Link Reset / SATA Link Reset */
463 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 cm->cm_desc.HighPriority.RequestFlags =
468 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
469 cm->cm_complete = mprsas_remove_volume;
470 cm->cm_complete_data = (void *)(uintptr_t)handle;
472 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
473 __func__, targ->tid);
474 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
476 mpr_map_command(sc, cm);
480 * The MPT3 firmware performs debounce on the link to avoid transient link
481 * errors and false removals. When it does decide that link has been lost
482 * and a device needs to go away, it expects that the host will perform a
483 * target reset and then an op remove. The reset has the side-effect of
484 * aborting any outstanding requests for the device, which is required for
485 * the op-remove to succeed. It's not clear if the host should check for
486 * the device coming back alive after the reset.
/*
 * Begin removal of a bare (non-volume) device: mark the target INREMOVAL,
 * rescan, then issue a target-reset TM; mprsas_remove_device() follows up
 * with the SAS_OP_REMOVE_DEVICE described above.
 */
489 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
491 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
492 struct mpr_softc *sc;
493 struct mpr_command *cm;
494 struct mprsas_target *targ = NULL;
496 MPR_FUNCTRACE(sassc->sc);
500 targ = mprsas_find_target_by_handle(sassc, 0, handle);
502 /* FIXME: what is the action? */
503 /* We don't know about this device? */
504 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
509 targ->flags |= MPRSAS_TARGET_INREMOVAL;
511 cm = mprsas_alloc_tm(sc);
513 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
518 mprsas_rescan_target(sc, targ);
/* Build the target-reset TM; DevHandle is little-endian on the wire. */
520 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
521 memset(req, 0, sizeof(*req));
522 req->DevHandle = htole16(targ->handle);
523 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
524 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
526 /* SAS Hard Link Reset / SATA Link Reset */
527 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
531 cm->cm_desc.HighPriority.RequestFlags =
532 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
533 cm->cm_complete = mprsas_remove_device;
534 cm->cm_complete_data = (void *)(uintptr_t)handle;
536 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
537 __func__, targ->tid);
538 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
540 mpr_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM from mprsas_prepare_remove().
 * On success, reuses the same command frame to send the follow-up
 * SAS_IO_UNIT_CONTROL / REMOVE_DEVICE, then completes any commands still
 * queued on the target with CAM_DEV_NOT_THERE.
 */
544 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
546 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
547 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
548 struct mprsas_target *targ;
549 struct mpr_command *next_cm;
554 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
555 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
559 * Currently there should be no way we can hit this case. It only
560 * happens when we have a failure to allocate chain frames, and
561 * task management commands don't have S/G lists.
563 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
564 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
565 "handle %#04x! This should not happen!\n", __func__,
566 tm->cm_flags, handle);
567 mprsas_free_tm(sc, tm);
/* NULL reply: most likely a diag reset completed the command. */
572 /* XXX retry the remove after the diag reset completes? */
573 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
574 "0x%04x\n", __func__, handle);
575 mprsas_free_tm(sc, tm);
579 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
580 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
581 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
582 mprsas_free_tm(sc, tm);
586 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
587 le32toh(reply->TerminationCount));
/* Free the reply now; the frame is reused below for the op-remove. */
588 mpr_free_reply(sc, tm->cm_reply_data);
589 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
591 /* Reuse the existing command */
592 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
593 memset(req, 0, sizeof(*req));
594 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
595 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
596 req->DevHandle = htole16(handle);
598 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
599 tm->cm_complete = mprsas_remove_complete;
600 tm->cm_complete_data = (void *)(uintptr_t)handle;
602 mpr_map_command(sc, tm);
604 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
606 if (targ->encl_level_valid) {
607 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
608 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
609 targ->connector_name);
/* Fail anything still queued on the target; the device is gone. */
611 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
614 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
615 ccb = tm->cm_complete_data;
616 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
617 mprsas_scsiio_complete(sc, tm);
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE step.  On firmware
 * success, clears the target's enclosure/link state and frees its LUN
 * list; devname/sasaddr are deliberately preserved (see comment below).
 */
622 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
624 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
626 struct mprsas_target *targ;
627 struct mprsas_lun *lun;
631 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
632 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
635 * Currently there should be no way we can hit this case. It only
636 * happens when we have a failure to allocate chain frames, and
637 * task management commands don't have S/G lists.
639 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
640 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
641 "handle %#04x! This should not happen!\n", __func__,
642 tm->cm_flags, handle);
643 mprsas_free_tm(sc, tm);
648 /* most likely a chip reset */
649 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
650 "0x%04x\n", __func__, handle);
651 mprsas_free_tm(sc, tm);
655 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
656 __func__, handle, le16toh(reply->IOCStatus));
659 * Don't clear target if remove fails because things will get confusing.
660 * Leave the devname and sasaddr intact so that we know to avoid reusing
661 * this target id if possible, and so we can assign the same target id
662 * to this device if it comes back in the future.
664 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
667 targ->encl_handle = 0x0;
668 targ->encl_level_valid = 0x0;
669 targ->encl_level = 0x0;
/* Blank padding matches the %4s used when the name is printed. */
670 targ->connector_name[0] = ' ';
671 targ->connector_name[1] = ' ';
672 targ->connector_name[2] = ' ';
673 targ->connector_name[3] = ' ';
674 targ->encl_slot = 0x0;
675 targ->exp_dev_handle = 0x0;
677 targ->linkrate = 0x0;
680 targ->scsi_req_desc_type = 0;
/* Drain and free the per-target LUN list. */
682 while (!SLIST_EMPTY(&targ->luns)) {
683 lun = SLIST_FIRST(&targ->luns);
684 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
689 mprsas_free_tm(sc, tm);
/*
 * Register this driver's firmware event mask (topology, discovery, IR,
 * enclosure, temperature, ...) with the core so mprsas_evt_handler() is
 * called for each.  NOTE(review): the 'events' bitmask declaration/zeroing
 * is outside this view.
 */
693 mprsas_register_events(struct mpr_softc *sc)
698 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
700 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
701 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
703 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
704 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
705 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
706 setbit(events, MPI2_EVENT_IR_VOLUME);
707 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
708 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
709 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
712 &sc->sassc->mprsas_eh);
/*
 * Attach the SAS/CAM transport layer: allocate the sassc and target array,
 * create the SIM and its queue, spin up the event taskqueue, register the
 * bus with CAM, enter startup (simq frozen) mode, and register async and
 * firmware event handlers.  Returns 0 on success, nonzero on failure
 * (error paths are mostly outside this view).
 */
718 mpr_attach_sas(struct mpr_softc *sc)
720 struct mprsas_softc *sassc;
/* NOTE(review): M_WAITOK mallocs never return NULL on FreeBSD, so the
 * NULL checks below are dead code — harmless, kept as-is. */
726 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
728 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
734 * XXX MaxTargets could change during a reinit. Since we don't
735 * resize the targets[] array during such an event, cache the value
736 * of MaxTargets here so that we don't get into trouble later. This
737 * should move into the reinit logic.
739 sassc->maxtargets = sc->facts->MaxTargets;
740 sassc->targets = malloc(sizeof(struct mprsas_target) *
741 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
742 if (!sassc->targets) {
743 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
/* SIM queue sized to the number of request frames. */
751 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
752 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
757 unit = device_get_unit(sc->mpr_dev);
758 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
759 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
760 if (sassc->sim == NULL) {
761 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
766 TAILQ_INIT(&sassc->ev_queue);
768 /* Initialize taskqueue for Event Handling */
769 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
770 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
771 taskqueue_thread_enqueue, &sassc->ev_tq);
773 /* Run the task queue with lowest priority */
774 taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
775 device_get_nameunit(sc->mpr_dev));
780 * XXX There should be a bus for every port on the adapter, but since
781 * we're just going to fake the topology for now, we'll pretend that
782 * everything is just a target on a single bus.
784 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
785 mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
792 * Assume that discovery events will start right away.
794 * Hold off boot until discovery is complete.
796 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
797 sc->sassc->startup_refcount = 0;
798 mprsas_startup_increment(sassc);
800 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
803 * Register for async events so we can determine the EEDP
804 * capabilities of devices.
806 status = xpt_create_path(&sassc->path, /*periph*/NULL,
807 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
809 if (status != CAM_REQ_CMP) {
810 mpr_printf(sc, "Error %#x creating sim path\n", status);
/* Newer CAM reports EEDP via AC_ADVINFO_CHANGED; older only FOUND_DEVICE. */
815 #if (__FreeBSD_version >= 1000006) || \
816 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
817 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
819 event = AC_FOUND_DEVICE;
823 * Prior to the CAM locking improvements, we can't call
824 * xpt_register_async() with a particular path specified.
826 * If a path isn't specified, xpt_register_async() will
827 * generate a wildcard path and acquire the XPT lock while
828 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
829 * It will then drop the XPT lock once that is done.
831 * If a path is specified for xpt_register_async(), it will
832 * not acquire and drop the XPT lock around the call to
833 * xpt_action(). xpt_action() asserts that the caller
834 * holds the SIM lock, so the SIM lock has to be held when
835 * calling xpt_register_async() when the path is specified.
837 * But xpt_register_async calls xpt_for_all_devices(),
838 * which calls xptbustraverse(), which will acquire each
839 * SIM lock. When it traverses our particular bus, it will
840 * necessarily acquire the SIM lock, which will lead to a
841 * recursive lock acquisition.
843 * The CAM locking changes fix this problem by acquiring
844 * the XPT topology lock around bus traversal in
845 * xptbustraverse(), so the caller can hold the SIM lock
846 * and it does not cause a recursive lock acquisition.
848 * These __FreeBSD_version values are approximate, especially
849 * for stable/10, which is two months later than the actual
853 #if (__FreeBSD_version < 1000703) || \
854 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
856 status = xpt_register_async(event, mprsas_async, sc,
860 status = xpt_register_async(event, mprsas_async, sc,
864 if (status != CAM_REQ_CMP) {
865 mpr_dprint(sc, MPR_ERROR,
866 "Error %#x registering async handler for "
867 "AC_ADVINFO_CHANGED events\n", status);
868 xpt_free_path(sassc->path);
/* EEDP registration failure is non-fatal: warn and continue attach. */
872 if (status != CAM_REQ_CMP) {
874 * EEDP use is the exception, not the rule.
875 * Warn the user, but do not fail to attach.
877 mpr_printf(sc, "EEDP capabilities disabled.\n");
882 mprsas_register_events(sc);
/*
 * Tear down the SAS/CAM layer in reverse of attach: deregister firmware
 * events, drain/free the event taskqueue (lock dropped to avoid deadlock),
 * deregister the async handler and bus, free the SIM/queue, and free all
 * per-target LUN lists and the target array.
 */
890 mpr_detach_sas(struct mpr_softc *sc)
892 struct mprsas_softc *sassc;
893 struct mprsas_lun *lun, *lun_tmp;
894 struct mprsas_target *targ;
/* Nothing to do if the SAS layer never attached. */
899 if (sc->sassc == NULL)
903 mpr_deregister_events(sc, sassc->mprsas_eh);
906 * Drain and free the event handling taskqueue with the lock
907 * unheld so that any parallel processing tasks drain properly
908 * without deadlocking.
910 if (sassc->ev_tq != NULL)
911 taskqueue_free(sassc->ev_tq);
913 /* Make sure CAM doesn't wedge if we had to bail out early. */
916 /* Deregister our async handler */
917 if (sassc->path != NULL) {
918 xpt_register_async(0, mprsas_async, sc, sassc->path);
919 xpt_free_path(sassc->path);
/* If we never left startup mode the simq is still frozen; release it. */
923 if (sassc->flags & MPRSAS_IN_STARTUP)
924 xpt_release_simq(sassc->sim, 1);
926 if (sassc->sim != NULL) {
927 xpt_bus_deregister(cam_sim_path(sassc->sim));
928 cam_sim_free(sassc->sim, FALSE);
931 sassc->flags |= MPRSAS_SHUTDOWN;
934 if (sassc->devq != NULL)
935 cam_simq_free(sassc->devq);
937 for (i = 0; i < sassc->maxtargets; i++) {
938 targ = &sassc->targets[i];
939 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
943 free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery finishes; cancel the pending discovery
 * watchdog callout, if any.
 */
951 mprsas_discovery_end(struct mprsas_softc *sassc)
953 struct mpr_softc *sc = sassc->sc;
957 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
958 callout_stop(&sassc->discovery_callout);
/*
 * CAM action entry point for the mpr SIM: dispatches each CCB by
 * func_code.  Handles path inquiry, transport settings, geometry, device
 * reset, SCSI I/O, and (on supported CAM versions) SMP passthrough; other
 * func codes get CAM_FUNC_NOTAVAIL.  Runs with the softc mutex held.
 */
963 mprsas_action(struct cam_sim *sim, union ccb *ccb)
965 struct mprsas_softc *sassc;
967 sassc = cam_sim_softc(sim);
969 MPR_FUNCTRACE(sassc->sc);
970 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
971 ccb->ccb_h.func_code);
972 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
974 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: report controller capabilities to CAM. */
977 struct ccb_pathinq *cpi = &ccb->cpi;
979 cpi->version_num = 1;
980 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
981 cpi->target_sprt = 0;
982 #if (__FreeBSD_version >= 1000039) || \
983 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
984 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
986 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
988 cpi->hba_eng_cnt = 0;
989 cpi->max_target = sassc->maxtargets - 1;
/* The HBA itself claims the highest target id. */
991 cpi->initiator_id = sassc->maxtargets - 1;
992 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
993 strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
994 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
995 cpi->unit_number = cam_sim_unit(sim);
996 cpi->bus_id = cam_sim_bus(sim);
998 * XXXSLM-I think this needs to change based on config page or
999 * something instead of hardcoded to 150000.
1001 cpi->base_transfer_speed = 150000;
1002 cpi->transport = XPORT_SAS;
1003 cpi->transport_version = 0;
1004 cpi->protocol = PROTO_SCSI;
1005 cpi->protocol_version = SCSI_REV_SPC;
1006 #if __FreeBSD_version >= 800001
1008 * XXXSLM-probably need to base this number on max SGL's and
1011 cpi->maxio = 256 * 1024;
1013 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1016 case XPT_GET_TRAN_SETTINGS:
1018 struct ccb_trans_settings *cts;
1019 struct ccb_trans_settings_sas *sas;
1020 struct ccb_trans_settings_scsi *scsi;
1021 struct mprsas_target *targ;
1024 sas = &cts->xport_specific.sas;
1025 scsi = &cts->proto_specific.scsi;
1027 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1028 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1029 cts->ccb_h.target_id));
1030 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle == 0 means no device currently at this target id. */
1031 if (targ->handle == 0x0) {
1032 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1036 cts->protocol_version = SCSI_REV_SPC2;
1037 cts->transport = XPORT_SAS;
1038 cts->transport_version = 0;
1040 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the firmware link-rate code to a bitrate in kb/s. */
1041 switch (targ->linkrate) {
1043 sas->bitrate = 150000;
1046 sas->bitrate = 300000;
1049 sas->bitrate = 600000;
1052 sas->bitrate = 1200000;
1058 cts->protocol = PROTO_SCSI;
1059 scsi->valid = CTS_SCSI_VALID_TQ;
1060 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1062 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1065 case XPT_CALC_GEOMETRY:
1066 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1067 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1070 mpr_dprint(sassc->sc, MPR_XINFO,
1071 "mprsas_action XPT_RESET_DEV\n");
1072 mprsas_action_resetdev(sassc, ccb);
/* Bus resets/aborts are not supported; report success to keep CAM happy. */
1077 mpr_dprint(sassc->sc, MPR_XINFO,
1078 "mprsas_action faking success for abort or reset\n");
1079 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1082 mprsas_action_scsiio(sassc, ccb);
1084 #if __FreeBSD_version >= 900026
1086 mprsas_action_smpio(sassc, ccb);
1090 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Post a CAM async event (e.g. AC_BUS_RESET / AC_SENT_BDR) on a
 * freshly-created path for the given target/lun, then free the path.
 * Wildcard ids announce bus-wide events.
 */
1098 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1099 target_id_t target_id, lun_id_t lun_id)
1101 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1102 struct cam_path *path;
1104 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1105 ac_code, target_id, (uintmax_t)lun_id);
1107 if (xpt_create_path(&path, NULL,
1108 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1109 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1114 xpt_async(ac_code, path, NULL);
1115 xpt_free_path(path);
/*
 * After a diag reset, force-complete every outstanding command with a NULL
 * reply: run completion callbacks, wake sleepers, and resynchronize the
 * active I/O counter.  Command slot 0 is skipped (reserved by convention).
 */
1119 mprsas_complete_all_commands(struct mpr_softc *sc)
1121 struct mpr_command *cm;
1126 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1128 /* complete all commands with a NULL reply */
1129 for (i = 1; i < sc->num_reqs; i++) {
1130 cm = &sc->commands[i];
1131 cm->cm_reply = NULL;
/* Polled commands just need the COMPLETE flag to satisfy the poller. */
1134 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1135 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1137 if (cm->cm_complete != NULL) {
1138 mprsas_log_command(cm, MPR_RECOVERY,
1139 "completing cm %p state %x ccb %p for diag "
1140 "reset\n", cm, cm->cm_state, cm->cm_ccb);
1141 cm->cm_complete(sc, cm);
1145 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1146 mprsas_log_command(cm, MPR_RECOVERY,
1147 "waking up cm %p state %x ccb %p for diag reset\n",
1148 cm, cm->cm_state, cm->cm_ccb);
/* Decrement io_cmds_active per completed command; warn on underflow. */
1153 if (cm->cm_sc->io_cmds_active != 0) {
1154 cm->cm_sc->io_cmds_active--;
1156 mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
1157 "io_cmds_active is out of sync - resynching to "
1161 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1162 /* this should never happen, but if it does, log */
1163 mprsas_log_command(cm, MPR_RECOVERY,
1164 "cm %p state %x flags 0x%x ccb %p during diag "
1165 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Driver-side recovery after a controller diag reset: re-enter startup
 * mode (simq frozen), announce a bus reset to CAM, flush all outstanding
 * commands, and invalidate every cached device handle so discovery can
 * repopulate them.
 */
1172 mprsas_handle_reinit(struct mpr_softc *sc)
1176 /* Go back into startup mode and freeze the simq, so that CAM
1177 * doesn't send any commands until after we've rediscovered all
1178 * targets and found the proper device handles for them.
1180 * After the reset, portenable will trigger discovery, and after all
1181 * discovery-related activities have finished, the simq will be
1184 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1185 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1186 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1187 mprsas_startup_increment(sc->sassc);
1189 /* notify CAM of a bus reset */
1190 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1193 /* complete and cleanup after all outstanding commands */
1194 mprsas_complete_all_commands(sc);
1196 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1197 __func__, sc->sassc->startup_refcount);
1199 /* zero all the target handles, since they may change after the
1200 * reset, and we have to rediscover all the targets and use the new
1203 for (i = 0; i < sc->sassc->maxtargets; i++) {
1204 if (sc->sassc->targets[i].outstanding != 0)
1205 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1206 i, sc->sassc->targets[i].outstanding);
1207 sc->sassc->targets[i].handle = 0x0;
1208 sc->sassc->targets[i].exp_dev_handle = 0x0;
1209 sc->sassc->targets[i].outstanding = 0;
1210 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management (TM) command fails to
 * complete within its timeout (armed by mprsas_send_reset() /
 * mprsas_send_abort()).  Requires the softc mutex to be held (asserted
 * below) and logs the stuck TM.
 * NOTE(review): the recovery action taken after the log is not visible
 * in this excerpt -- confirm in the full source.
 */
1214 mprsas_tm_timeout(void *data)
1216 struct mpr_command *tm = data;
1217 struct mpr_softc *sc = tm->cm_sc;
1219 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1221 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1222 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL UNIT RESET task-management command
 * (installed by mprsas_send_reset()).  Cancels the TM timeout callout,
 * then:
 *  - frees the TM on a chain-frame allocation error (should be
 *    impossible for TM commands, which carry no S/G list);
 *  - treats a NULL reply during a diagnostic reset as simple cleanup;
 *  - otherwise counts commands still outstanding on the LUN: zero means
 *    recovery for this LUN is finished (announce AC_SENT_BDR to CAM and
 *    move on to any remaining timed-out command, or free the TM);
 *    nonzero means the reset effectively failed and recovery escalates
 *    to a full target reset.
 */
1227 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1228 struct mpr_command *tm)
1230 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1231 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1232 unsigned int cm_count = 0;
1233 struct mpr_command *cm;
1234 struct mprsas_target *targ;
1236 callout_stop(&tm->cm_callout);
1238 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1239 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1243 * Currently there should be no way we can hit this case. It only
1244 * happens when we have a failure to allocate chain frames, and
1245 * task management commands don't have S/G lists.
1247 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1248 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1249 "This should not happen!\n", __func__, tm->cm_flags);
1250 mprsas_free_tm(sc, tm);
1254 if (reply == NULL) {
1255 mprsas_log_command(tm, MPR_RECOVERY,
1256 "NULL reset reply for tm %p\n", tm);
1257 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1258 /* this completion was due to a reset, just cleanup */
1260 mprsas_free_tm(sc, tm);
1263 /* we should have gotten a reply. */
/* Log the firmware's view of the reset (status/response/termination). */
1269 mprsas_log_command(tm, MPR_RECOVERY,
1270 "logical unit reset status 0x%x code 0x%x count %u\n",
1271 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1272 le32toh(reply->TerminationCount));
1274 /* See if there are any outstanding commands for this LUN.
1275 * This could be made more efficient by using a per-LU data
1276 * structure of some sort.
1278 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1279 if (cm->cm_lun == tm->cm_lun)
1283 if (cm_count == 0) {
1284 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1285 "logical unit %u finished recovery after reset\n",
1288 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1291 /* we've finished recovery for this logical unit. check and
1292 * see if some other logical unit has a timedout command
1293 * that needs to be processed.
1295 cm = TAILQ_FIRST(&targ->timedout_commands);
1297 mprsas_send_abort(sc, tm, cm);
1301 mprsas_free_tm(sc, tm);
1305 /* if we still have commands for this LUN, the reset
1306 * effectively failed, regardless of the status reported.
1307 * Escalate to a target reset.
1309 mprsas_log_command(tm, MPR_RECOVERY,
1310 "logical unit reset complete for tm %p, but still have %u "
1311 "command(s)\n", tm, cm_count);
1312 mprsas_send_reset(sc, tm,
1313 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management command
 * (installed by mprsas_send_reset()).  Mirrors
 * mprsas_logical_unit_reset_complete(): cancel the TM timeout, bail out
 * on chain-frame errors or a diag-reset NULL reply, then use
 * targ->outstanding to decide whether recovery is finished (announce
 * AC_SENT_BDR and free the TM) or the reset effectively failed.
 * NOTE(review): the escalation path after the final log is not visible
 * in this excerpt -- confirm in the full source.
 */
1318 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1320 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1321 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1322 struct mprsas_target *targ;
1324 callout_stop(&tm->cm_callout);
1326 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1327 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1331 * Currently there should be no way we can hit this case. It only
1332 * happens when we have a failure to allocate chain frames, and
1333 * task management commands don't have S/G lists.
1335 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1336 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1337 "reset! This should not happen!\n", __func__, tm->cm_flags);
1338 mprsas_free_tm(sc, tm);
1342 if (reply == NULL) {
1343 mprsas_log_command(tm, MPR_RECOVERY,
1344 "NULL reset reply for tm %p\n", tm);
1345 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1346 /* this completion was due to a reset, just cleanup */
1348 mprsas_free_tm(sc, tm);
1351 /* we should have gotten a reply. */
1357 mprsas_log_command(tm, MPR_RECOVERY,
1358 "target reset status 0x%x code 0x%x count %u\n",
1359 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1360 le32toh(reply->TerminationCount));
1362 if (targ->outstanding == 0) {
1363 /* we've finished recovery for this target and all
1364 * of its logical units.
1366 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1367 "recovery finished after target reset\n");
1369 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1373 mprsas_free_tm(sc, tm);
1376 /* after a target reset, if this target still has
1377 * outstanding commands, the reset effectively failed,
1378 * regardless of the status reported. escalate.
1380 mprsas_log_command(tm, MPR_RECOVERY,
1381 "target reset complete for tm %p, but still have %u "
1382 "command(s)\n", tm, targ->outstanding);
1387 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset for the target bound to
 * TM command 'tm'.  'type' selects the flavor:
 *  - MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: addresses tm->cm_lun
 *    and completes via mprsas_logical_unit_reset_complete();
 *  - MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: link-reset method, LUN
 *    wildcard, completes via mprsas_target_reset_complete().
 * Uses the high-priority request descriptor, arms a MPR_RESET_TIMEOUT-
 * second callout (mprsas_tm_timeout), and submits the TM through
 * mpr_map_command(), logging any submission error.
 */
1390 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1392 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1393 struct mprsas_target *target;
1396 target = tm->cm_targ;
/* A zero device handle means the target is not usable; nothing to reset. */
1397 if (target->handle == 0) {
1398 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1399 "%d\n", __func__, target->tid);
1403 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1404 req->DevHandle = htole16(target->handle);
1405 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1406 req->TaskType = type;
1408 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1409 /* XXX Need to handle invalid LUNs */
1410 MPR_SET_LUN(req->LUN, tm->cm_lun);
1411 tm->cm_targ->logical_unit_resets++;
1412 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1413 "sending logical unit reset\n");
1414 tm->cm_complete = mprsas_logical_unit_reset_complete;
1415 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1417 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1419 * Target reset method =
1420 * SAS Hard Link Reset / SATA Link Reset
1422 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1423 tm->cm_targ->target_resets++;
1424 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1425 "sending target reset\n");
1426 tm->cm_complete = mprsas_target_reset_complete;
1427 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1430 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1434 mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
1436 if (target->encl_level_valid) {
1437 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
1438 "connector name (%4s)\n", target->encl_level,
1439 target->encl_slot, target->connector_name);
/* TM requests always go out on the high-priority descriptor queue. */
1443 tm->cm_desc.HighPriority.RequestFlags =
1444 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1445 tm->cm_complete_data = (void *)tm;
1447 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1448 mprsas_tm_timeout, tm);
1450 err = mpr_map_command(sc, tm);
1452 mprsas_log_command(tm, MPR_RECOVERY,
1453 "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT TASK task-management command
 * (installed by mprsas_send_abort()).  After canceling the timeout and
 * performing the usual chain-frame-error / NULL-reply checks, inspects
 * the target's timedout_commands list:
 *  - list empty: recovery for this target is complete, free the TM;
 *  - head command's SMID differs from the TaskMID just aborted: that
 *    abort succeeded, continue by aborting the next timed-out command;
 *  - otherwise the aborted command never completed, so the abort is
 *    deemed failed and recovery escalates to a logical-unit reset.
 */
1460 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1462 struct mpr_command *cm;
1463 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1464 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1465 struct mprsas_target *targ;
1467 callout_stop(&tm->cm_callout);
1469 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1470 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1474 * Currently there should be no way we can hit this case. It only
1475 * happens when we have a failure to allocate chain frames, and
1476 * task management commands don't have S/G lists.
1478 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1479 mprsas_log_command(tm, MPR_RECOVERY,
1480 "cm_flags = %#x for abort %p TaskMID %u!\n",
1481 tm->cm_flags, tm, le16toh(req->TaskMID));
1482 mprsas_free_tm(sc, tm);
1486 if (reply == NULL) {
1487 mprsas_log_command(tm, MPR_RECOVERY,
1488 "NULL abort reply for tm %p TaskMID %u\n",
1489 tm, le16toh(req->TaskMID));
1490 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1491 /* this completion was due to a reset, just cleanup */
1493 mprsas_free_tm(sc, tm);
1496 /* we should have gotten a reply. */
1502 mprsas_log_command(tm, MPR_RECOVERY,
1503 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1504 le16toh(req->TaskMID),
1505 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1506 le32toh(reply->TerminationCount));
1508 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1510 /* if there are no more timedout commands, we're done with
1511 * error recovery for this target.
1513 mprsas_log_command(tm, MPR_RECOVERY,
1514 "finished recovery after aborting TaskMID %u\n",
1515 le16toh(req->TaskMID));
1518 mprsas_free_tm(sc, tm);
1520 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1521 /* abort success, but we have more timedout commands to abort */
1522 mprsas_log_command(tm, MPR_RECOVERY,
1523 "continuing recovery after aborting TaskMID %u\n",
1524 le16toh(req->TaskMID));
1526 mprsas_send_abort(sc, tm, cm);
1529 /* we didn't get a command completion, so the abort
1530 * failed as far as we're concerned. escalate.
1532 mprsas_log_command(tm, MPR_RECOVERY,
1533 "abort failed for TaskMID %u tm %p\n",
1534 le16toh(req->TaskMID), tm);
1536 mprsas_send_reset(sc, tm,
1537 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1541 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK task-management request for the
 * timed-out command 'cm', carried by TM command 'tm'.  The aborted
 * command is identified to the firmware by its SMID in TaskMID.
 * Installs mprsas_abort_complete() as the completion handler, arms a
 * MPR_ABORT_TIMEOUT-second callout (mprsas_tm_timeout), and submits the
 * TM via mpr_map_command(), logging any submission error.
 */
1544 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1545 struct mpr_command *cm)
1547 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1548 struct mprsas_target *targ;
/* A zero device handle means the target is gone; nothing to abort against. */
1552 if (targ->handle == 0) {
1553 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1554 __func__, cm->cm_ccb->ccb_h.target_id);
1558 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1559 "Aborting command %p\n", cm);
1561 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1562 req->DevHandle = htole16(targ->handle);
1563 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1564 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1566 /* XXX Need to handle invalid LUNs */
1567 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1569 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests always go out on the high-priority descriptor queue. */
1572 tm->cm_desc.HighPriority.RequestFlags =
1573 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1574 tm->cm_complete = mprsas_abort_complete;
1575 tm->cm_complete_data = (void *)tm;
1576 tm->cm_targ = cm->cm_targ;
1577 tm->cm_lun = cm->cm_lun;
1579 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1580 mprsas_tm_timeout, tm);
1584 mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1585 __func__, targ->tid);
1586 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1588 err = mpr_map_command(sc, tm);
1590 mprsas_log_command(tm, MPR_RECOVERY,
1591 "error %d sending abort for cm %p SMID %u\n",
1592 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O command (armed in
 * mprsas_action_scsiio()).  First runs the interrupt handler in case
 * the completion is merely pending; if the command completed in the
 * meantime (state is FREE) the timeout is a near miss and only logged.
 * Otherwise the command is marked MPR_CM_STATE_TIMEDOUT, its CCB status
 * set to CAM_CMD_TIMEOUT, and it is queued on the target's
 * timedout_commands list.  Recovery is then started by allocating a TM
 * command and aborting the first timed-out command -- or, if the target
 * already has a TM in flight, the command simply waits its turn.
 */
1597 mprsas_scsiio_timeout(void *data)
1599 struct mpr_softc *sc;
1600 struct mpr_command *cm;
1601 struct mprsas_target *targ;
1603 cm = (struct mpr_command *)data;
1607 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1609 mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1612 * Run the interrupt handler to make sure it's not pending. This
1613 * isn't perfect because the command could have already completed
1614 * and been re-used, though this is unlikely.
1616 mpr_intr_locked(sc);
1617 if (cm->cm_state == MPR_CM_STATE_FREE) {
1618 mprsas_log_command(cm, MPR_XINFO,
1619 "SCSI command %p almost timed out\n", cm);
1623 if (cm->cm_ccb == NULL) {
1624 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1631 mprsas_log_command(cm, MPR_ERROR, "command timeout cm %p ccb %p "
1632 "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
1634 if (targ->encl_level_valid) {
1635 mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
1636 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1637 targ->connector_name);
1640 /* XXX first, check the firmware state, to see if it's still
1641 * operational. if not, do a diag reset.
1643 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1644 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1645 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1647 if (targ->tm != NULL) {
1648 /* target already in recovery, just queue up another
1649 * timedout command to be processed later.
1651 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1652 "processing by tm %p\n", cm, targ->tm);
1654 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1655 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1658 /* start recovery by aborting the first timedout command */
1659 mprsas_send_abort(sc, targ->tm, cm);
1662 /* XXX queue this target up for recovery once a TM becomes
1663 * available. The firmware only has a limited number of
1664 * HighPriority credits for the high priority requests used
1665 * for task management, and we ran out.
1667 * Isilon: don't worry about this for now, since we have
1668 * more credits than disks in an enclosure, and limit
1669 * ourselves to one TM per target for recovery.
1671 mpr_dprint(sc, MPR_RECOVERY,
1672 "timedout cm %p failed to allocate a tm\n", cm)
/*
 * CAM XPT_SCSI_IO action handler.  Validates the target and CCB state
 * (handle present, not a RAID component, request still in progress, no
 * removal/shutdown/reset in flight), allocates a driver command,
 * translates the CCB into an MPI2 SCSI IO request (data direction,
 * tagging, TLR bits, LUN, CDB, and optional EEDP/T10-PI fields), arms
 * the per-command timeout callout, and submits the request to the
 * hardware via mpr_map_command().  On resource exhaustion the SIM queue
 * is frozen and the CCB requeued.
 */
1677 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1679 MPI2_SCSI_IO_REQUEST *req;
1680 struct ccb_scsiio *csio;
1681 struct mpr_softc *sc;
1682 struct mprsas_target *targ;
1683 struct mprsas_lun *lun;
1684 struct mpr_command *cm;
1685 uint8_t i, lba_byte, *ref_tag_addr;
1686 uint16_t eedp_flags;
1687 uint32_t mpi_control;
1691 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1694 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1695 ("Target %d out of bounds in XPT_SCSI_IO\n",
1696 csio->ccb_h.target_id));
1697 targ = &sassc->targets[csio->ccb_h.target_id];
1698 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Reject I/O to targets with no device handle (not discovered / removed). */
1699 if (targ->handle == 0x0) {
1700 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1701 __func__, csio->ccb_h.target_id);
1702 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1706 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1707 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1708 "supported %u\n", __func__, csio->ccb_h.target_id);
1709 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1714 * Sometimes, it is possible to get a command that is not "In
1715 * Progress" and was actually aborted by the upper layer. Check for
1716 * this here and complete the command without error.
1718 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1719 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1720 "target %u\n", __func__, csio->ccb_h.target_id);
1725 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1726 * that the volume has timed out. We want volumes to be enumerated
1727 * until they are deleted/removed, not just failed.
1729 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1730 if (targ->devinfo == 0)
1731 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1733 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1738 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1739 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1740 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1746 * If target has a reset in progress, freeze the devq and return. The
1747 * devq will be released when the TM reset is finished.
1749 if (targ->flags & MPRSAS_TARGET_INRESET) {
1750 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1751 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1752 __func__, targ->tid);
1753 xpt_freeze_devq(ccb->ccb_h.path, 1);
/* No free commands (or diag reset pending): freeze the SIM and requeue. */
1758 cm = mpr_alloc_command(sc);
1759 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1761 mpr_free_command(sc, cm);
1763 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1764 xpt_freeze_simq(sassc->sim, 1);
1765 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1767 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1768 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Fill in the MPI2 SCSI IO request frame from the CCB. */
1773 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1774 bzero(req, sizeof(*req));
1775 req->DevHandle = htole16(targ->handle);
1776 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1778 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1779 req->SenseBufferLength = MPR_SENSE_LEN;
1781 req->ChainOffset = 0;
1782 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1787 req->DataLength = htole32(csio->dxfer_len);
1788 req->BidirectionalDataLength = 0;
1789 req->IoFlags = htole16(csio->cdb_len);
1792 /* Note: BiDirectional transfers are not supported */
1793 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1795 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1796 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1799 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1800 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1804 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1808 if (csio->cdb_len == 32)
1809 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1811 * It looks like the hardware doesn't require an explicit tag
1812 * number for each transaction. SAM Task Management not supported
1815 switch (csio->tag_action) {
1816 case MSG_HEAD_OF_Q_TAG:
1817 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1819 case MSG_ORDERED_Q_TAG:
1820 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1823 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1825 case CAM_TAG_ACTION_NONE:
1826 case MSG_SIMPLE_Q_TAG:
1828 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1831 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1832 req->Control = htole32(mpi_control);
1834 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1835 mpr_free_command(sc, cm);
1836 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from wherever the CCB carries it (pointer vs. inline). */
1841 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1842 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1844 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1845 req->IoFlags = htole16(csio->cdb_len);
1848 * Check if EEDP is supported and enabled. If it is then check if the
1849 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1850 * is formatted for EEDP support. If all of this is true, set CDB up
1851 * for EEDP transfer.
1853 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1854 if (sc->eedp_enabled && eedp_flags) {
1855 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1856 if (lun->lun_id == csio->ccb_h.target_lun) {
1861 if ((lun != NULL) && (lun->eedp_formatted)) {
1862 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1863 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1864 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1865 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1866 req->EEDPFlags = htole16(eedp_flags);
1869 * If CDB less than 32, fill in Primary Ref Tag with
1870 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1871 * already there. Also, set protection bit. FreeBSD
1872 * currently does not support CDBs bigger than 16, but
1873 * the code doesn't hurt, and will be here for the
1876 if (csio->cdb_len != 32) {
1877 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1878 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1879 PrimaryReferenceTag;
1880 for (i = 0; i < 4; i++) {
1882 req->CDB.CDB32[lba_byte + i];
1885 req->CDB.EEDP32.PrimaryReferenceTag =
1887 CDB.EEDP32.PrimaryReferenceTag);
1888 req->CDB.EEDP32.PrimaryApplicationTagMask =
1890 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1894 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1895 req->EEDPFlags = htole16(eedp_flags);
1896 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Bind the data buffer and completion plumbing to the command. */
1902 cm->cm_length = csio->dxfer_len;
1903 if (cm->cm_length != 0) {
1905 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
1909 cm->cm_sge = &req->SGL;
1910 cm->cm_sglsize = (32 - 24) * 4;
1911 cm->cm_complete = mprsas_scsiio_complete;
1912 cm->cm_complete_data = ccb;
1914 cm->cm_lun = csio->ccb_h.target_lun;
1917 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1918 * and set descriptor type.
1920 if (targ->scsi_req_desc_type ==
1921 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1922 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1923 cm->cm_desc.FastPathSCSIIO.RequestFlags =
1924 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1925 cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1927 cm->cm_desc.SCSIIO.RequestFlags =
1928 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1929 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Arm the CCB timeout (ms) and account the command as outstanding. */
1932 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1933 mprsas_scsiio_timeout, cm, 0);
1936 targ->outstanding++;
1937 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1938 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1940 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1941 __func__, cm, ccb, targ->outstanding);
1943 mpr_map_command(sc, cm);
/*
 * Translate an MPI2 task-management ResponseCode value into a
 * human-readable string and emit it at the MPR_XINFO debug level.
 */
1948 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1952 switch (response_code) {
1953 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1954 desc = "task management request completed";
1956 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1957 desc = "invalid frame";
1959 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1960 desc = "task management request not supported";
1962 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1963 desc = "task management request failed";
1965 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1966 desc = "task management request succeeded";
1968 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1969 desc = "invalid lun";
1972 desc = "overlapped tag attempted";
1974 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1975 desc = "task queued, however not sent to target";
1981 mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1986 * mpr_sc_failed_io_info - translates a non-successful SCSI_IO request
/*
 * Decode the IOCStatus, SCSIStatus, and SCSIState fields of a failed
 * SCSI_IO reply into human-readable strings and log them (with
 * enclosure location, autosense data, and response info when present)
 * at the MPR_XINFO debug level.  Purely diagnostic; does not alter
 * driver state beyond sc->tmp_string, which is used as scratch for the
 * scsi_state description.
 */
1989 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1990 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1994 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1995 MPI2_IOCSTATUS_MASK;
1996 u8 scsi_state = mpi_reply->SCSIState;
1997 u8 scsi_status = mpi_reply->SCSIStatus;
1998 char *desc_ioc_state = NULL;
1999 char *desc_scsi_status = NULL;
2000 char *desc_scsi_state = sc->tmp_string;
2001 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is special-cased; the action taken is not visible in this
 * excerpt -- NOTE(review): confirm against the full source. */
2003 if (log_info == 0x31170000)
/* Map the masked IOCStatus to a description. */
2006 switch (ioc_status) {
2007 case MPI2_IOCSTATUS_SUCCESS:
2008 desc_ioc_state = "success";
2010 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2011 desc_ioc_state = "invalid function";
2013 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2014 desc_ioc_state = "scsi recovered error";
2016 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2017 desc_ioc_state = "scsi invalid dev handle";
2019 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2020 desc_ioc_state = "scsi device not there";
2022 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2023 desc_ioc_state = "scsi data overrun";
2025 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2026 desc_ioc_state = "scsi data underrun";
2028 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2029 desc_ioc_state = "scsi io data error";
2031 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2032 desc_ioc_state = "scsi protocol error";
2034 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2035 desc_ioc_state = "scsi task terminated";
2037 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2038 desc_ioc_state = "scsi residual mismatch";
2040 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2041 desc_ioc_state = "scsi task mgmt failed";
2043 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2044 desc_ioc_state = "scsi ioc terminated";
2046 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2047 desc_ioc_state = "scsi ext terminated";
2049 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2050 desc_ioc_state = "eedp guard error";
2052 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2053 desc_ioc_state = "eedp ref tag error";
2055 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2056 desc_ioc_state = "eedp app tag error";
2059 desc_ioc_state = "unknown";
/* Map the SCSI status byte to a description. */
2063 switch (scsi_status) {
2064 case MPI2_SCSI_STATUS_GOOD:
2065 desc_scsi_status = "good";
2067 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2068 desc_scsi_status = "check condition";
2070 case MPI2_SCSI_STATUS_CONDITION_MET:
2071 desc_scsi_status = "condition met";
2073 case MPI2_SCSI_STATUS_BUSY:
2074 desc_scsi_status = "busy";
2076 case MPI2_SCSI_STATUS_INTERMEDIATE:
2077 desc_scsi_status = "intermediate";
2079 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2080 desc_scsi_status = "intermediate condmet";
2082 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2083 desc_scsi_status = "reservation conflict";
2085 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2086 desc_scsi_status = "command terminated";
2088 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2089 desc_scsi_status = "task set full";
2091 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2092 desc_scsi_status = "aca active";
2094 case MPI2_SCSI_STATUS_TASK_ABORTED:
2095 desc_scsi_status = "task aborted";
2098 desc_scsi_status = "unknown";
/* Build a space-separated list of set SCSIState flag names in
 * sc->tmp_string (pointed to by desc_scsi_state). */
2102 desc_scsi_state[0] = '\0';
2104 desc_scsi_state = " ";
2105 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2106 strcat(desc_scsi_state, "response info ");
2107 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2108 strcat(desc_scsi_state, "state terminated ");
2109 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2110 strcat(desc_scsi_state, "no status ");
2111 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2112 strcat(desc_scsi_state, "autosense failed ");
2113 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2114 strcat(desc_scsi_state, "autosense valid ");
2116 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2117 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2118 if (targ->encl_level_valid) {
2119 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2120 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2121 targ->connector_name);
2123 /* We can add more detail about underflow data here
2126 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2127 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2128 desc_scsi_state, scsi_state);
2130 if (sc->mpr_debug & MPR_XINFO &&
2131 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2132 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2133 scsi_sense_print(csio);
2134 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2137 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2138 response_info = le32toh(mpi_reply->ResponseInfo);
2139 response_bytes = (u8 *)&response_info;
2140 mpr_response_code(sc,response_bytes[0]);
2145 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2147 MPI2_SCSI_IO_REPLY *rep;
2149 struct ccb_scsiio *csio;
2150 struct mprsas_softc *sassc;
2151 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2152 u8 *TLR_bits, TLR_on;
2155 struct mprsas_target *target;
2156 target_id_t target_id;
2159 mpr_dprint(sc, MPR_TRACE,
2160 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2161 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2162 cm->cm_targ->outstanding);
2164 callout_stop(&cm->cm_callout);
2165 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2168 ccb = cm->cm_complete_data;
2170 target_id = csio->ccb_h.target_id;
2171 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2173 * XXX KDM if the chain allocation fails, does it matter if we do
2174 * the sync and unload here? It is simpler to do it in every case,
2175 * assuming it doesn't cause problems.
2177 if (cm->cm_data != NULL) {
2178 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2179 dir = BUS_DMASYNC_POSTREAD;
2180 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2181 dir = BUS_DMASYNC_POSTWRITE;
2182 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2183 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2186 cm->cm_targ->completed++;
2187 cm->cm_targ->outstanding--;
2188 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2189 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2191 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2192 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2193 if (cm->cm_reply != NULL)
2194 mprsas_log_command(cm, MPR_RECOVERY,
2195 "completed timedout cm %p ccb %p during recovery "
2196 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2197 le16toh(rep->IOCStatus), rep->SCSIStatus,
2198 rep->SCSIState, le32toh(rep->TransferCount));
2200 mprsas_log_command(cm, MPR_RECOVERY,
2201 "completed timedout cm %p ccb %p during recovery\n",
2203 } else if (cm->cm_targ->tm != NULL) {
2204 if (cm->cm_reply != NULL)
2205 mprsas_log_command(cm, MPR_RECOVERY,
2206 "completed cm %p ccb %p during recovery "
2207 "ioc %x scsi %x state %x xfer %u\n",
2208 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2209 rep->SCSIStatus, rep->SCSIState,
2210 le32toh(rep->TransferCount));
2212 mprsas_log_command(cm, MPR_RECOVERY,
2213 "completed cm %p ccb %p during recovery\n",
2215 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2216 mprsas_log_command(cm, MPR_RECOVERY,
2217 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2220 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2222 * We ran into an error after we tried to map the command,
2223 * so we're getting a callback without queueing the command
2224 * to the hardware. So we set the status here, and it will
2225 * be retained below. We'll go through the "fast path",
2226 * because there can be no reply when we haven't actually
2227 * gone out to the hardware.
2229 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2232 * Currently the only error included in the mask is
2233 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2234 * chain frames. We need to freeze the queue until we get
2235 * a command that completed without this error, which will
2236 * hopefully have some chain frames attached that we can
2237 * use. If we wanted to get smarter about it, we would
2238 * only unfreeze the queue in this condition when we're
2239 * sure that we're getting some chain frames back. That's
2240 * probably unnecessary.
2242 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2243 xpt_freeze_simq(sassc->sim, 1);
2244 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2245 mpr_dprint(sc, MPR_INFO, "Error sending command, "
2246 "freezing SIM queue\n");
2251 * If this is a Start Stop Unit command and it was issued by the driver
2252 * during shutdown, decrement the refcount to account for all of the
2253 * commands that were sent. All SSU commands should be completed before
2254 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2257 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2258 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2262 /* Take the fast path to completion */
2263 if (cm->cm_reply == NULL) {
2264 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2265 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2266 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2268 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2269 csio->scsi_status = SCSI_STATUS_OK;
2271 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2272 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2273 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2274 mpr_dprint(sc, MPR_XINFO,
2275 "Unfreezing SIM queue\n");
2280 * There are two scenarios where the status won't be
2281 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2282 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2284 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2286 * Freeze the dev queue so that commands are
2287 * executed in the correct order after error
2290 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2291 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2293 mpr_free_command(sc, cm);
2298 mprsas_log_command(cm, MPR_XINFO,
2299 "ioc %x scsi %x state %x xfer %u\n",
2300 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2301 le32toh(rep->TransferCount));
2303 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2304 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2305 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2307 case MPI2_IOCSTATUS_SUCCESS:
2308 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2310 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2311 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2312 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2314 /* Completion failed at the transport level. */
2315 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2316 MPI2_SCSI_STATE_TERMINATED)) {
2317 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2321 /* In a modern packetized environment, an autosense failure
2322 * implies that there's not much else that can be done to
2323 * recover the command.
2325 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2326 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2331 * CAM doesn't care about SAS Response Info data, but if this is
2332 * the state check if TLR should be done. If not, clear the
2333 * TLR_bits for the target.
2335 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2336 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2337 == MPR_SCSI_RI_INVALID_FRAME)) {
2338 sc->mapping_table[target_id].TLR_bits =
2339 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2343 * Intentionally override the normal SCSI status reporting
2344 * for these two cases. These are likely to happen in a
2345 * multi-initiator environment, and we want to make sure that
2346 * CAM retries these commands rather than fail them.
2348 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2349 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2350 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2354 /* Handle normal status and sense */
2355 csio->scsi_status = rep->SCSIStatus;
2356 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2357 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2359 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2361 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2362 int sense_len, returned_sense_len;
2364 returned_sense_len = min(le32toh(rep->SenseCount),
2365 sizeof(struct scsi_sense_data));
2366 if (returned_sense_len < csio->sense_len)
2367 csio->sense_resid = csio->sense_len -
2370 csio->sense_resid = 0;
2372 sense_len = min(returned_sense_len,
2373 csio->sense_len - csio->sense_resid);
2374 bzero(&csio->sense_data, sizeof(csio->sense_data));
2375 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2376 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2380 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2381 * and it's page code 0 (Supported Page List), and there is
2382 * inquiry data, and this is for a sequential access device, and
2383 * the device is an SSP target, and TLR is supported by the
2384 * controller, turn the TLR_bits value ON if page 0x90 is
2387 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2388 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2389 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2390 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2391 (csio->data_ptr != NULL) &&
2392 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2393 (sc->control_TLR) &&
2394 (sc->mapping_table[target_id].device_info &
2395 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2396 vpd_list = (struct scsi_vpd_supported_page_list *)
2398 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2399 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2400 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2401 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2402 csio->cdb_io.cdb_bytes[4];
2403 alloc_len -= csio->resid;
2404 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2405 if (vpd_list->list[i] == 0x90) {
2413 * If this is a SATA direct-access end device, mark it so that
2414 * a SCSI StartStopUnit command will be sent to it when the
2415 * driver is being shutdown.
2417 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2418 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2419 (sc->mapping_table[target_id].device_info &
2420 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2421 ((sc->mapping_table[target_id].device_info &
2422 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2423 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2424 target = &sassc->targets[target_id];
2425 target->supports_SSU = TRUE;
2426 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2430 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2431 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2433 * If devinfo is 0 this will be a volume. In that case don't
2434 * tell CAM that the volume is not there. We want volumes to
2435 * be enumerated until they are deleted/removed, not just
2438 if (cm->cm_targ->devinfo == 0)
2439 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2441 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2443 case MPI2_IOCSTATUS_INVALID_SGL:
2444 mpr_print_scsiio_cmd(sc, cm);
2445 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2447 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2449 * This is one of the responses that comes back when an I/O
2450 * has been aborted. If it is because of a timeout that we
2451 * initiated, just set the status to CAM_CMD_TIMEOUT.
2452 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2453 * command is the same (it gets retried, subject to the
2454 * retry counter), the only difference is what gets printed
2457 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2458 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2460 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2462 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2463 /* resid is ignored for this condition */
2465 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2467 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2468 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2470 * Since these are generally external (i.e. hopefully
2471 * transient transport-related) errors, retry these without
2472 * decrementing the retry count.
2474 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2475 mprsas_log_command(cm, MPR_INFO,
2476 "terminated ioc %x scsi %x state %x xfer %u\n",
2477 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2478 le32toh(rep->TransferCount));
2480 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2481 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2482 case MPI2_IOCSTATUS_INVALID_VPID:
2483 case MPI2_IOCSTATUS_INVALID_FIELD:
2484 case MPI2_IOCSTATUS_INVALID_STATE:
2485 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2486 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2487 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2488 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2489 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2491 mprsas_log_command(cm, MPR_XINFO,
2492 "completed ioc %x scsi %x state %x xfer %u\n",
2493 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2494 le32toh(rep->TransferCount));
2495 csio->resid = cm->cm_length;
2496 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2500 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2502 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2503 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2504 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2505 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2509 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2510 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2511 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2514 mpr_free_command(sc, cm);
2518 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands sent via XPT_SMP_IO.
 * Translates the firmware's SMP_PASSTHROUGH reply into a CAM CCB status,
 * syncs and unloads the bidirectional data DMA map, and frees the command.
 * NOTE(review): original line numbering is non-contiguous here; some lines
 * (e.g. the NULL-reply test and early-out gotos) appear elided.
 */
2520 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2522 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2523 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2527 ccb = cm->cm_complete_data;
2530 * Currently there should be no way we can hit this case. It only
2531 * happens when we have a failure to allocate chain frames, and SMP
2532 * commands require two S/G elements only. That should be handled
2533 * in the standard request size.
2535 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2536 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2537 "request!\n", __func__, cm->cm_flags);
2538 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2542 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
/* A missing reply frame is reported as a completed-with-error CCB. */
2544 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2545 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request. */
2549 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2550 sasaddr = le32toh(req->SASAddress.Low);
2551 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
/* Both the IOC-level status and the SAS-level status must be SUCCESS. */
2553 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2554 MPI2_IOCSTATUS_SUCCESS ||
2555 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2556 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2557 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2558 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2562 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2563 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2565 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2566 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2568 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2572 * We sync in both directions because we had DMAs in the S/G list
2573 * in both directions.
2575 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2576 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2577 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2578 mpr_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * expander at 'sasaddr'.  Extracts single request/response buffers from
 * the CCB (S/G lists with more than one segment are rejected), fills an
 * MPI2_SMP_PASSTHROUGH_REQUEST, and maps it via a two-element uio so
 * mpr_map_command() can set up the bidirectional DMA in one call.
 * Completion is handled by mprsas_smpio_complete().
 * NOTE(review): original line numbering is non-contiguous; some
 * braces/labels and error-path lines appear elided.
 */
2583 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
2586 struct mpr_command *cm;
2587 uint8_t *request, *response;
2588 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2589 struct mpr_softc *sc;
/*
 * Newer FreeBSD: data-direction/type is encoded in CAM_DATA_MASK.
 * Physical addresses are not supported in either branch.
 */
2597 #if (__FreeBSD_version >= 1000028) || \
2598 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2599 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2600 case CAM_DATA_PADDR:
2601 case CAM_DATA_SG_PADDR:
2603 * XXX We don't yet support physical addresses here.
2605 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2606 "supported\n", __func__);
2607 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2612 * The chip does not support more than one buffer for the
2613 * request or response.
2615 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2616 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2617 mpr_dprint(sc, MPR_ERROR,
2618 "%s: multiple request or response buffer segments "
2619 "not supported for SMP\n", __func__);
2620 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2626 * The CAM_SCATTER_VALID flag was originally implemented
2627 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2628 * We have two. So, just take that flag to mean that we
2629 * might have S/G lists, and look at the S/G segment count
2630 * to figure out whether that is the case for each individual
2633 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2634 bus_dma_segment_t *req_sg;
2636 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2637 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2639 request = ccb->smpio.smp_request;
2641 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2642 bus_dma_segment_t *rsp_sg;
2644 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2645 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2647 response = ccb->smpio.smp_response;
2649 case CAM_DATA_VADDR:
2650 request = ccb->smpio.smp_request;
2651 response = ccb->smpio.smp_response;
2654 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
/* Older FreeBSD: flags-bit based checks instead of CAM_DATA_MASK. */
2658 #else /* __FreeBSD_version < 1000028 */
2660 * XXX We don't yet support physical addresses here.
2662 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2663 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2664 "supported\n", __func__);
2665 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2671 * If the user wants to send an S/G list, check to make sure they
2672 * have single buffers.
2674 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2676 * The chip does not support more than one buffer for the
2677 * request or response.
2679 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2680 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2681 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2682 "response buffer segments not supported for SMP\n",
2684 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2690 * The CAM_SCATTER_VALID flag was originally implemented
2691 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2692 * We have two. So, just take that flag to mean that we
2693 * might have S/G lists, and look at the S/G segment count
2694 * to figure out whether that is the case for each individual
2697 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2698 bus_dma_segment_t *req_sg;
2700 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2701 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2703 request = ccb->smpio.smp_request;
2705 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2706 bus_dma_segment_t *rsp_sg;
2708 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2709 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2711 response = ccb->smpio.smp_response;
2713 request = ccb->smpio.smp_request;
2714 response = ccb->smpio.smp_response;
2716 #endif /* __FreeBSD_version < 1000028 */
/* Out of commands: ask CAM to retry when resources free up. */
2718 cm = mpr_alloc_command(sc);
2720 mpr_dprint(sc, MPR_ERROR,
2721 "%s: cannot allocate command\n", __func__);
2722 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the MPI2 SMP passthrough request frame. */
2727 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2728 bzero(req, sizeof(*req));
2729 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2731 /* Allow the chip to use any route to this SAS address. */
2732 req->PhysicalPort = 0xff;
2734 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2736 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2738 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2739 "%#jx\n", __func__, (uintmax_t)sasaddr);
2741 mpr_init_sge(cm, req, &req->SGL);
2744 * Set up a uio to pass into mpr_map_command(). This allows us to
2745 * do one map command, and one busdma call in there.
2747 cm->cm_uio.uio_iov = cm->cm_iovec;
2748 cm->cm_uio.uio_iovcnt = 2;
2749 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2752 * The read/write flag isn't used by busdma, but set it just in
2753 * case. This isn't exactly accurate, either, since we're going in
2756 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = request buffer, iovec[1] = response buffer. */
2758 cm->cm_iovec[0].iov_base = request;
2759 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2760 cm->cm_iovec[1].iov_base = response;
2761 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2763 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2764 cm->cm_iovec[1].iov_len;
2767 * Trigger a warning message in mpr_data_cb() for the user if we
2768 * wind up exceeding two S/G segments. The chip expects one
2769 * segment for the request and another for the response.
2771 cm->cm_max_segs = 2;
2773 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2774 cm->cm_complete = mprsas_smpio_complete;
2775 cm->cm_complete_data = ccb;
2778 * Tell the mapping code that we're using a uio, and that this is
2779 * an SMP passthrough request. There is a little special-case
2780 * logic there (in mpr_data_cb()) to handle the bidirectional
2783 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2784 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2786 /* The chip data format is little endian. */
2787 req->SASAddress.High = htole32(sasaddr >> 32);
2788 req->SASAddress.Low = htole32(sasaddr);
2791 * XXX Note that we don't have a timeout/abort mechanism here.
2792 * From the manual, it looks like task management requests only
2793 * work for SCSI IO and SATA passthrough requests. We may need to
2794 * have a mechanism to retry requests in the event of a chip reset
2795 * at least. Hopefully the chip will insure that any errors short
2796 * of that are relayed back to the driver.
2798 error = mpr_map_command(sc, cm);
2799 if ((error != 0) && (error != EINPROGRESS)) {
2800 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2801 "mpr_map_command()\n", __func__, error);
/* Mapping failure: release the command and fail the CCB as no-resource. */
2808 mpr_free_command(sc, cm);
2809 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * XPT_SMP_IO action handler: determine the SAS address to send the SMP
 * request to (the target itself if it contains an SMP target, otherwise
 * its parent expander), then hand off to mprsas_send_smpcmd().
 * NOTE(review): original line numbering is non-contiguous; some braces
 * and early-return lines appear elided.
 */
2815 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2817 struct mpr_softc *sc;
2818 struct mprsas_target *targ;
2819 uint64_t sasaddr = 0;
2824 * Make sure the target exists.
2826 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2827 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2828 targ = &sassc->targets[ccb->ccb_h.target_id];
2829 if (targ->handle == 0x0) {
2830 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2831 __func__, ccb->ccb_h.target_id);
2832 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2838 * If this device has an embedded SMP target, we'll talk to it
2840 * figure out what the expander's address is.
2842 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2843 sasaddr = targ->sasaddr;
2846 * If we don't have a SAS address for the expander yet, try
2847 * grabbing it from the page 0x83 information cached in the
2848 * transport layer for this target. LSI expanders report the
2849 * expander SAS address as the port-associated SAS address in
2850 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2853 * XXX KDM disable this for now, but leave it commented out so that
2854 * it is obvious that this is another possible way to get the SAS
2857 * The parent handle method below is a little more reliable, and
2858 * the other benefit is that it works for devices other than SES
2859 * devices. So you can send a SMP request to a da(4) device and it
2860 * will get routed to the expander that device is attached to.
2861 * (Assuming the da(4) device doesn't contain an SMP target...)
/* Disabled alternative (see comment above): */
2865 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2869 * If we still don't have a SAS address for the expander, look for
2870 * the parent device of this device, which is probably the expander.
2873 #ifdef OLD_MPR_PROBE
2874 struct mprsas_target *parent_target;
2877 if (targ->parent_handle == 0x0) {
2878 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2879 "a valid parent handle!\n", __func__, targ->handle);
2880 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Old probe path: look the parent target up by handle. */
2883 #ifdef OLD_MPR_PROBE
2884 parent_target = mprsas_find_target_by_handle(sassc, 0,
2885 targ->parent_handle);
2887 if (parent_target == NULL) {
2888 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2889 "a valid parent target!\n", __func__, targ->handle);
2890 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2894 if ((parent_target->devinfo &
2895 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2896 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2897 "does not have an SMP target!\n", __func__,
2898 targ->handle, parent_target->handle);
2899 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2903 sasaddr = parent_target->sasaddr;
/* New probe path: parent info is cached directly on the target. */
2904 #else /* OLD_MPR_PROBE */
2905 if ((targ->parent_devinfo &
2906 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2907 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2908 "does not have an SMP target!\n", __func__,
2909 targ->handle, targ->parent_handle);
2910 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2914 if (targ->parent_sasaddr == 0x0) {
2915 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2916 "%d does not have a valid SAS address!\n", __func__,
2917 targ->handle, targ->parent_handle);
2918 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2922 sasaddr = targ->parent_sasaddr;
2923 #endif /* OLD_MPR_PROBE */
/* Still no address: there is nowhere to route the SMP request. */
2928 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2929 "handle %d\n", __func__, targ->handle);
2930 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2933 mprsas_send_smpcmd(sassc, ccb, sasaddr);
2941 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler: allocate a task-management command and
 * send a SCSI Target Reset (hard link reset) to the target addressed by
 * the CCB.  Completion is handled by mprsas_resetdev_complete().
 * Must be called with the softc mutex held.
 * NOTE(review): original line numbering is non-contiguous; the alloc
 * failure test and return statements appear elided.
 */
2944 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2946 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2947 struct mpr_softc *sc;
2948 struct mpr_command *tm;
2949 struct mprsas_target *targ;
2951 MPR_FUNCTRACE(sassc->sc);
2952 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2954 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2955 ("Target %d out of bounds in XPT_RESET_DEV\n",
2956 ccb->ccb_h.target_id));
2958 tm = mpr_alloc_command(sc);
2960 mpr_dprint(sc, MPR_ERROR,
2961 "command alloc failure in mprsas_action_resetdev\n");
2962 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the MPI2 task-management request for a target reset. */
2967 targ = &sassc->targets[ccb->ccb_h.target_id];
2968 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2969 req->DevHandle = htole16(targ->handle);
2970 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2971 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2973 /* SAS Hard Link Reset / SATA Link Reset */
2974 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* TM requests go out on the high-priority queue. */
2977 tm->cm_desc.HighPriority.RequestFlags =
2978 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2979 tm->cm_complete = mprsas_resetdev_complete;
2980 tm->cm_complete_data = ccb;
2982 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
2983 __func__, targ->tid);
/* Block further I/O to the target until the reset completes. */
2985 targ->flags |= MPRSAS_TARGET_INRESET;
2987 mpr_map_command(sc, tm);
/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Maps the task-management reply onto the
 * originating CCB's status, announces the reset to CAM on success, and
 * frees the TM command.
 * NOTE(review): original line numbering is non-contiguous; the
 * NULL-reply branch and xpt_done() call appear elided.
 */
2991 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2993 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2997 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2999 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3000 ccb = tm->cm_complete_data;
3003 * Currently there should be no way we can hit this case. It only
3004 * happens when we have a failure to allocate chain frames, and
3005 * task management commands don't have S/G lists.
3007 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3008 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3010 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3012 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3013 "handle %#04x! This should not happen!\n", __func__,
3014 tm->cm_flags, req->DevHandle);
3015 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3019 mpr_dprint(sc, MPR_XINFO,
3020 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3021 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* TM complete: report success to CAM and notify of the BDR. */
3023 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3024 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3025 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3029 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3033 mprsas_free_tm(sc, tm);
/*
 * CAM SIM poll routine, used when interrupts are unavailable (e.g.
 * during kernel dumps/panics).  Disables MPR_TRACE debugging first so
 * per-poll log spam doesn't drown the console, then services the
 * hardware by calling the interrupt handler directly.
 */
3038 mprsas_poll(struct cam_sim *sim)
3040 struct mprsas_softc *sassc;
3042 sassc = cam_sim_softc(sim);
3044 if (sassc->sc->mpr_debug & MPR_TRACE) {
3045 /* frequent debug messages during a panic just slow
3046 * everything down too much.
3048 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3050 sassc->sc->mpr_debug &= ~MPR_TRACE;
3053 mpr_intr_locked(sassc->sc);
/*
 * CAM async event callback.  Handles two events, both in support of
 * EEDP (end-to-end data protection) state tracking:
 *  - AC_ADVINFO_CHANGED: re-reads the long read-capacity data for the
 *    affected LUN and records whether it is EEDP-formatted.
 *  - AC_FOUND_DEVICE: on older FreeBSD versions, defers to
 *    mprsas_check_eedp() to probe the new device.
 * NOTE(review): original line numbering is non-contiguous; switch
 * braces, break statements, and some assignments appear elided.
 */
3057 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3060 struct mpr_softc *sc;
3062 sc = (struct mpr_softc *)callback_arg;
3065 #if (__FreeBSD_version >= 1000006) || \
3066 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3067 case AC_ADVINFO_CHANGED: {
3068 struct mprsas_target *target;
3069 struct mprsas_softc *sassc;
3070 struct scsi_read_capacity_data_long rcap_buf;
3071 struct ccb_dev_advinfo cdai;
3072 struct mprsas_lun *lun;
3077 buftype = (uintptr_t)arg;
3083 * We're only interested in read capacity data changes.
3085 if (buftype != CDAI_TYPE_RCAPLONG)
3089 * See the comment in mpr_attach_sas() for a detailed
3090 * explanation. In these versions of FreeBSD we register
3091 * for all events and filter out the events that don't
3094 #if (__FreeBSD_version < 1000703) || \
3095 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3096 if (xpt_path_path_id(path) != sassc->sim->path_id)
3101 * We should have a handle for this, but check to make sure.
3103 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3104 ("Target %d out of bounds in mprsas_async\n",
3105 xpt_path_target_id(path)));
3106 target = &sassc->targets[xpt_path_target_id(path)];
3107 if (target->handle == 0)
/* Find (or create) the per-LUN record for this path's LUN. */
3110 lunid = xpt_path_lun_id(path);
3112 SLIST_FOREACH(lun, &target->luns, lun_link) {
3113 if (lun->lun_id == lunid) {
3119 if (found_lun == 0) {
3120 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3123 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3124 "LUN for EEDP support.\n");
3127 lun->lun_id = lunid;
3128 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/*
 * Fetch the cached long read-capacity data via an XPT_DEV_ADVINFO
 * CCB, then record EEDP formatting state and block size.
 */
3131 bzero(&rcap_buf, sizeof(rcap_buf));
3132 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3133 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3134 cdai.ccb_h.flags = CAM_DIR_IN;
3135 cdai.buftype = CDAI_TYPE_RCAPLONG;
3136 #if (__FreeBSD_version >= 1100061) || \
3137 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3138 cdai.flags = CDAI_FLAG_NONE;
3142 cdai.bufsiz = sizeof(rcap_buf);
3143 cdai.buf = (uint8_t *)&rcap_buf;
3144 xpt_action((union ccb *)&cdai);
3145 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3146 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
3148 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3149 && (rcap_buf.prot & SRC16_PROT_EN)) {
3150 lun->eedp_formatted = TRUE;
3151 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3153 lun->eedp_formatted = FALSE;
3154 lun->eedp_block_size = 0;
3159 case AC_FOUND_DEVICE: {
3160 struct ccb_getdev *cgd;
3163 * See the comment in mpr_attach_sas() for a detailed
3164 * explanation. In these versions of FreeBSD we register
3165 * for all events and filter out the events that don't
3168 #if (__FreeBSD_version < 1000703) || \
3169 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3170 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3175 #if (__FreeBSD_version < 901503) || \
3176 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3177 mprsas_check_eedp(sc, path, cgd);
3186 #if (__FreeBSD_version < 901503) || \
3187 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Probe a newly-found device for EEDP support (older FreeBSD versions
 * only; see surrounding #if).  If the inquiry data advertises
 * protection (SPC3_SID_PROTECT), issue an internally-generated READ
 * CAPACITY 16 whose completion (mprsas_read_cap_done) records whether
 * the LUN is formatted for EEDP.  Allocates a per-LUN record if one
 * doesn't already exist.
 * NOTE(review): original line numbering is non-contiguous; early
 * returns, some frees, and the final xpt_action() call appear elided.
 */
3189 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3190 struct ccb_getdev *cgd)
3192 struct mprsas_softc *sassc = sc->sassc;
3193 struct ccb_scsiio *csio;
3194 struct scsi_read_capacity_16 *scsi_cmd;
3195 struct scsi_read_capacity_eedp *rcap_buf;
3197 target_id_t targetid;
3200 struct cam_path *local_path;
3201 struct mprsas_target *target;
3202 struct mprsas_lun *lun;
3206 pathid = cam_sim_path(sassc->sim);
3207 targetid = xpt_path_target_id(path);
3208 lunid = xpt_path_lun_id(path);
3210 KASSERT(targetid < sassc->maxtargets,
3211 ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3212 target = &sassc->targets[targetid];
3213 if (target->handle == 0x0)
3217 * Determine if the device is EEDP capable.
3219 * If this flag is set in the inquiry data, the device supports
3220 * protection information, and must support the 16 byte read capacity
3221 * command, otherwise continue without sending read cap 16
3223 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3227 * Issue a READ CAPACITY 16 command. This info is used to determine if
3228 * the LUN is formatted for EEDP support.
3230 ccb = xpt_alloc_ccb_nowait();
3232 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3237 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3239 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3246 * If LUN is already in list, don't create a new one.
3249 SLIST_FOREACH(lun, &target->luns, lun_link) {
3250 if (lun->lun_id == lunid) {
3256 lun = malloc(sizeof(struct mprsas_lun),  M_MPR,
3259 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3261 xpt_free_path(local_path);
3265 lun->lun_id = lunid;
3266 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3269 xpt_path_string(local_path, path_str, sizeof(path_str));
3270 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3271 path_str, target->handle);
3274 * Issue a READ CAPACITY 16 command for the LUN. The
3275 * mprsas_read_cap_done function will load the read cap info into the
3278 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3280 if (rcap_buf == NULL) {
3281 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3282 "buffer for EEDP support.\n");
3283 xpt_free_path(ccb->ccb_h.path);
/* Fill in the CSIO for the internally-generated READ CAPACITY 16. */
3287 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3289 csio->ccb_h.func_code = XPT_SCSI_IO;
3290 csio->ccb_h.flags = CAM_DIR_IN;
3291 csio->ccb_h.retry_count = 4;
3292 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3293 csio->ccb_h.timeout = 60000;
3294 csio->data_ptr = (uint8_t *)rcap_buf;
3295 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3296 csio->sense_len = MPR_SENSE_LEN;
3297 csio->cdb_len = sizeof(*scsi_cmd);
3298 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E = SERVICE ACTION IN(16); service action selects READ CAP 16. */
3300 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3301 bzero(scsi_cmd, sizeof(*scsi_cmd));
3302 scsi_cmd->opcode = 0x9E;
3303 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3304 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the sassc so the completion routine can find its state. */
3306 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion routine for the internally-generated READ CAPACITY 16
 * issued by mprsas_check_eedp().  Releases the devq (internal commands
 * don't return through CAM's normal path), then records in the matching
 * LUN entry whether the LUN is EEDP-formatted and its block size.
 * Frees the data buffer, path, and CCB when done.
 * NOTE(review): original line numbering is non-contiguous; the LUN-match
 * else-branch and loop closing braces appear elided.
 */
3311 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3313 struct mprsas_softc *sassc;
3314 struct mprsas_target *target;
3315 struct mprsas_lun *lun;
3316 struct scsi_read_capacity_eedp *rcap_buf;
3318 if (done_ccb == NULL)
3321 /* Driver need to release devq, it Scsi command is
3322 * generated by driver internally.
3323 * Currently there is a single place where driver
3324 * calls scsi command internally. In future if driver
3325 * calls more scsi command internally, it needs to release
3326 * devq internally, since those command will not go back to
3329 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3330 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3331 xpt_release_devq(done_ccb->ccb_h.path,
3332 /*count*/ 1, /*run_queue*/TRUE);
3335 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3338 * Get the LUN ID for the path and look it up in the LUN list for the
3341 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3342 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3343 ("Target %d out of bounds in mprsas_read_cap_done\n",
3344 done_ccb->ccb_h.target_id));
3345 target = &sassc->targets[done_ccb->ccb_h.target_id];
3346 SLIST_FOREACH(lun, &target->luns, lun_link) {
3347 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3351 * Got the LUN in the target's LUN list. Fill it in with EEDP
3352 * info. If the READ CAP 16 command had some SCSI error (common
3353 * if command is not supported), mark the lun as not supporting
3354 * EEDP and set the block size to 0.
3356 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3357 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3358 lun->eedp_formatted = FALSE;
3359 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte = PROT_EN: LUN formatted with PI. */
3363 if (rcap_buf->protect & 0x01) {
3364 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3365 "%d is formatted for EEDP support.\n",
3366 done_ccb->ccb_h.target_lun,
3367 done_ccb->ccb_h.target_id);
3368 lun->eedp_formatted = TRUE;
3369 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3374 // Finished with this CCB and path.
3375 free(rcap_buf, M_MPR);
3376 xpt_free_path(done_ccb->ccb_h.path);
3377 xpt_free_ccb(done_ccb);
3379 #endif /* (__FreeBSD_version < 901503) || \
3380 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Prepare a task-management command targeting 'target'/'lun_id':
 * allocate a CCB and path (used later to release the devq), attach the
 * target to the TM, and set MPRSAS_TARGET_INRESET so no new I/O is
 * dispatched until the TM completes.
 * NOTE(review): original line numbering is non-contiguous; the CCB
 * NULL-check and the assignment storing the CCB on the TM appear elided.
 */
3383 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3384 struct mprsas_target *target, lun_id_t lun_id)
3390 * Set the INRESET flag for this target so that no I/O will be sent to
3391 * the target until the reset has completed. If an I/O request does
3392 * happen, the devq will be frozen. The CCB holds the path which is
3393 * used to release the devq. The devq is released and the CCB is freed
3394 * when the TM completes.
3396 ccb = xpt_alloc_ccb_nowait();
3398 path_id = cam_sim_path(sc->sassc->sim);
3399 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3400 target->tid, lun_id) != CAM_REQ_CMP) {
3404 tm->cm_targ = target;
3405 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off SAS topology discovery: set wait_for_port_enable (which
 * keeps the simq frozen until discovery settles) and send the firmware
 * a Port Enable request.
 */
3411 mprsas_startup(struct mpr_softc *sc)
3414 * Send the port enable message and set the wait_for_port_enable flag.
3415 * This flag helps to keep the simq frozen until all discovery events
3418 sc->wait_for_port_enable = 1;
3419 mprsas_send_portenable(sc);
/*
 * Allocate a command and send the MPI2 PORT_ENABLE request to the IOC.
 * Completion is handled by mprsas_portenable_complete().  Returns an
 * error when no command can be allocated (value elided from this view).
 */
3426 mprsas_send_portenable(struct mpr_softc *sc)
3427 MPI2_PORT_ENABLE_REQUEST *request;
3428 struct mpr_command *cm;
3431 if ((cm = mpr_alloc_command(sc)) == NULL)
3433 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3434 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3435 request->MsgFlags = 0;
3437 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3438 cm->cm_complete = mprsas_portenable_complete;
3442 mpr_map_command(sc, cm);
3443 mpr_dprint(sc, MPR_XINFO,
3444 "mpr_send_portenable finished cm %p req %p complete %p\n",
3445 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the Port Enable request sent by
 * mprsas_send_portenable().  Logs firmware-reported failures, frees the
 * command, tears down the config intrhook if still installed, and wakes
 * anyone sleeping on port_enable_complete before dropping the startup
 * refcount.
 *
 * Fix: the IOCStatus check previously masked before byte-swapping
 * (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK)), which applies the
 * mask to the wrong byte order on big-endian hosts.  Swap first, then
 * mask, matching every other IOCStatus check in this file.
 */
3450 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3452 MPI2_PORT_ENABLE_REPLY *reply;
3453 struct mprsas_softc *sassc;
3459 * Currently there should be no way we can hit this case. It only
3460 * happens when we have a failure to allocate chain frames, and
3461 * port enable commands don't have S/G lists.
3463 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3464 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3465 "This should not happen!\n", __func__, cm->cm_flags);
3468 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3470 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3471 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3472 MPI2_IOCSTATUS_SUCCESS)
3473 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3475 mpr_free_command(sc, cm);
/* Boot-time attach holds a config intrhook; release it exactly once. */
3476 if (sc->mpr_ich.ich_arg != NULL) {
3477 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3478 config_intrhook_disestablish(&sc->mpr_ich);
3479 sc->mpr_ich.ich_arg = NULL;
3483 * Done waiting for port enable to complete. Decrement the refcount.
3484 * If refcount is 0, discovery is complete and a rescan of the bus can
3487 sc->wait_for_port_enable = 0;
3488 sc->port_enable_complete = 1;
3489 wakeup(&sc->port_enable_complete);
3490 mprsas_startup_decrement(sassc);
/*
 * Return whether target 'id' appears in the driver's comma-separated
 * exclude_ids list (exact return values elided from this view).
 * NOTE(review): strsep() modifies the buffer it parses; 'ids' points
 * into sc->exclude_ids, so this looks like it mutates that list in
 * place — TODO confirm against the full source.
 */
3494 mprsas_check_id(struct mprsas_softc *sassc, int id)
3496 struct mpr_softc *sc = sassc->sc;
3500 ids = &sc->exclude_ids[0];
3501 while((name = strsep(&ids, ",")) != NULL) {
/* Skip empty tokens produced by consecutive commas. */
3502 if (name[0] == '\0')
/* Tokens are parsed with strtol base 0 (accepts decimal/hex/octal). */
3504 if (strtol(name, NULL, 0) == (long)id)
3512 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3514 struct mprsas_softc *sassc;
3515 struct mprsas_lun *lun, *lun_tmp;
3516 struct mprsas_target *targ;
3521 * The number of targets is based on IOC Facts, so free all of
3522 * the allocated LUNs for each target and then the target buffer
3525 for (i=0; i< maxtargets; i++) {
3526 targ = &sassc->targets[i];
3527 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3531 free(sassc->targets, M_MPR);
3533 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3534 M_MPR, M_WAITOK|M_ZERO);
3535 if (!sassc->targets) {
3536 panic("%s failed to alloc targets with error %d\n",