2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2015 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for Avago Technologies (LSI) MPT3 */
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
75 #include <dev/mpr/mpi/mpi2_type.h>
76 #include <dev/mpr/mpi/mpi2.h>
77 #include <dev/mpr/mpi/mpi2_ioc.h>
78 #include <dev/mpr/mpi/mpi2_sas.h>
79 #include <dev/mpr/mpi/mpi2_cnfg.h>
80 #include <dev/mpr/mpi/mpi2_init.h>
81 #include <dev/mpr/mpi/mpi2_tool.h>
82 #include <dev/mpr/mpr_ioctl.h>
83 #include <dev/mpr/mprvar.h>
84 #include <dev/mpr/mpr_table.h>
85 #include <dev/mpr/mpr_sas.h>
87 #define MPRSAS_DISCOVERY_TIMEOUT 20
88 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
91 * static array to check SCSI OpCode for EEDP protection bits
93 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
94 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
95 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Per-opcode EEDP (end-to-end data protection) flag table, indexed by the
 * SCSI CDB opcode byte (0x00-0xFF).  Non-zero entries select the EEDP
 * operation to program into the SCSI IO request: PRO_R (check/remove) for
 * read-class opcodes, PRO_W (insert) for write-class opcodes, PRO_V
 * (insert) for verify-class opcodes.
 * NOTE(review): the closing "};" of this initializer is not visible in this
 * extract — lines appear to have been dropped during extraction.
 */
96 static uint8_t op_code_prot[256] = {
97 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
99 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
106 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
115 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
117 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
118 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
119 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
120 static void mprsas_poll(struct cam_sim *sim);
121 static void mprsas_scsiio_timeout(void *data);
122 static void mprsas_abort_complete(struct mpr_softc *sc,
123 struct mpr_command *cm);
124 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
125 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
126 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
127 static void mprsas_resetdev_complete(struct mpr_softc *,
128 struct mpr_command *);
129 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130 struct mpr_command *cm);
131 static void mprsas_async(void *callback_arg, uint32_t code,
132 struct cam_path *path, void *arg);
133 #if (__FreeBSD_version < 901503) || \
134 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
135 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
136 struct ccb_getdev *cgd);
137 static void mprsas_read_cap_done(struct cam_periph *periph,
138 union ccb *done_ccb);
140 static int mprsas_send_portenable(struct mpr_softc *sc);
141 static void mprsas_portenable_complete(struct mpr_softc *sc,
142 struct mpr_command *cm);
144 #if __FreeBSD_version >= 900026
145 static void mprsas_smpio_complete(struct mpr_softc *sc,
146 struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc,
148 union ccb *ccb, uint64_t sasaddr);
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
/*
 * Linear search of sassc->targets[] for the target whose firmware device
 * handle matches, beginning at index `start` (allows resuming a scan).
 * Presumably returns the matching target or NULL — the parameter list tail,
 * loop body close, and return statements are missing from this extract;
 * TODO(review): confirm against the full source.
 */
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
156 struct mprsas_target *target;
159 for (i = start; i < sassc->maxtargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery. Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
/*
 * Bump the discovery-activity refcount (see the freeze-the-simq comment
 * above).  On the 0 -> 1 transition while MPRSAS_IN_STARTUP is set, freeze
 * the SIM queue so CAM holds off commands until discovery settles.
 * NOTE(review): interior lines (braces, the pre-902502 #else branch) are
 * missing from this extract.
 */
176 mprsas_startup_increment(struct mprsas_softc *sassc)
178 MPR_FUNCTRACE(sassc->sc);
180 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 if (sassc->startup_refcount++ == 0) {
182 /* just starting, freeze the simq */
183 mpr_dprint(sassc->sc, MPR_INIT,
184 "%s freezing simq\n", __func__);
185 #if (__FreeBSD_version >= 1000039) || \
186 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
189 xpt_freeze_simq(sassc->sim, 1);
191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 sassc->startup_refcount);
/*
 * If the SIM queue was frozen (MPRSAS_QUEUE_FROZEN), clear the flag and
 * release the freeze so CAM can resume dispatching I/O.
 */
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 xpt_release_simq(sassc->sim, 1);
202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Counterpart of mprsas_startup_increment(): drop the discovery refcount.
 * On the 1 -> 0 transition, leave startup mode, release the SIM queue, and
 * (on CAM versions with PIM_NOSCAN) rescan all targets for the latest
 * topology.  NOTE(review): the pre-902502 #else branch is missing from
 * this extract.
 */
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 MPR_FUNCTRACE(sassc->sc);
211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 if (--sassc->startup_refcount == 0) {
213 /* finished all discovery-related actions, release
214 * the simq and rescan for the latest topology.
216 mpr_dprint(sassc->sc, MPR_INIT,
217 "%s releasing simq\n", __func__);
218 sassc->flags &= ~MPRSAS_IN_STARTUP;
219 xpt_release_simq(sassc->sim, 1);
220 #if (__FreeBSD_version >= 1000039) || \
221 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
224 mprsas_rescan_target(sassc->sc, NULL);
227 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
228 sassc->startup_refcount);
232 /* The firmware requires us to stop sending commands when we're doing task
233 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a command for task management from the high-priority pool
 * (TMs must not sit behind normal I/O).  Presumably returns the command
 * (NULL on exhaustion) — the return path is missing from this extract.
 */
237 mprsas_alloc_tm(struct mpr_softc *sc)
239 struct mpr_command *tm;
242 tm = mpr_alloc_high_priority_command(sc);
/*
 * Release a task-management command: clear the target's INRESET flag,
 * unfreeze the per-device queue that was frozen for the TM, free the CCB
 * path/CCB used for the freeze, and return the command to the
 * high-priority pool.
 */
247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
254 * For TM's the devq is frozen for the device. Unfreeze it here and
255 * free the resources used for freezing the devq. Must clear the
256 * INRESET flag as well or scsi I/O will not work.
258 if (tm->cm_targ != NULL) {
259 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
262 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
264 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
265 xpt_free_path(tm->cm_ccb->ccb_h.path);
266 xpt_free_ccb(tm->cm_ccb);
269 mpr_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan.  targ == NULL rescans the whole bus
 * (XPT_SCAN_BUS with a wildcard target); otherwise only the given target
 * (XPT_SCAN_TGT), whose id is derived from its index in sassc->targets[].
 * Allocation is nowait because this may run from completion context.
 */
273 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
275 struct mprsas_softc *sassc = sc->sassc;
277 target_id_t targetid;
281 pathid = cam_sim_path(sassc->sim);
283 targetid = CAM_TARGET_WILDCARD;
285 targetid = targ - sassc->targets;
288 * Allocate a CCB and schedule a rescan.
290 ccb = xpt_alloc_ccb_nowait();
292 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
296 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
297 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
298 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
303 if (targetid == CAM_TARGET_WILDCARD)
304 ccb->ccb_h.func_code = XPT_SCAN_BUS;
306 ccb->ccb_h.func_code = XPT_SCAN_TGT;
308 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * Debug logging helper for a command: builds one line in an sbuf
 * containing the CAM path (or a "noperiph" sim:bus:target tuple when no
 * CCB is attached), the SCSI CDB and transfer length for XPT_SCSI_IO,
 * the SMID, and the caller's printf-style message, then emits it at the
 * given debug level.  Returns early when that level is not enabled.
 */
313 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
323 /* No need to be in here if debugging isn't enabled */
324 if ((cm->cm_sc->mpr_debug & level) == 0)
327 sbuf_new(&sb, str, sizeof(str), 0);
331 if (cm->cm_ccb != NULL) {
332 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
334 sbuf_cat(&sb, path_str);
335 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
336 scsi_command_string(&cm->cm_ccb->csio, &sb);
337 sbuf_printf(&sb, "length %d ",
338 cm->cm_ccb->csio.dxfer_len);
341 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
342 cam_sim_name(cm->cm_sc->sassc->sim),
343 cam_sim_unit(cm->cm_sc->sassc->sim),
344 cam_sim_bus(cm->cm_sc->sassc->sim),
345 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
349 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
350 sbuf_vprintf(&sb, fmt, ap);
352 mpr_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  On success, clears the target's
 * enclosure/topology state (but deliberately keeps devname/sasaddr so the
 * same target id can be reused if the volume returns), then frees the TM.
 * NOTE(review): IOCStatus is compared without le16toh() here, unlike the
 * byte-swapped comparisons in mprsas_remove_device()/_complete() — verify
 * whether this is a latent big-endian bug in the full source.
 */
358 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
360 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
361 struct mprsas_target *targ;
366 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
367 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
371 /* XXX retry the remove after the diag reset completes? */
372 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
373 "0x%04x\n", __func__, handle);
374 mprsas_free_tm(sc, tm);
378 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) {
379 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
380 "device 0x%x\n", reply->IOCStatus, handle);
381 mprsas_free_tm(sc, tm);
385 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
386 reply->TerminationCount);
387 mpr_free_reply(sc, tm->cm_reply_data);
388 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
390 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
394 * Don't clear target if remove fails because things will get confusing.
395 * Leave the devname and sasaddr intact so that we know to avoid reusing
396 * this target id if possible, and so we can assign the same target id
397 * to this device if it comes back in the future.
399 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) {
402 targ->encl_handle = 0x0;
403 targ->encl_level_valid = 0x0;
404 targ->encl_level = 0x0;
405 targ->connector_name[0] = ' ';
406 targ->connector_name[1] = ' ';
407 targ->connector_name[2] = ' ';
408 targ->connector_name[3] = ' ';
409 targ->encl_slot = 0x0;
410 targ->exp_dev_handle = 0x0;
412 targ->linkrate = 0x0;
415 targ->scsi_req_desc_type = 0;
418 mprsas_free_tm(sc, tm);
423 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
424 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Start removal of a RAID volume: mark the target INREMOVAL, kick a
 * rescan, and issue a high-priority target-reset TM whose completion
 * (mprsas_remove_volume) finishes the teardown.  Unlike bare drives, no
 * SAS_OP_REMOVE_DEVICE follows (see comment above).
 * NOTE(review): unlike mprsas_prepare_remove(), the request is not
 * memset() and DevHandle is not htole16()-swapped in the visible lines —
 * confirm against the full source whether those lines were dropped.
 */
427 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
429 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
430 struct mpr_softc *sc;
431 struct mpr_command *cm;
432 struct mprsas_target *targ = NULL;
434 MPR_FUNCTRACE(sassc->sc);
437 targ = mprsas_find_target_by_handle(sassc, 0, handle);
439 /* FIXME: what is the action? */
440 /* We don't know about this device? */
441 mpr_dprint(sc, MPR_ERROR,
442 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
446 targ->flags |= MPRSAS_TARGET_INREMOVAL;
448 cm = mprsas_alloc_tm(sc);
450 mpr_dprint(sc, MPR_ERROR,
451 "%s: command alloc failure\n", __func__);
455 mprsas_rescan_target(sc, targ);
457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
458 req->DevHandle = targ->handle;
459 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
460 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
462 /* SAS Hard Link Reset / SATA Link Reset */
463 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
467 cm->cm_desc.HighPriority.RequestFlags =
468 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
469 cm->cm_complete = mprsas_remove_volume;
470 cm->cm_complete_data = (void *)(uintptr_t)handle;
472 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
473 __func__, targ->tid);
474 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
476 mpr_map_command(sc, cm);
480 * The MPT3 firmware performs debounce on the link to avoid transient link
481 * errors and false removals. When it does decide that link has been lost
482 * and a device needs to go away, it expects that the host will perform a
483 * target reset and then an op remove. The reset has the side-effect of
484 * aborting any outstanding requests for the device, which is required for
485 * the op-remove to succeed. It's not clear if the host should check for
486 * the device coming back alive after the reset.
/*
 * Start removal of a bare device (see the firmware-debounce comment
 * above): mark the target INREMOVAL, kick a rescan, and send a
 * high-priority target-reset TM.  The completion handler
 * (mprsas_remove_device) then issues the SAS_OP_REMOVE_DEVICE that the
 * firmware requires after the reset has aborted outstanding I/O.
 */
489 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
491 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
492 struct mpr_softc *sc;
493 struct mpr_command *cm;
494 struct mprsas_target *targ = NULL;
496 MPR_FUNCTRACE(sassc->sc);
500 targ = mprsas_find_target_by_handle(sassc, 0, handle);
502 /* FIXME: what is the action? */
503 /* We don't know about this device? */
504 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
509 targ->flags |= MPRSAS_TARGET_INREMOVAL;
511 cm = mprsas_alloc_tm(sc);
513 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
518 mprsas_rescan_target(sc, targ);
520 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
521 memset(req, 0, sizeof(*req));
522 req->DevHandle = htole16(targ->handle);
523 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
524 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
526 /* SAS Hard Link Reset / SATA Link Reset */
527 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
531 cm->cm_desc.HighPriority.RequestFlags =
532 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
533 cm->cm_complete = mprsas_remove_device;
534 cm->cm_complete_data = (void *)(uintptr_t)handle;
536 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
537 __func__, targ->tid);
538 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
540 mpr_map_command(sc, cm);
/*
 * Completion of the target-reset TM from mprsas_prepare_remove().  After
 * validating the reply, the TM command is reused in place to send the
 * MPI2_SAS_OP_REMOVE_DEVICE IO-unit-control request (completion:
 * mprsas_remove_complete), and any commands still queued on the target
 * are completed back to CAM with CAM_DEV_NOT_THERE.
 */
544 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
546 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
547 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
548 struct mprsas_target *targ;
549 struct mpr_command *next_cm;
554 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
555 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
559 * Currently there should be no way we can hit this case. It only
560 * happens when we have a failure to allocate chain frames, and
561 * task management commands don't have S/G lists.
563 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
564 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
565 "handle %#04x! This should not happen!\n", __func__,
566 tm->cm_flags, handle);
567 mprsas_free_tm(sc, tm);
572 /* XXX retry the remove after the diag reset completes? */
573 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
574 "0x%04x\n", __func__, handle);
575 mprsas_free_tm(sc, tm);
579 if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
580 mpr_dprint(sc, MPR_FAULT, "IOCStatus = 0x%x while resetting "
581 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
582 mprsas_free_tm(sc, tm);
586 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
587 le32toh(reply->TerminationCount));
588 mpr_free_reply(sc, tm->cm_reply_data);
589 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
591 /* Reuse the existing command */
592 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
593 memset(req, 0, sizeof(*req));
594 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
595 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
596 req->DevHandle = htole16(handle);
598 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
599 tm->cm_complete = mprsas_remove_complete;
600 tm->cm_complete_data = (void *)(uintptr_t)handle;
602 mpr_map_command(sc, tm);
604 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
606 if (targ->encl_level_valid) {
607 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
608 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
609 targ->connector_name);
611 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
614 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
615 ccb = tm->cm_complete_data;
616 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
617 mprsas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the SAS_OP_REMOVE_DEVICE
 * request.  On success, clears the target's enclosure/topology state
 * (keeping devname/sasaddr for target-id reuse, per the comment below)
 * and frees any per-LUN bookkeeping, then releases the TM.
 */
622 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
624 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
626 struct mprsas_target *targ;
627 struct mprsas_lun *lun;
631 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
632 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
635 * Currently there should be no way we can hit this case. It only
636 * happens when we have a failure to allocate chain frames, and
637 * task management commands don't have S/G lists.
639 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
640 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
641 "handle %#04x! This should not happen!\n", __func__,
642 tm->cm_flags, handle);
643 mprsas_free_tm(sc, tm);
648 /* most likely a chip reset */
649 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
650 "0x%04x\n", __func__, handle);
651 mprsas_free_tm(sc, tm);
655 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
656 __func__, handle, le16toh(reply->IOCStatus));
659 * Don't clear target if remove fails because things will get confusing.
660 * Leave the devname and sasaddr intact so that we know to avoid reusing
661 * this target id if possible, and so we can assign the same target id
662 * to this device if it comes back in the future.
664 if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
667 targ->encl_handle = 0x0;
668 targ->encl_level_valid = 0x0;
669 targ->encl_level = 0x0;
670 targ->connector_name[0] = ' ';
671 targ->connector_name[1] = ' ';
672 targ->connector_name[2] = ' ';
673 targ->connector_name[3] = ' ';
674 targ->encl_slot = 0x0;
675 targ->exp_dev_handle = 0x0;
677 targ->linkrate = 0x0;
680 targ->scsi_req_desc_type = 0;
682 while (!SLIST_EMPTY(&targ->luns)) {
683 lun = SLIST_FIRST(&targ->luns);
684 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
689 mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask (SAS topology/discovery/device-status,
 * IR volume/disk/config, broadcast primitives, temperature threshold) and
 * register mprsas_evt_handler for those events with the IOC.
 */
693 mprsas_register_events(struct mpr_softc *sc)
698 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
700 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
701 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
703 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
704 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
705 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
706 setbit(events, MPI2_EVENT_IR_VOLUME);
707 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
708 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
709 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
712 &sc->sassc->mprsas_eh);
/*
 * CAM attach for the SAS side of the adapter: allocate the sassc and the
 * targets[] array, create the SIM queue and SIM, start the event-handling
 * taskqueue, register a single fake bus, enter startup mode (simq frozen
 * until discovery finishes), create a wildcard path, and register async
 * callbacks used for EEDP detection; finally register firmware events.
 * NOTE(review): error-unwind labels/gotos and several closing braces are
 * missing from this extract — the cleanup paths cannot be reviewed here.
 */
718 mpr_attach_sas(struct mpr_softc *sc)
720 struct mprsas_softc *sassc;
726 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
728 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
734 * XXX MaxTargets could change during a reinit. Since we don't
735 * resize the targets[] array during such an event, cache the value
736 * of MaxTargets here so that we don't get into trouble later. This
737 * should move into the reinit logic.
739 sassc->maxtargets = sc->facts->MaxTargets;
740 sassc->targets = malloc(sizeof(struct mprsas_target) *
741 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
742 if (!sassc->targets) {
743 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
751 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
752 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIMQ\n");
757 unit = device_get_unit(sc->mpr_dev);
758 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
759 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
760 if (sassc->sim == NULL) {
761 mpr_dprint(sc, MPR_ERROR, "Cannot allocate SIM\n");
766 TAILQ_INIT(&sassc->ev_queue);
768 /* Initialize taskqueue for Event Handling */
769 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
770 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
771 taskqueue_thread_enqueue, &sassc->ev_tq);
772 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
773 device_get_nameunit(sc->mpr_dev));
778 * XXX There should be a bus for every port on the adapter, but since
779 * we're just going to fake the topology for now, we'll pretend that
780 * everything is just a target on a single bus.
782 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
783 mpr_dprint(sc, MPR_ERROR, "Error %d registering SCSI bus\n",
790 * Assume that discovery events will start right away.
792 * Hold off boot until discovery is complete.
794 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
795 sc->sassc->startup_refcount = 0;
796 mprsas_startup_increment(sassc);
798 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
801 * Register for async events so we can determine the EEDP
802 * capabilities of devices.
804 status = xpt_create_path(&sassc->path, /*periph*/NULL,
805 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
807 if (status != CAM_REQ_CMP) {
808 mpr_printf(sc, "Error %#x creating sim path\n", status);
813 #if (__FreeBSD_version >= 1000006) || \
814 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
815 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
817 event = AC_FOUND_DEVICE;
821 * Prior to the CAM locking improvements, we can't call
822 * xpt_register_async() with a particular path specified.
824 * If a path isn't specified, xpt_register_async() will
825 * generate a wildcard path and acquire the XPT lock while
826 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
827 * It will then drop the XPT lock once that is done.
829 * If a path is specified for xpt_register_async(), it will
830 * not acquire and drop the XPT lock around the call to
831 * xpt_action(). xpt_action() asserts that the caller
832 * holds the SIM lock, so the SIM lock has to be held when
833 * calling xpt_register_async() when the path is specified.
835 * But xpt_register_async calls xpt_for_all_devices(),
836 * which calls xptbustraverse(), which will acquire each
837 * SIM lock. When it traverses our particular bus, it will
838 * necessarily acquire the SIM lock, which will lead to a
839 * recursive lock acquisition.
841 * The CAM locking changes fix this problem by acquiring
842 * the XPT topology lock around bus traversal in
843 * xptbustraverse(), so the caller can hold the SIM lock
844 * and it does not cause a recursive lock acquisition.
846 * These __FreeBSD_version values are approximate, especially
847 * for stable/10, which is two months later than the actual
851 #if (__FreeBSD_version < 1000703) || \
852 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
854 status = xpt_register_async(event, mprsas_async, sc,
858 status = xpt_register_async(event, mprsas_async, sc,
862 if (status != CAM_REQ_CMP) {
863 mpr_dprint(sc, MPR_ERROR,
864 "Error %#x registering async handler for "
865 "AC_ADVINFO_CHANGED events\n", status);
866 xpt_free_path(sassc->path);
870 if (status != CAM_REQ_CMP) {
872 * EEDP use is the exception, not the rule.
873 * Warn the user, but do not fail to attach.
875 mpr_printf(sc, "EEDP capabilities disabled.\n");
880 mprsas_register_events(sc);
/*
 * Tear down the CAM attachment in reverse of mpr_attach_sas(): deregister
 * firmware events, drain and free the event taskqueue (with the lock
 * dropped to avoid deadlock), deregister the async handler and path,
 * release the simq if we never left startup, deregister and free the SIM
 * and SIM queue, and free per-target LUN lists plus the targets[] array.
 */
888 mpr_detach_sas(struct mpr_softc *sc)
890 struct mprsas_softc *sassc;
891 struct mprsas_lun *lun, *lun_tmp;
892 struct mprsas_target *targ;
897 if (sc->sassc == NULL)
901 mpr_deregister_events(sc, sassc->mprsas_eh);
904 * Drain and free the event handling taskqueue with the lock
905 * unheld so that any parallel processing tasks drain properly
906 * without deadlocking.
908 if (sassc->ev_tq != NULL)
909 taskqueue_free(sassc->ev_tq);
911 /* Make sure CAM doesn't wedge if we had to bail out early. */
914 /* Deregister our async handler */
915 if (sassc->path != NULL) {
916 xpt_register_async(0, mprsas_async, sc, sassc->path);
917 xpt_free_path(sassc->path);
921 if (sassc->flags & MPRSAS_IN_STARTUP)
922 xpt_release_simq(sassc->sim, 1);
924 if (sassc->sim != NULL) {
925 xpt_bus_deregister(cam_sim_path(sassc->sim));
926 cam_sim_free(sassc->sim, FALSE);
929 sassc->flags |= MPRSAS_SHUTDOWN;
932 if (sassc->devq != NULL)
933 cam_simq_free(sassc->devq);
935 for (i = 0; i < sassc->maxtargets; i++) {
936 targ = &sassc->targets[i];
937 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
941 free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery finishes: cancel the pending discovery
 * timeout callout, if one was armed.
 */
949 mprsas_discovery_end(struct mprsas_softc *sassc)
951 struct mpr_softc *sc = sassc->sc;
955 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
956 callout_stop(&sassc->discovery_callout);
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB
 * function code: fills XPT_PATH_INQ / XPT_GET_TRAN_SETTINGS locally,
 * computes geometry, routes reset/SCSI-IO/SMP-IO CCBs to their handlers,
 * fakes success for aborts/bus resets, and returns CAM_FUNC_NOTAVAIL for
 * everything else.  Must be called with the softc mutex held.
 * NOTE(review): several case labels, break statements, and the trailing
 * xpt_done() are missing from this extract.
 */
961 mprsas_action(struct cam_sim *sim, union ccb *ccb)
963 struct mprsas_softc *sassc;
965 sassc = cam_sim_softc(sim);
967 MPR_FUNCTRACE(sassc->sc);
968 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
969 ccb->ccb_h.func_code);
970 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
972 switch (ccb->ccb_h.func_code) {
975 struct ccb_pathinq *cpi = &ccb->cpi;
977 cpi->version_num = 1;
978 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
979 cpi->target_sprt = 0;
980 #if (__FreeBSD_version >= 1000039) || \
981 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
982 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
984 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
986 cpi->hba_eng_cnt = 0;
987 cpi->max_target = sassc->maxtargets - 1;
989 cpi->initiator_id = sassc->maxtargets - 1;
990 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
991 strncpy(cpi->hba_vid, "Avago Tech (LSI)", HBA_IDLEN);
992 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
993 cpi->unit_number = cam_sim_unit(sim);
994 cpi->bus_id = cam_sim_bus(sim);
996 * XXXSLM-I think this needs to change based on config page or
997 * something instead of hardcoded to 150000.
999 cpi->base_transfer_speed = 150000;
1000 cpi->transport = XPORT_SAS;
1001 cpi->transport_version = 0;
1002 cpi->protocol = PROTO_SCSI;
1003 cpi->protocol_version = SCSI_REV_SPC;
1004 #if __FreeBSD_version >= 800001
1006 * XXXSLM-probably need to base this number on max SGL's and
1009 cpi->maxio = 256 * 1024;
1011 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1014 case XPT_GET_TRAN_SETTINGS:
1016 struct ccb_trans_settings *cts;
1017 struct ccb_trans_settings_sas *sas;
1018 struct ccb_trans_settings_scsi *scsi;
1019 struct mprsas_target *targ;
1022 sas = &cts->xport_specific.sas;
1023 scsi = &cts->proto_specific.scsi;
1025 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1026 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1027 cts->ccb_h.target_id));
1028 targ = &sassc->targets[cts->ccb_h.target_id];
1029 if (targ->handle == 0x0) {
1030 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1034 cts->protocol_version = SCSI_REV_SPC2;
1035 cts->transport = XPORT_SAS;
1036 cts->transport_version = 0;
1038 sas->valid = CTS_SAS_VALID_SPEED;
1039 switch (targ->linkrate) {
1041 sas->bitrate = 150000;
1044 sas->bitrate = 300000;
1047 sas->bitrate = 600000;
1050 sas->bitrate = 1200000;
1056 cts->protocol = PROTO_SCSI;
1057 scsi->valid = CTS_SCSI_VALID_TQ;
1058 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1060 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1063 case XPT_CALC_GEOMETRY:
1064 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1065 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1068 mpr_dprint(sassc->sc, MPR_XINFO,
1069 "mprsas_action XPT_RESET_DEV\n");
1070 mprsas_action_resetdev(sassc, ccb);
1075 mpr_dprint(sassc->sc, MPR_XINFO,
1076 "mprsas_action faking success for abort or reset\n");
1077 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1080 mprsas_action_scsiio(sassc, ccb);
1082 #if __FreeBSD_version >= 900026
1084 mprsas_action_smpio(sassc, ccb);
1088 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Broadcast a reset async event (e.g. AC_BUS_RESET / AC_SENT_BDR) to CAM
 * for the given target/LUN by building a temporary path, firing
 * xpt_async(), and freeing the path again.
 */
1096 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1097 target_id_t target_id, lun_id_t lun_id)
1099 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1100 struct cam_path *path;
1102 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1103 ac_code, target_id, (uintmax_t)lun_id);
1105 if (xpt_create_path(&path, NULL,
1106 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1107 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1112 xpt_async(ac_code, path, NULL);
1113 xpt_free_path(path);
/*
 * Diag-reset cleanup: walk every command slot and force-complete anything
 * outstanding with a NULL reply — invoking cm_complete, waking sleepers
 * (MPR_CM_FLAGS_WAKEUP), and marking polled commands complete.  Also
 * resynchronizes io_cmds_active and logs any command that was neither
 * completed nor already free.  Softc mutex must be held.
 */
1117 mprsas_complete_all_commands(struct mpr_softc *sc)
1119 struct mpr_command *cm;
1124 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1126 /* complete all commands with a NULL reply */
1127 for (i = 1; i < sc->num_reqs; i++) {
1128 cm = &sc->commands[i];
1129 cm->cm_reply = NULL;
1132 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1133 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1135 if (cm->cm_complete != NULL) {
1136 mprsas_log_command(cm, MPR_RECOVERY,
1137 "completing cm %p state %x ccb %p for diag "
1138 "reset\n", cm, cm->cm_state, cm->cm_ccb);
1139 cm->cm_complete(sc, cm);
1143 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1144 mprsas_log_command(cm, MPR_RECOVERY,
1145 "waking up cm %p state %x ccb %p for diag reset\n",
1146 cm, cm->cm_state, cm->cm_ccb);
1151 if (cm->cm_sc->io_cmds_active != 0) {
1152 cm->cm_sc->io_cmds_active--;
1154 mpr_dprint(cm->cm_sc, MPR_INFO, "Warning: "
1155 "io_cmds_active is out of sync - resynching to "
1159 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1160 /* this should never happen, but if it does, log */
1161 mprsas_log_command(cm, MPR_RECOVERY,
1162 "cm %p state %x flags 0x%x ccb %p during diag "
1163 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: re-enter startup/discovery mode
 * (refreezing the simq via mprsas_startup_increment), announce a bus
 * reset to CAM, flush all outstanding commands, and invalidate every
 * cached target handle so discovery can reassign them — marking each
 * target INDIAGRESET.
 */
1170 mprsas_handle_reinit(struct mpr_softc *sc)
1174 /* Go back into startup mode and freeze the simq, so that CAM
1175 * doesn't send any commands until after we've rediscovered all
1176 * targets and found the proper device handles for them.
1178 * After the reset, portenable will trigger discovery, and after all
1179 * discovery-related activities have finished, the simq will be
1182 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1183 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1184 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1185 mprsas_startup_increment(sc->sassc);
1187 /* notify CAM of a bus reset */
1188 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1191 /* complete and cleanup after all outstanding commands */
1192 mprsas_complete_all_commands(sc);
1194 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1195 __func__, sc->sassc->startup_refcount);
1197 /* zero all the target handles, since they may change after the
1198 * reset, and we have to rediscover all the targets and use the new
1201 for (i = 0; i < sc->sassc->maxtargets; i++) {
1202 if (sc->sassc->targets[i].outstanding != 0)
1203 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1204 i, sc->sassc->targets[i].outstanding);
1205 sc->sassc->targets[i].handle = 0x0;
1206 sc->sassc->targets[i].exp_dev_handle = 0x0;
1207 sc->sassc->targets[i].outstanding = 0;
1208 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler armed by mprsas_send_reset()/mprsas_send_abort().
 * Fires when a task-management request gets no completion within its
 * timeout.  Runs with the softc mutex held (asserted below) and logs
 * the stuck TM command.  NOTE(review): the recovery action taken after
 * the log call falls outside this excerpt — presumably an escalation
 * such as a diag reset; confirm against the full source.
 *
 * data: the struct mpr_command * for the timed-out TM request.
 */
1212 mprsas_tm_timeout(void *data)
1214 struct mpr_command *tm = data;
1215 struct mpr_softc *sc = tm->cm_sc;
1217 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1219 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY,
1220 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LOGICAL_UNIT_RESET task-management request
 * (installed by mprsas_send_reset()).
 *
 * Stops the TM timeout callout, validates the reply, logs the reset
 * status, then counts commands still outstanding on the same LUN:
 *  - if none remain, recovery for this LU is done; a BDR is announced
 *    to CAM and, if another LU on the target has timed-out commands,
 *    recovery continues by aborting the next one, otherwise the TM
 *    frame is freed;
 *  - if commands remain, the LU reset is treated as failed and
 *    recovery escalates to a full target reset.
 */
1225 mprsas_logical_unit_reset_complete(struct mpr_softc *sc,
1226 struct mpr_command *tm)
1228 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1229 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1230 unsigned int cm_count = 0;
1231 struct mpr_command *cm;
1232 struct mprsas_target *targ;
/* The TM completed; cancel the mprsas_tm_timeout() callout. */
1234 callout_stop(&tm->cm_callout);
1236 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1237 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1241 * Currently there should be no way we can hit this case. It only
1242 * happens when we have a failure to allocate chain frames, and
1243 * task management commands don't have S/G lists.
1245 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1246 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for LUN reset! "
1247 "This should not happen!\n", __func__, tm->cm_flags);
1248 mprsas_free_tm(sc, tm);
/* A NULL reply is only expected when a diag reset flushed the TM. */
1252 if (reply == NULL) {
1253 mprsas_log_command(tm, MPR_RECOVERY,
1254 "NULL reset reply for tm %p\n", tm);
1255 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1256 /* this completion was due to a reset, just cleanup */
1258 mprsas_free_tm(sc, tm);
1261 /* we should have gotten a reply. */
1267 mprsas_log_command(tm, MPR_RECOVERY,
1268 "logical unit reset status 0x%x code 0x%x count %u\n",
1269 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1270 le32toh(reply->TerminationCount));
1272 /* See if there are any outstanding commands for this LUN.
1273 * This could be made more efficient by using a per-LU data
1274 * structure of some sort.
1276 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1277 if (cm->cm_lun == tm->cm_lun)
1281 if (cm_count == 0) {
1282 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1283 "logical unit %u finished recovery after reset\n",
/* Tell CAM a device reset (BDR) was delivered to this target. */
1286 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1289 /* we've finished recovery for this logical unit. check and
1290 * see if some other logical unit has a timedout command
1291 * that needs to be processed.
1293 cm = TAILQ_FIRST(&targ->timedout_commands);
1295 mprsas_send_abort(sc, tm, cm);
1299 mprsas_free_tm(sc, tm);
1303 /* if we still have commands for this LUN, the reset
1304 * effectively failed, regardless of the status reported.
1305 * Escalate to a target reset.
1307 mprsas_log_command(tm, MPR_RECOVERY,
1308 "logical unit reset complete for tm %p, but still have %u "
1309 "command(s)\n", tm, cm_count);
1310 mprsas_send_reset(sc, tm,
1311 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET_RESET task-management request
 * (installed by mprsas_send_reset()).
 *
 * Stops the TM timeout callout, validates the reply, logs the reset
 * status, then checks targ->outstanding: zero means recovery for the
 * whole target (all LUs) is finished — a BDR is announced to CAM and
 * the TM frame is freed; non-zero means even the target reset failed
 * to clear the commands and recovery must escalate further (the
 * escalation call itself falls outside this excerpt).
 */
1316 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1318 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1319 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1320 struct mprsas_target *targ;
/* The TM completed; cancel the mprsas_tm_timeout() callout. */
1322 callout_stop(&tm->cm_callout);
1324 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1325 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1329 * Currently there should be no way we can hit this case. It only
1330 * happens when we have a failure to allocate chain frames, and
1331 * task management commands don't have S/G lists.
1333 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1334 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1335 "reset! This should not happen!\n", __func__, tm->cm_flags);
1336 mprsas_free_tm(sc, tm);
/* A NULL reply is only expected when a diag reset flushed the TM. */
1340 if (reply == NULL) {
1341 mprsas_log_command(tm, MPR_RECOVERY,
1342 "NULL reset reply for tm %p\n", tm);
1343 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1344 /* this completion was due to a reset, just cleanup */
1346 mprsas_free_tm(sc, tm);
1349 /* we should have gotten a reply. */
1355 mprsas_log_command(tm, MPR_RECOVERY,
1356 "target reset status 0x%x code 0x%x count %u\n",
1357 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1358 le32toh(reply->TerminationCount));
1360 if (targ->outstanding == 0) {
1361 /* we've finished recovery for this target and all
1362 * of its logical units.
1364 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1365 "recovery finished after target reset\n");
/* Tell CAM a device reset (BDR) was delivered to this target. */
1367 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1371 mprsas_free_tm(sc, tm);
1374 /* after a target reset, if this target still has
1375 * outstanding commands, the reset effectively failed,
1376 * regardless of the status reported. escalate.
1378 mprsas_log_command(tm, MPR_RECOVERY,
1379 "target reset complete for tm %p, but still have %u "
1380 "command(s)\n", tm, targ->outstanding);
1385 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset request on the TM
 * command frame 'tm'.
 *
 * type selects the TM flavor:
 *   MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET — LU reset on
 *     tm->cm_lun, completion routed to
 *     mprsas_logical_unit_reset_complete();
 *   MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET — hard/SATA link reset of
 *     the whole target (wildcard LUN), completion routed to
 *     mprsas_target_reset_complete();
 * any other value is rejected with an error log.
 *
 * Uses the high-priority request descriptor, arms a
 * MPR_RESET_TIMEOUT-second callout (mprsas_tm_timeout), then maps and
 * submits the command via mpr_map_command().  Requires a valid device
 * handle on tm->cm_targ.  Returns err from the visible paths; the
 * surrounding return statements are elided from this excerpt.
 */
1388 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1390 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1391 struct mprsas_target *target;
1394 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1395 if (target->handle == 0) {
1396 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1397 "%d\n", __func__, target->tid);
1401 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1402 req->DevHandle = htole16(target->handle);
1403 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1404 req->TaskType = type;
1406 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1407 /* XXX Need to handle invalid LUNs */
1408 MPR_SET_LUN(req->LUN, tm->cm_lun);
1409 tm->cm_targ->logical_unit_resets++;
1410 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1411 "sending logical unit reset\n");
1412 tm->cm_complete = mprsas_logical_unit_reset_complete;
1413 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1415 else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1417 * Target reset method =
1418 * SAS Hard Link Reset / SATA Link Reset
1420 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1421 tm->cm_targ->target_resets++;
1422 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1423 "sending target reset\n");
1424 tm->cm_complete = mprsas_target_reset_complete;
1425 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1428 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1432 mpr_dprint(sc, MPR_INFO, "to target %u handle 0x%04x\n", target->tid,
1434 if (target->encl_level_valid) {
1435 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
1436 "connector name (%4s)\n", target->encl_level,
1437 target->encl_slot, target->connector_name);
/* TM requests go out on the high-priority descriptor queue. */
1441 tm->cm_desc.HighPriority.RequestFlags =
1442 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1443 tm->cm_complete_data = (void *)tm;
/* Arm the TM watchdog before submitting the request. */
1445 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1446 mprsas_tm_timeout, tm);
1448 err = mpr_map_command(sc, tm);
1450 mprsas_log_command(tm, MPR_RECOVERY,
1451 "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT_TASK task-management request
 * (installed by mprsas_send_abort()).
 *
 * Stops the TM timeout callout, validates the reply, logs the abort
 * status, then inspects the target's timedout_commands list:
 *  - list empty: recovery for this target is finished, free the TM;
 *  - head of list is a different SMID than the one just aborted:
 *    the abort worked, continue recovery by aborting the next
 *    timed-out command;
 *  - head of list is still the same SMID: the aborted command never
 *    completed, so the abort is treated as failed and recovery
 *    escalates to a logical unit reset.
 */
1458 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1460 struct mpr_command *cm;
1461 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1462 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1463 struct mprsas_target *targ;
/* The TM completed; cancel the mprsas_tm_timeout() callout. */
1465 callout_stop(&tm->cm_callout);
1467 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1468 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1472 * Currently there should be no way we can hit this case. It only
1473 * happens when we have a failure to allocate chain frames, and
1474 * task management commands don't have S/G lists.
1476 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1477 mprsas_log_command(tm, MPR_RECOVERY,
1478 "cm_flags = %#x for abort %p TaskMID %u!\n",
1479 tm->cm_flags, tm, le16toh(req->TaskMID));
1480 mprsas_free_tm(sc, tm);
/* A NULL reply is only expected when a diag reset flushed the TM. */
1484 if (reply == NULL) {
1485 mprsas_log_command(tm, MPR_RECOVERY,
1486 "NULL abort reply for tm %p TaskMID %u\n",
1487 tm, le16toh(req->TaskMID));
1488 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1489 /* this completion was due to a reset, just cleanup */
1491 mprsas_free_tm(sc, tm);
1494 /* we should have gotten a reply. */
1500 mprsas_log_command(tm, MPR_RECOVERY,
1501 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1502 le16toh(req->TaskMID),
1503 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1504 le32toh(reply->TerminationCount));
1506 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1508 /* if there are no more timedout commands, we're done with
1509 * error recovery for this target.
1511 mprsas_log_command(tm, MPR_RECOVERY,
1512 "finished recovery after aborting TaskMID %u\n",
1513 le16toh(req->TaskMID));
1516 mprsas_free_tm(sc, tm);
1518 else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1519 /* abort success, but we have more timedout commands to abort */
1520 mprsas_log_command(tm, MPR_RECOVERY,
1521 "continuing recovery after aborting TaskMID %u\n",
1522 le16toh(req->TaskMID));
1524 mprsas_send_abort(sc, tm, cm);
1527 /* we didn't get a command completion, so the abort
1528 * failed as far as we're concerned. escalate.
1530 mprsas_log_command(tm, MPR_RECOVERY,
1531 "abort failed for TaskMID %u tm %p\n",
1532 le16toh(req->TaskMID), tm);
1534 mprsas_send_reset(sc, tm,
1535 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1539 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT_TASK task-management request targeting the
 * timed-out I/O command 'cm', using the TM frame 'tm'.
 *
 * Fills in the devhandle, LUN, and the victim's SMID (TaskMID),
 * routes completion to mprsas_abort_complete(), copies target/LUN
 * bookkeeping from cm into tm, arms a MPR_ABORT_TIMEOUT-second
 * watchdog (mprsas_tm_timeout), then maps and submits the request.
 * NOTE(review): the assignment of 'targ' before its first use is in
 * an elided line — presumably targ = cm->cm_targ; confirm against the
 * full source.
 */
1542 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1543 struct mpr_command *cm)
1545 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1546 struct mprsas_target *targ;
/* A zero devhandle means the target is gone; nothing to abort. */
1550 if (targ->handle == 0) {
1551 mpr_dprint(sc, MPR_ERROR,"%s null devhandle for target_id %d\n",
1552 __func__, cm->cm_ccb->ccb_h.target_id);
1556 mprsas_log_command(tm, MPR_RECOVERY|MPR_INFO,
1557 "Aborting command %p\n", cm);
1559 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1560 req->DevHandle = htole16(targ->handle);
1561 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1562 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1564 /* XXX Need to handle invalid LUNs */
1565 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the specific outstanding command to abort. */
1567 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TM requests go out on the high-priority descriptor queue. */
1570 tm->cm_desc.HighPriority.RequestFlags =
1571 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1572 tm->cm_complete = mprsas_abort_complete;
1573 tm->cm_complete_data = (void *)tm;
1574 tm->cm_targ = cm->cm_targ;
1575 tm->cm_lun = cm->cm_lun;
/* Arm the abort watchdog before submitting the request. */
1577 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1578 mprsas_tm_timeout, tm);
1582 mpr_dprint(sc, MPR_INFO, "Sending reset from %s for target ID %d\n",
1583 __func__, targ->tid);
1584 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1586 err = mpr_map_command(sc, tm);
1588 mprsas_log_command(tm, MPR_RECOVERY,
1589 "error %d sending abort for cm %p SMID %u\n",
1590 err, cm, req->TaskMID);
/*
 * Per-command timeout handler for SCSI I/O, armed by
 * mprsas_action_scsiio() via callout_reset_sbt().
 *
 * First runs the interrupt handler in case the completion is merely
 * pending; if the command turns out to be free it "almost timed out"
 * and nothing more is done.  Otherwise the command is marked
 * CAM_CMD_TIMEOUT / MPR_CM_STATE_TIMEDOUT and queued on the target's
 * timedout_commands list.  Recovery then starts (or continues):
 *  - if the target already has a TM in flight, the command just waits
 *    its turn;
 *  - else a TM frame is allocated and an abort is sent for this
 *    command;
 *  - if no TM frame is available, the event is only logged (see the
 *    Isilon note below about HighPriority credits).
 *
 * data: the struct mpr_command * that timed out.
 */
1595 mprsas_scsiio_timeout(void *data)
1597 struct mpr_softc *sc;
1598 struct mpr_command *cm;
1599 struct mprsas_target *targ;
1601 cm = (struct mpr_command *)data;
1605 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1607 mpr_dprint(sc, MPR_XINFO, "Timeout checking cm %p\n", cm);
1610 * Run the interrupt handler to make sure it's not pending. This
1611 * isn't perfect because the command could have already completed
1612 * and been re-used, though this is unlikely.
1614 mpr_intr_locked(sc);
1615 if (cm->cm_state == MPR_CM_STATE_FREE) {
1616 mprsas_log_command(cm, MPR_XINFO,
1617 "SCSI command %p almost timed out\n", cm);
1621 if (cm->cm_ccb == NULL) {
1622 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
1629 mprsas_log_command(cm, MPR_ERROR, "command timeout cm %p ccb %p "
1630 "target %u, handle(0x%04x)\n", cm, cm->cm_ccb, targ->tid,
1632 if (targ->encl_level_valid) {
1633 mpr_dprint(sc, MPR_ERROR, "At enclosure level %d, slot %d, "
1634 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
1635 targ->connector_name);
1638 /* XXX first, check the firmware state, to see if it's still
1639 * operational. if not, do a diag reset.
1641 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1642 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1643 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1645 if (targ->tm != NULL) {
1646 /* target already in recovery, just queue up another
1647 * timedout command to be processed later.
1649 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1650 "processing by tm %p\n", cm, targ->tm);
1652 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1653 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1656 /* start recovery by aborting the first timedout command */
1657 mprsas_send_abort(sc, targ->tm, cm);
1660 /* XXX queue this target up for recovery once a TM becomes
1661 * available. The firmware only has a limited number of
1662 * HighPriority credits for the high priority requests used
1663 * for task management, and we ran out.
1665 * Isilon: don't worry about this for now, since we have
1666 * more credits than disks in an enclosure, and limit
1667 * ourselves to one TM per target for recovery.
1669 mpr_dprint(sc, MPR_RECOVERY,
1670 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI I/O ccb into
 * an MPI2 SCSI_IO request and submit it to the controller.
 *
 * Rejects the request early (completing the ccb with an appropriate
 * status) when: the target has no devhandle or is a RAID component
 * (CAM_DEV_NOT_THERE); the ccb was already aborted upstream; the
 * target is being removed (CAM_REQ_CMP for volumes, CAM_SEL_TIMEOUT
 * otherwise); the controller is shutting down; or the target has a
 * TM reset in progress (devq is frozen until the reset finishes).
 * If no command frame is available or a diag reset is in progress,
 * the simq is frozen and the ccb is requeued.
 *
 * Otherwise it fills in the MPI2_SCSI_IO_REQUEST (devhandle, sense
 * buffer, data length, direction, tag/queue control, TLR bits, LUN,
 * CDB), optionally sets up EEDP/T10-PI fields for formatted LUNs,
 * selects the fast-path or normal SCSI IO descriptor, arms the
 * per-command timeout (mprsas_scsiio_timeout), accounts the command
 * on the target, and maps/submits it via mpr_map_command().
 * Completion is routed to mprsas_scsiio_complete().
 */
1675 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1677 MPI2_SCSI_IO_REQUEST *req;
1678 struct ccb_scsiio *csio;
1679 struct mpr_softc *sc;
1680 struct mprsas_target *targ;
1681 struct mprsas_lun *lun;
1682 struct mpr_command *cm;
1683 uint8_t i, lba_byte, *ref_tag_addr;
1684 uint16_t eedp_flags;
1685 uint32_t mpi_control;
1689 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1692 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1693 ("Target %d out of bounds in XPT_SCSI_IO\n",
1694 csio->ccb_h.target_id));
1695 targ = &sassc->targets[csio->ccb_h.target_id];
1696 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* No devhandle: target was never discovered or has gone away. */
1697 if (targ->handle == 0x0) {
1698 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1699 __func__, csio->ccb_h.target_id);
1700 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID members are owned by the volume; direct I/O is refused. */
1704 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1705 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1706 "supported %u\n", __func__, csio->ccb_h.target_id);
1707 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1712 * Sometimes, it is possible to get a command that is not "In
1713 * Progress" and was actually aborted by the upper layer. Check for
1714 * this here and complete the command without error.
1716 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1717 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1718 "target %u\n", __func__, csio->ccb_h.target_id);
1723 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1724 * that the volume has timed out. We want volumes to be enumerated
1725 * until they are deleted/removed, not just failed.
1727 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1728 if (targ->devinfo == 0)
1729 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1731 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1736 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1737 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1738 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1744 * If target has a reset in progress, freeze the devq and return. The
1745 * devq will be released when the TM reset is finished.
1747 if (targ->flags & MPRSAS_TARGET_INRESET) {
1748 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1749 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1750 __func__, targ->tid);
1751 xpt_freeze_devq(ccb->ccb_h.path, 1);
1756 cm = mpr_alloc_command(sc);
/* Out of command frames (or mid diag reset): freeze and requeue. */
1757 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1759 mpr_free_command(sc, cm);
1761 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1762 xpt_freeze_simq(sassc->sim, 1);
1763 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1765 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1766 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI IO request in the command frame. */
1771 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1772 bzero(req, sizeof(*req));
1773 req->DevHandle = htole16(targ->handle);
1774 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1776 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1777 req->SenseBufferLength = MPR_SENSE_LEN;
1779 req->ChainOffset = 0;
1780 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1785 req->DataLength = htole32(csio->dxfer_len);
1786 req->BidirectionalDataLength = 0;
1787 req->IoFlags = htole16(csio->cdb_len);
1790 /* Note: BiDirectional transfers are not supported */
1791 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1793 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1794 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1797 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1798 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1802 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field set. */
1806 if (csio->cdb_len == 32)
1807 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1809 * It looks like the hardware doesn't require an explicit tag
1810 * number for each transaction. SAM Task Management not supported
1813 switch (csio->tag_action) {
1814 case MSG_HEAD_OF_Q_TAG:
1815 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1817 case MSG_ORDERED_Q_TAG:
1818 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1821 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1823 case CAM_TAG_ACTION_NONE:
1824 case MSG_SIMPLE_Q_TAG:
1826 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Fold in this target's Transport Layer Retries setting. */
1829 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1830 req->Control = htole32(mpi_control);
1832 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1833 mpr_free_command(sc, cm);
1834 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from either the pointer or the inline bytes. */
1839 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1840 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1842 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1843 req->IoFlags = htole16(csio->cdb_len);
1846 * Check if EEDP is supported and enabled. If it is then check if the
1847 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1848 * is formatted for EEDP support. If all of this is true, set CDB up
1849 * for EEDP transfer.
1851 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1852 if (sc->eedp_enabled && eedp_flags) {
1853 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1854 if (lun->lun_id == csio->ccb_h.target_lun) {
1859 if ((lun != NULL) && (lun->eedp_formatted)) {
1860 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1861 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1862 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1863 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1864 req->EEDPFlags = htole16(eedp_flags);
1867 * If CDB less than 32, fill in Primary Ref Tag with
1868 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1869 * already there. Also, set protection bit. FreeBSD
1870 * currently does not support CDBs bigger than 16, but
1871 * the code doesn't hurt, and will be here for the
1874 if (csio->cdb_len != 32) {
/* LBA field starts at byte 6 of a 16-byte CDB, byte 2 otherwise. */
1875 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1876 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1877 PrimaryReferenceTag;
1878 for (i = 0; i < 4; i++) {
1880 req->CDB.CDB32[lba_byte + i];
1883 req->CDB.EEDP32.PrimaryReferenceTag =
1885 CDB.EEDP32.PrimaryReferenceTag);
1886 req->CDB.EEDP32.PrimaryApplicationTagMask =
1888 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1892 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1893 req->EEDPFlags = htole16(eedp_flags);
1894 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1900 cm->cm_length = csio->dxfer_len;
1901 if (cm->cm_length != 0) {
1903 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
/* SGL lives after the 24-word fixed part of the IO frame. */
1907 cm->cm_sge = &req->SGL;
1908 cm->cm_sglsize = (32 - 24) * 4;
1909 cm->cm_complete = mprsas_scsiio_complete;
1910 cm->cm_complete_data = ccb;
1912 cm->cm_lun = csio->ccb_h.target_lun;
1915 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
1916 * and set descriptor type.
1918 if (targ->scsi_req_desc_type ==
1919 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
1920 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
1921 cm->cm_desc.FastPathSCSIIO.RequestFlags =
1922 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
1923 cm->cm_desc.FastPathSCSIIO.DevHandle = htole16(targ->handle);
1925 cm->cm_desc.SCSIIO.RequestFlags =
1926 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1927 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Arm the per-command timeout using the ccb's timeout (ms). */
1930 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1931 mprsas_scsiio_timeout, cm, 0);
1934 targ->outstanding++;
1935 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1936 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1938 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1939 __func__, cm, ccb, targ->outstanding);
1941 mpr_map_command(sc, cm);
/*
 * Log a human-readable description of an MPI2 task-management
 * response code.  Maps each MPI2_SCSITASKMGMT_RSP_* value to a short
 * string and prints it at MPR_XINFO level; unrecognized codes fall
 * through to the default description (the default case label and the
 * per-case break statements are elided from this excerpt).
 */
1946 mpr_response_code(struct mpr_softc *sc, u8 response_code)
1950 switch (response_code) {
1951 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
1952 desc = "task management request completed";
1954 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
1955 desc = "invalid frame";
1957 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
1958 desc = "task management request not supported";
1960 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
1961 desc = "task management request failed";
1963 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
1964 desc = "task management request succeeded";
1966 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
1967 desc = "invalid lun";
1970 desc = "overlapped tag attempted";
1972 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
1973 desc = "task queued, however not sent to target";
1979 mpr_dprint(sc, MPR_XINFO, "response_code(0x%01x): %s\n", response_code,
1984 * mpr_sc_failed_io_info - translates a non-successful SCSI_IO request
/*
 * Decode and log the failure details of a completed SCSI_IO request:
 * IOC status, SCSI status, and SCSI state are each mapped to a
 * descriptive string and printed at MPR_XINFO level, along with
 * enclosure location (when valid), autosense data (when valid and
 * XINFO debugging is enabled), and the TM response code embedded in
 * the response info (when present).  A log_info of 0x31170000 is
 * special-cased early (the action taken is in an elided line).
 */
1987 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
1988 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
1992 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1993 MPI2_IOCSTATUS_MASK;
1994 u8 scsi_state = mpi_reply->SCSIState;
1995 u8 scsi_status = mpi_reply->SCSIStatus;
1996 char *desc_ioc_state = NULL;
1997 char *desc_scsi_status = NULL;
/* scsi_state description is built incrementally in sc->tmp_string. */
1998 char *desc_scsi_state = sc->tmp_string;
1999 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
2001 if (log_info == 0x31170000)
/* Map the masked IOC status to a human-readable string. */
2004 switch (ioc_status) {
2005 case MPI2_IOCSTATUS_SUCCESS:
2006 desc_ioc_state = "success";
2008 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2009 desc_ioc_state = "invalid function";
2011 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2012 desc_ioc_state = "scsi recovered error";
2014 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2015 desc_ioc_state = "scsi invalid dev handle";
2017 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2018 desc_ioc_state = "scsi device not there";
2020 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2021 desc_ioc_state = "scsi data overrun";
2023 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2024 desc_ioc_state = "scsi data underrun";
2026 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2027 desc_ioc_state = "scsi io data error";
2029 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2030 desc_ioc_state = "scsi protocol error";
2032 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2033 desc_ioc_state = "scsi task terminated";
2035 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2036 desc_ioc_state = "scsi residual mismatch";
2038 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2039 desc_ioc_state = "scsi task mgmt failed";
2041 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2042 desc_ioc_state = "scsi ioc terminated";
2044 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2045 desc_ioc_state = "scsi ext terminated";
2047 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
2048 desc_ioc_state = "eedp guard error";
2050 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
2051 desc_ioc_state = "eedp ref tag error";
2053 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
2054 desc_ioc_state = "eedp app tag error";
2057 desc_ioc_state = "unknown";
/* Map the SCSI status byte to a human-readable string. */
2061 switch (scsi_status) {
2062 case MPI2_SCSI_STATUS_GOOD:
2063 desc_scsi_status = "good";
2065 case MPI2_SCSI_STATUS_CHECK_CONDITION:
2066 desc_scsi_status = "check condition";
2068 case MPI2_SCSI_STATUS_CONDITION_MET:
2069 desc_scsi_status = "condition met";
2071 case MPI2_SCSI_STATUS_BUSY:
2072 desc_scsi_status = "busy";
2074 case MPI2_SCSI_STATUS_INTERMEDIATE:
2075 desc_scsi_status = "intermediate";
2077 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
2078 desc_scsi_status = "intermediate condmet";
2080 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
2081 desc_scsi_status = "reservation conflict";
2083 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
2084 desc_scsi_status = "command terminated";
2086 case MPI2_SCSI_STATUS_TASK_SET_FULL:
2087 desc_scsi_status = "task set full";
2089 case MPI2_SCSI_STATUS_ACA_ACTIVE:
2090 desc_scsi_status = "aca active";
2092 case MPI2_SCSI_STATUS_TASK_ABORTED:
2093 desc_scsi_status = "task aborted";
2096 desc_scsi_status = "unknown";
/*
 * Build the state description by appending one fragment per set bit.
 * NOTE(review): an elided line between these two statements appears
 * to guard the literal assignment (e.g. only when scsi_state is 0, in
 * which case none of the strcat calls below execute) — confirm
 * against the full source before touching this code.
 */
2100 desc_scsi_state[0] = '\0';
2102 desc_scsi_state = " ";
2103 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
2104 strcat(desc_scsi_state, "response info ");
2105 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
2106 strcat(desc_scsi_state, "state terminated ");
2107 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
2108 strcat(desc_scsi_state, "no status ");
2109 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
2110 strcat(desc_scsi_state, "autosense failed ");
2111 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
2112 strcat(desc_scsi_state, "autosense valid ");
2114 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2115 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2116 if (targ->encl_level_valid) {
2117 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2118 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2119 targ->connector_name);
2121 /* We can add more detail about underflow data here
2124 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2125 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status,
2126 desc_scsi_state, scsi_state);
/* Dump the autosense buffer only when XINFO debugging is on. */
2128 if (sc->mpr_debug & MPR_XINFO &&
2129 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2130 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2131 scsi_sense_print(csio);
2132 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
/* The first response-info byte carries the TM response code. */
2135 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2136 response_info = le32toh(mpi_reply->ResponseInfo);
2137 response_bytes = (u8 *)&response_info;
2138 mpr_response_code(sc,response_bytes[0]);
2143 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2145 MPI2_SCSI_IO_REPLY *rep;
2147 struct ccb_scsiio *csio;
2148 struct mprsas_softc *sassc;
2149 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2150 u8 *TLR_bits, TLR_on;
2153 struct mprsas_target *target;
2154 target_id_t target_id;
2157 mpr_dprint(sc, MPR_TRACE,
2158 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2159 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2160 cm->cm_targ->outstanding);
2162 callout_stop(&cm->cm_callout);
2163 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2166 ccb = cm->cm_complete_data;
2168 target_id = csio->ccb_h.target_id;
2169 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2171 * XXX KDM if the chain allocation fails, does it matter if we do
2172 * the sync and unload here? It is simpler to do it in every case,
2173 * assuming it doesn't cause problems.
2175 if (cm->cm_data != NULL) {
2176 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2177 dir = BUS_DMASYNC_POSTREAD;
2178 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2179 dir = BUS_DMASYNC_POSTWRITE;
2180 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2181 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2184 cm->cm_targ->completed++;
2185 cm->cm_targ->outstanding--;
2186 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2187 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2189 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2190 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2191 if (cm->cm_reply != NULL)
2192 mprsas_log_command(cm, MPR_RECOVERY,
2193 "completed timedout cm %p ccb %p during recovery "
2194 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2195 le16toh(rep->IOCStatus), rep->SCSIStatus,
2196 rep->SCSIState, le32toh(rep->TransferCount));
2198 mprsas_log_command(cm, MPR_RECOVERY,
2199 "completed timedout cm %p ccb %p during recovery\n",
2201 } else if (cm->cm_targ->tm != NULL) {
2202 if (cm->cm_reply != NULL)
2203 mprsas_log_command(cm, MPR_RECOVERY,
2204 "completed cm %p ccb %p during recovery "
2205 "ioc %x scsi %x state %x xfer %u\n",
2206 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2207 rep->SCSIStatus, rep->SCSIState,
2208 le32toh(rep->TransferCount));
2210 mprsas_log_command(cm, MPR_RECOVERY,
2211 "completed cm %p ccb %p during recovery\n",
2213 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2214 mprsas_log_command(cm, MPR_RECOVERY,
2215 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2218 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2220 * We ran into an error after we tried to map the command,
2221 * so we're getting a callback without queueing the command
2222 * to the hardware. So we set the status here, and it will
2223 * be retained below. We'll go through the "fast path",
2224 * because there can be no reply when we haven't actually
2225 * gone out to the hardware.
2227 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2230 * Currently the only error included in the mask is
2231 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2232 * chain frames. We need to freeze the queue until we get
2233 * a command that completed without this error, which will
2234 * hopefully have some chain frames attached that we can
2235 * use. If we wanted to get smarter about it, we would
2236 * only unfreeze the queue in this condition when we're
2237 * sure that we're getting some chain frames back. That's
2238 * probably unnecessary.
2240 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2241 xpt_freeze_simq(sassc->sim, 1);
2242 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2243 mpr_dprint(sc, MPR_INFO, "Error sending command, "
2244 "freezing SIM queue\n");
2249 * If this is a Start Stop Unit command and it was issued by the driver
2250 * during shutdown, decrement the refcount to account for all of the
2251 * commands that were sent. All SSU commands should be completed before
2252 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2255 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2256 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2260 /* Take the fast path to completion */
2261 if (cm->cm_reply == NULL) {
2262 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2263 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2264 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2266 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2267 csio->scsi_status = SCSI_STATUS_OK;
2269 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2270 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2271 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2272 mpr_dprint(sc, MPR_XINFO,
2273 "Unfreezing SIM queue\n");
2278 * There are two scenarios where the status won't be
2279 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2280 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2282 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2284 * Freeze the dev queue so that commands are
2285 * executed in the correct order after error
2288 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2289 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2291 mpr_free_command(sc, cm);
2296 mprsas_log_command(cm, MPR_XINFO,
2297 "ioc %x scsi %x state %x xfer %u\n",
2298 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2299 le32toh(rep->TransferCount));
2301 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2302 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2303 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2305 case MPI2_IOCSTATUS_SUCCESS:
2306 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2308 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2309 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2310 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2312 /* Completion failed at the transport level. */
2313 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2314 MPI2_SCSI_STATE_TERMINATED)) {
2315 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2319 /* In a modern packetized environment, an autosense failure
2320 * implies that there's not much else that can be done to
2321 * recover the command.
2323 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2324 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2329 * CAM doesn't care about SAS Response Info data, but if this is
2330 * the state check if TLR should be done. If not, clear the
2331 * TLR_bits for the target.
2333 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2334 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2335 == MPR_SCSI_RI_INVALID_FRAME)) {
2336 sc->mapping_table[target_id].TLR_bits =
2337 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2341 * Intentionally override the normal SCSI status reporting
2342 * for these two cases. These are likely to happen in a
2343 * multi-initiator environment, and we want to make sure that
2344 * CAM retries these commands rather than fail them.
2346 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2347 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2348 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2352 /* Handle normal status and sense */
2353 csio->scsi_status = rep->SCSIStatus;
2354 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2355 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2357 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2359 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2360 int sense_len, returned_sense_len;
2362 returned_sense_len = min(le32toh(rep->SenseCount),
2363 sizeof(struct scsi_sense_data));
2364 if (returned_sense_len < csio->sense_len)
2365 csio->sense_resid = csio->sense_len -
2368 csio->sense_resid = 0;
2370 sense_len = min(returned_sense_len,
2371 csio->sense_len - csio->sense_resid);
2372 bzero(&csio->sense_data, sizeof(csio->sense_data));
2373 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2374 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2378 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2379 * and it's page code 0 (Supported Page List), and there is
2380 * inquiry data, and this is for a sequential access device, and
2381 * the device is an SSP target, and TLR is supported by the
2382 * controller, turn the TLR_bits value ON if page 0x90 is
2385 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2386 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2387 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2388 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2389 (csio->data_ptr != NULL) &&
2390 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2391 (sc->control_TLR) &&
2392 (sc->mapping_table[target_id].device_info &
2393 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2394 vpd_list = (struct scsi_vpd_supported_page_list *)
2396 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2397 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2398 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2399 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2400 csio->cdb_io.cdb_bytes[4];
2401 alloc_len -= csio->resid;
2402 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2403 if (vpd_list->list[i] == 0x90) {
2411 * If this is a SATA direct-access end device, mark it so that
2412 * a SCSI StartStopUnit command will be sent to it when the
2413 * driver is being shutdown.
2415 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2416 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2417 (sc->mapping_table[target_id].device_info &
2418 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2419 ((sc->mapping_table[target_id].device_info &
2420 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2421 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2422 target = &sassc->targets[target_id];
2423 target->supports_SSU = TRUE;
2424 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2428 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2429 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2431 * If devinfo is 0 this will be a volume. In that case don't
2432 * tell CAM that the volume is not there. We want volumes to
2433 * be enumerated until they are deleted/removed, not just
2436 if (cm->cm_targ->devinfo == 0)
2437 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2439 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2441 case MPI2_IOCSTATUS_INVALID_SGL:
2442 mpr_print_scsiio_cmd(sc, cm);
2443 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2445 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2447 * This is one of the responses that comes back when an I/O
2448 * has been aborted. If it is because of a timeout that we
2449 * initiated, just set the status to CAM_CMD_TIMEOUT.
2450 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2451 * command is the same (it gets retried, subject to the
2452 * retry counter), the only difference is what gets printed
2455 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2456 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2458 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2460 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2461 /* resid is ignored for this condition */
2463 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2465 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2466 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2468 * Since these are generally external (i.e. hopefully
2469 * transient transport-related) errors, retry these without
2470 * decrementing the retry count.
2472 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2473 mprsas_log_command(cm, MPR_INFO,
2474 "terminated ioc %x scsi %x state %x xfer %u\n",
2475 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2476 le32toh(rep->TransferCount));
2478 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2479 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2480 case MPI2_IOCSTATUS_INVALID_VPID:
2481 case MPI2_IOCSTATUS_INVALID_FIELD:
2482 case MPI2_IOCSTATUS_INVALID_STATE:
2483 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2484 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2485 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2486 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2487 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2489 mprsas_log_command(cm, MPR_XINFO,
2490 "completed ioc %x scsi %x state %x xfer %u\n",
2491 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2492 le32toh(rep->TransferCount));
2493 csio->resid = cm->cm_length;
2494 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2498 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2500 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2501 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2502 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2503 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2507 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2508 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2509 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2512 mpr_free_command(sc, cm);
2516 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command.  Validates the
 * reply frame, logs IOC/SAS status, sets the CAM status on the CCB
 * that was stashed in cm_complete_data, then tears down the DMA
 * mapping and frees the command.
 *
 * NOTE(review): this listing is a lossy excerpt -- braces, blank
 * lines and some statements (e.g. the NULL-reply test, "bailout"
 * labels, xpt_done()) are missing; comments describe only what the
 * visible lines establish.
 */
2518 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2520 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2521 MPI2_SMP_PASSTHROUGH_REQUEST *req;
/* The CCB to complete was saved when the command was queued. */
2525 ccb = cm->cm_complete_data;
2528  * Currently there should be no way we can hit this case. It only
2529  * happens when we have a failure to allocate chain frames, and SMP
2530  * commands require two S/G elements only. That should be handled
2531  * in the standard request size.
/* Chain-frame allocation error recorded at map time -> fail the CCB. */
2533 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2534 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2535 "request!\n", __func__, cm->cm_flags);
2536 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2540 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
/* Presumably guarded by an "if (rpl == NULL)" test -- TODO confirm. */
2542 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2543 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the two little-endian words. */
2547 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2548 sasaddr = le32toh(req->SASAddress.Low);
2549 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
/* Either a non-success IOCStatus or a SAS-layer error fails the CCB. */
2551 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2552 MPI2_IOCSTATUS_SUCCESS ||
2553 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2554 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2555 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2556 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2560 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2561 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2563 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2564 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2566 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2570  * We sync in both directions because we had DMAs in the S/G list
2571  * in both directions.
2573 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2574 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2575 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2576 mpr_free_command(sc, cm);
/*
 * Build and dispatch an SMP passthrough request for the given CCB and
 * SAS address.  Resolves the request/response virtual addresses from
 * the CCB (handling the per-FreeBSD-version CAM data-pointer styles),
 * fills in an MPI2_SMP_PASSTHROUGH_REQUEST, describes both buffers
 * with a two-element uio, and hands the command to mpr_map_command().
 *
 * NOTE(review): lossy excerpt -- function return type, braces, some
 * error-path statements and the final xpt_done() are not visible.
 */
2581 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
2584 struct mpr_command *cm;
2585 uint8_t *request, *response;
2586 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2587 struct mpr_softc *sc;
/*
 * Newer CAM (>= 1000028, or the 9.2 backport) encodes the data style in
 * CAM_DATA_MASK; older CAM used CAM_DATA_PHYS/CAM_SG_LIST_PHYS flags.
 */
2595 #if (__FreeBSD_version >= 1000028) || \
2596 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2597 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2598 case CAM_DATA_PADDR:
2599 case CAM_DATA_SG_PADDR:
2601  * XXX We don't yet support physical addresses here.
2603 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2604 "supported\n", __func__);
2605 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2610  * The chip does not support more than one buffer for the
2611  * request or response.
2613 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2614 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2615 mpr_dprint(sc, MPR_ERROR,
2616 "%s: multiple request or response buffer segments "
2617 "not supported for SMP\n", __func__);
2618 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2624  * The CAM_SCATTER_VALID flag was originally implemented
2625  * for the XPT_SCSI_IO CCB, which only has one data pointer.
2626  * We have two. So, just take that flag to mean that we
2627  * might have S/G lists, and look at the S/G segment count
2628  * to figure out whether that is the case for each individual
/* A single-entry S/G list: use its virtual address directly. */
2631 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2632 bus_dma_segment_t *req_sg;
2634 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2635 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2637 request = ccb->smpio.smp_request;
2639 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2640 bus_dma_segment_t *rsp_sg;
2642 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2643 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2645 response = ccb->smpio.smp_response;
2647 case CAM_DATA_VADDR:
2648 request = ccb->smpio.smp_request;
2649 response = ccb->smpio.smp_response;
2652 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2656 #else /* __FreeBSD_version < 1000028 */
2658  * XXX We don't yet support physical addresses here.
2660 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
2661 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2662 "supported\n", __func__);
2663 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2669  * If the user wants to send an S/G list, check to make sure they
2670  * have single buffers.
2672 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
2674  * The chip does not support more than one buffer for the
2675  * request or response.
2677 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2678 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2679 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2680 "response buffer segments not supported for SMP\n",
2682 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2688  * The CAM_SCATTER_VALID flag was originally implemented
2689  * for the XPT_SCSI_IO CCB, which only has one data pointer.
2690  * We have two. So, just take that flag to mean that we
2691  * might have S/G lists, and look at the S/G segment count
2692  * to figure out whether that is the case for each individual
2695 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2696 bus_dma_segment_t *req_sg;
2698 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2699 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2701 request = ccb->smpio.smp_request;
2703 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2704 bus_dma_segment_t *rsp_sg;
2706 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2707 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2709 response = ccb->smpio.smp_response;
2711 request = ccb->smpio.smp_request;
2712 response = ccb->smpio.smp_response;
2714 #endif /* __FreeBSD_version < 1000028 */
2716 cm = mpr_alloc_command(sc);
/* Presumably an "if (cm == NULL)" guard -- TODO confirm. */
2718 mpr_dprint(sc, MPR_ERROR,
2719 "%s: cannot allocate command\n", __func__);
2720 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the MPI2 SMP passthrough request in the command frame. */
2725 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2726 bzero(req, sizeof(*req));
2727 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2729 /* Allow the chip to use any route to this SAS address. */
2730 req->PhysicalPort = 0xff;
2732 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2734 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2736 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2737 "%#jx\n", __func__, (uintmax_t)sasaddr);
2739 mpr_init_sge(cm, req, &req->SGL);
2742  * Set up a uio to pass into mpr_map_command(). This allows us to
2743  * do one map command, and one busdma call in there.
2745 cm->cm_uio.uio_iov = cm->cm_iovec;
2746 cm->cm_uio.uio_iovcnt = 2;
2747 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2750  * The read/write flag isn't used by busdma, but set it just in
2751  * case. This isn't exactly accurate, either, since we're going in
2754 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 = outbound request frame, iovec 1 = inbound response buffer. */
2756 cm->cm_iovec[0].iov_base = request;
2757 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2758 cm->cm_iovec[1].iov_base = response;
2759 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2761 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2762 cm->cm_iovec[1].iov_len;
2765  * Trigger a warning message in mpr_data_cb() for the user if we
2766  * wind up exceeding two S/G segments. The chip expects one
2767  * segment for the request and another for the response.
2769 cm->cm_max_segs = 2;
2771 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2772 cm->cm_complete = mprsas_smpio_complete;
2773 cm->cm_complete_data = ccb;
2776  * Tell the mapping code that we're using a uio, and that this is
2777  * an SMP passthrough request. There is a little special-case
2778  * logic there (in mpr_data_cb()) to handle the bidirectional
2781 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
2782 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
2784 /* The chip data format is little endian. */
2785 req->SASAddress.High = htole32(sasaddr >> 32);
2786 req->SASAddress.Low = htole32(sasaddr);
2789  * XXX Note that we don't have a timeout/abort mechanism here.
2790  * From the manual, it looks like task management requests only
2791  * work for SCSI IO and SATA passthrough requests. We may need to
2792  * have a mechanism to retry requests in the event of a chip reset
2793  * at least. Hopefully the chip will insure that any errors short
2794  * of that are relayed back to the driver.
2796 error = mpr_map_command(sc, cm);
2797 if ((error != 0) && (error != EINPROGRESS)) {
2798 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
2799 "mpr_map_command()\n", __func__, error);
/* Shared error exit: release the command and fail the CCB. */
2806 mpr_free_command(sc, cm);
2807 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * XPT_SMP_IO action handler.  Determines the SAS address that should
 * receive the SMP request -- either the target itself (if it embeds an
 * SMP target) or its parent expander -- and forwards the CCB to
 * mprsas_send_smpcmd().
 *
 * NOTE(review): lossy excerpt -- return type, braces, "bailout"
 * label/xpt_done() and some else-branches are not visible.
 */
2813 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
2815 struct mpr_softc *sc;
2816 struct mprsas_target *targ;
2817 uint64_t sasaddr = 0;
2822  * Make sure the target exists.
2824 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2825 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2826 targ = &sassc->targets[ccb->ccb_h.target_id];
/* Handle 0 means the firmware has no device at this target ID. */
2827 if (targ->handle == 0x0) {
2828 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
2829 __func__, ccb->ccb_h.target_id);
2830 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2836  * If this device has an embedded SMP target, we'll talk to it
2838  * figure out what the expander's address is.
2840 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2841 sasaddr = targ->sasaddr;
2844  * If we don't have a SAS address for the expander yet, try
2845  * grabbing it from the page 0x83 information cached in the
2846  * transport layer for this target. LSI expanders report the
2847  * expander SAS address as the port-associated SAS address in
2848  * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2851  * XXX KDM disable this for now, but leave it commented out so that
2852  * it is obvious that this is another possible way to get the SAS
2855  * The parent handle method below is a little more reliable, and
2856  * the other benefit is that it works for devices other than SES
2857  * devices. So you can send a SMP request to a da(4) device and it
2858  * will get routed to the expander that device is attached to.
2859  * (Assuming the da(4) device doesn't contain an SMP target...)
2863 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2867  * If we still don't have a SAS address for the expander, look for
2868  * the parent device of this device, which is probably the expander.
/*
 * Two strategies follow, selected at compile time: the legacy probe
 * walks the target list for the parent handle; the default path uses
 * the parent info cached on the target itself.
 */
2871 #ifdef OLD_MPR_PROBE
2872 struct mprsas_target *parent_target;
2875 if (targ->parent_handle == 0x0) {
2876 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2877 "a valid parent handle!\n", __func__, targ->handle);
2878 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2881 #ifdef OLD_MPR_PROBE
2882 parent_target = mprsas_find_target_by_handle(sassc, 0,
2883 targ->parent_handle);
2885 if (parent_target == NULL) {
2886 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
2887 "a valid parent target!\n", __func__, targ->handle);
2888 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2892 if ((parent_target->devinfo &
2893 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2894 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2895 "does not have an SMP target!\n", __func__,
2896 targ->handle, parent_target->handle);
2897 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2901 sasaddr = parent_target->sasaddr;
2902 #else /* OLD_MPR_PROBE */
2903 if ((targ->parent_devinfo &
2904 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2905 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
2906 "does not have an SMP target!\n", __func__,
2907 targ->handle, targ->parent_handle);
2908 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2912 if (targ->parent_sasaddr == 0x0) {
2913 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
2914 "%d does not have a valid SAS address!\n", __func__,
2915 targ->handle, targ->parent_handle);
2916 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2920 sasaddr = targ->parent_sasaddr;
2921 #endif /* OLD_MPR_PROBE */
/* No usable address found via any strategy: fail the CCB. */
2926 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
2927 "handle %d\n", __func__, targ->handle);
2928 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2931 mprsas_send_smpcmd(sassc, ccb, sasaddr);
2939 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler.  Allocates a task-management command,
 * builds a Target Reset TM request for the CCB's target and queues it;
 * mprsas_resetdev_complete() finishes the CCB.  Must be called with
 * the softc mutex held (asserted below).
 *
 * NOTE(review): lossy excerpt -- return type, braces, the tm == NULL
 * guard body context and xpt_done() on the error path are not visible.
 */
2942 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
2944 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
2945 struct mpr_softc *sc;
2946 struct mpr_command *tm;
2947 struct mprsas_target *targ;
2949 MPR_FUNCTRACE(sassc->sc);
2950 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
2952 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2953 ("Target %d out of bounds in XPT_RESET_DEV\n",
2954 ccb->ccb_h.target_id));
2956 tm = mpr_alloc_command(sc);
/* Presumably inside an "if (tm == NULL)" guard -- TODO confirm. */
2958 mpr_dprint(sc, MPR_ERROR,
2959 "command alloc failure in mprsas_action_resetdev\n");
2960 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in a SCSI Task Management request for a full target reset. */
2965 targ = &sassc->targets[ccb->ccb_h.target_id];
2966 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
2967 req->DevHandle = htole16(targ->handle);
2968 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2969 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
2971 /* SAS Hard Link Reset / SATA Link Reset */
2972 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* TM requests go on the high-priority queue. */
2975 tm->cm_desc.HighPriority.RequestFlags =
2976 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2977 tm->cm_complete = mprsas_resetdev_complete;
2978 tm->cm_complete_data = ccb;
2980 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
2981 __func__, targ->tid);
/* Block further I/O to this target until the reset completes. */
2983 targ->flags |= MPRSAS_TARGET_INRESET;
2985 mpr_map_command(sc, tm);
/*
 * Completion handler for the Target Reset TM issued by
 * mprsas_action_resetdev().  Translates the TM response code into a
 * CAM status on the original CCB, announces the bus device reset to
 * CAM on success, and releases the TM command.
 *
 * NOTE(review): lossy excerpt -- return type, braces, the "bailout"
 * label and xpt_done() are not visible.
 */
2989 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
2991 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
2995 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2997 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
2998 ccb = tm->cm_complete_data;
3001  * Currently there should be no way we can hit this case. It only
3002  * happens when we have a failure to allocate chain frames, and
3003  * task management commands don't have S/G lists.
3005 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3006 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3008 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3010 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3011 "handle %#04x! This should not happen!\n", __func__,
3012 tm->cm_flags, req->DevHandle);
3013 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3017 mpr_dprint(sc, MPR_XINFO,
3018 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3019 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/*
 * NOTE(review): le32toh() on ResponseCode looks suspicious -- in the
 * MPI2 headers the TM reply ResponseCode appears narrower than 32
 * bits; verify the field width against mpi2_init.h before relying on
 * this on a big-endian host.
 */
3021 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3022 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
/* Tell CAM a bus device reset was delivered so periphs can recover. */
3023 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3027 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3031 mprsas_free_tm(sc, tm);
/*
 * CAM SIM poll routine, used when interrupts are unavailable (e.g.
 * during kernel dumps/panic).  Disables MPR_TRACE debugging first so
 * the flood of trace output does not stall the polled path, then runs
 * the interrupt handler directly.
 */
3036 mprsas_poll(struct cam_sim *sim)
3038 struct mprsas_softc *sassc;
3040 sassc = cam_sim_softc(sim);
3042 if (sassc->sc->mpr_debug & MPR_TRACE) {
3043 /* frequent debug messages during a panic just slow
3044  * everything down too much.
3046 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3048 sassc->sc->mpr_debug &= ~MPR_TRACE;
/* Service any pending completions synchronously. */
3051 mpr_intr_locked(sassc->sc);
/*
 * CAM async event callback.  Two events are handled: AC_ADVINFO_CHANGED
 * (re-read READ CAPACITY(16) data to keep per-LUN EEDP state current)
 * and AC_FOUND_DEVICE (probe new devices for EEDP support on older
 * FreeBSD versions).  Version #ifdefs select which events are compiled
 * in and whether path-id filtering is needed.
 *
 * NOTE(review): lossy excerpt -- the switch statement itself, braces,
 * "break"s and some local declarations/assignments (e.g. sassc, lunid,
 * found_lun, cgd) are not visible; comments describe only the visible
 * lines.
 */
3055 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3058 struct mpr_softc *sc;
3060 sc = (struct mpr_softc *)callback_arg;
3063 #if (__FreeBSD_version >= 1000006) || \
3064 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3065 case AC_ADVINFO_CHANGED: {
3066 struct mprsas_target *target;
3067 struct mprsas_softc *sassc;
3068 struct scsi_read_capacity_data_long rcap_buf;
3069 struct ccb_dev_advinfo cdai;
3070 struct mprsas_lun *lun;
/* The advinfo buffer type arrives encoded in the callback arg. */
3075 buftype = (uintptr_t)arg;
3081  * We're only interested in read capacity data changes.
3083 if (buftype != CDAI_TYPE_RCAPLONG)
3087  * See the comment in mpr_attach_sas() for a detailed
3088  * explanation. In these versions of FreeBSD we register
3089  * for all events and filter out the events that don't
3092 #if (__FreeBSD_version < 1000703) || \
3093 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3094 if (xpt_path_path_id(path) != sassc->sim->path_id)
3099  * We should have a handle for this, but check to make sure.
3101 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3102 ("Target %d out of bounds in mprsas_async\n",
3103 xpt_path_target_id(path)));
3104 target = &sassc->targets[xpt_path_target_id(path)];
3105 if (target->handle == 0)
/* Find (or lazily create) the per-LUN record for EEDP bookkeeping. */
3108 lunid = xpt_path_lun_id(path);
3110 SLIST_FOREACH(lun, &target->luns, lun_link) {
3111 if (lun->lun_id == lunid) {
3117 if (found_lun == 0) {
3118 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3121 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3122 "LUN for EEDP support.\n");
3125 lun->lun_id = lunid;
3126 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
3129 bzero(&rcap_buf, sizeof(rcap_buf));
3130 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3131 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3132 cdai.ccb_h.flags = CAM_DIR_IN;
3133 cdai.buftype = CDAI_TYPE_RCAPLONG;
3134 #if (__FreeBSD_version >= 1100061) || \
3135 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3136 cdai.flags = CDAI_FLAG_NONE;
3140 cdai.bufsiz = sizeof(rcap_buf);
3141 cdai.buf = (uint8_t *)&rcap_buf;
3142 xpt_action((union ccb *)&cdai);
3143 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3144 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* Record whether the LUN is EEDP-formatted and its block size. */
3146 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3147 && (rcap_buf.prot & SRC16_PROT_EN)) {
3148 lun->eedp_formatted = TRUE;
3149 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3151 lun->eedp_formatted = FALSE;
3152 lun->eedp_block_size = 0;
3157 case AC_FOUND_DEVICE: {
3158 struct ccb_getdev *cgd;
3161  * See the comment in mpr_attach_sas() for a detailed
3162  * explanation. In these versions of FreeBSD we register
3163  * for all events and filter out the events that don't
3166 #if (__FreeBSD_version < 1000703) || \
3167 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3168 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
/* Older CAM versions: probe EEDP support ourselves on device arrival. */
3173 #if (__FreeBSD_version < 901503) || \
3174 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3175 mprsas_check_eedp(sc, path, cgd);
3184 #if (__FreeBSD_version < 901503) || \
3185 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Probe a newly-found device for EEDP (end-to-end data protection)
 * support on older FreeBSD versions: if the inquiry data advertises
 * SPC3_SID_PROTECT, issue an internal READ CAPACITY(16) whose
 * completion (mprsas_read_cap_done) records whether the LUN is
 * EEDP-formatted.
 *
 * NOTE(review): lossy excerpt -- return type, braces, early "return"s,
 * the found_lun bookkeeping, csio assignment and the final
 * xpt_action() are not visible.
 */
3187 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3188 struct ccb_getdev *cgd)
3190 struct mprsas_softc *sassc = sc->sassc;
3191 struct ccb_scsiio *csio;
3192 struct scsi_read_capacity_16 *scsi_cmd;
3193 struct scsi_read_capacity_eedp *rcap_buf;
3195 target_id_t targetid;
3198 struct cam_path *local_path;
3199 struct mprsas_target *target;
3200 struct mprsas_lun *lun;
3204 pathid = cam_sim_path(sassc->sim);
3205 targetid = xpt_path_target_id(path);
3206 lunid = xpt_path_lun_id(path);
3208 KASSERT(targetid < sassc->maxtargets,
3209 ("Target %d out of bounds in mprsas_check_eedp\n", targetid));
3210 target = &sassc->targets[targetid];
3211 if (target->handle == 0x0)
3215  * Determine if the device is EEDP capable.
3217  * If this flag is set in the inquiry data, the device supports
3218  * protection information, and must support the 16 byte read capacity
3219  * command, otherwise continue without sending read cap 16
3221 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3225  * Issue a READ CAPACITY 16 command. This info is used to determine if
3226  * the LUN is formatted for EEDP support.
/* Nowait alloc: we may be called from the async event context. */
3228 ccb = xpt_alloc_ccb_nowait();
3230 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
/* Build a private path so the internal command has its own reference. */
3235 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid)
3237 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3244  * If LUN is already in list, don't create a new one.
3247 SLIST_FOREACH(lun, &target->luns, lun_link) {
3248 if (lun->lun_id == lunid) {
3254 lun = malloc(sizeof(struct mprsas_lun),  M_MPR,
3257 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3259 xpt_free_path(local_path);
3263 lun->lun_id = lunid;
3264 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3267 xpt_path_string(local_path, path_str, sizeof(path_str));
3268 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3269 path_str, target->handle);
3272  * Issue a READ CAPACITY 16 command for the LUN. The
3273  * mprsas_read_cap_done function will load the read cap info into the
/* Buffer is freed by mprsas_read_cap_done() when the command finishes. */
3276 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3278 if (rcap_buf == NULL) {
3279 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3280 "buffer for EEDP support.\n");
3281 xpt_free_path(ccb->ccb_h.path);
/* Fill in the SCSI I/O CCB for the internal READ CAPACITY(16). */
3285 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3287 csio->ccb_h.func_code = XPT_SCSI_IO;
3288 csio->ccb_h.flags = CAM_DIR_IN;
3289 csio->ccb_h.retry_count = 4;
3290 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3291 csio->ccb_h.timeout = 60000;
3292 csio->data_ptr = (uint8_t *)rcap_buf;
3293 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3294 csio->sense_len = MPR_SENSE_LEN;
3295 csio->cdb_len = sizeof(*scsi_cmd);
3296 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E = SERVICE ACTION IN(16); service action selects READ CAP 16. */
3298 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3299 bzero(scsi_cmd, sizeof(*scsi_cmd));
3300 scsi_cmd->opcode = 0x9E;
3301 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* Byte 13 is the allocation length (single byte -- small buffer). */
3302 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion routine can find target/LUN state. */
3304 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internal READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Looks up the matching LUN record, updates its
 * EEDP state from the returned data, then frees the data buffer, the
 * path, and the CCB.
 *
 * NOTE(review): lossy excerpt -- return type, braces, early "return",
 * the loop "break" and some else-branches are not visible.
 */
3309 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3311 struct mprsas_softc *sassc;
3312 struct mprsas_target *target;
3313 struct mprsas_lun *lun;
3314 struct scsi_read_capacity_eedp *rcap_buf;
3316 if (done_ccb == NULL)
3319 /* Driver need to release devq, it Scsi command is
3320  * generated by driver internally.
3321  * Currently there is a single place where driver
3322  * calls scsi command internally. In future if driver
3323  * calls more scsi command internally, it needs to release
3324  * devq internally, since those command will not go back to
3327 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3328 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3329 xpt_release_devq(done_ccb->ccb_h.path,
3330 /*count*/ 1, /*run_queue*/TRUE);
3333 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3336  * Get the LUN ID for the path and look it up in the LUN list for the
/* The softc was stashed in ppriv_ptr1 when the command was built. */
3339 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3340 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3341 ("Target %d out of bounds in mprsas_read_cap_done\n",
3342 done_ccb->ccb_h.target_id));
3343 target = &sassc->targets[done_ccb->ccb_h.target_id];
3344 SLIST_FOREACH(lun, &target->luns, lun_link) {
3345 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3349  * Got the LUN in the target's LUN list. Fill it in with EEDP
3350  * info. If the READ CAP 16 command had some SCSI error (common
3351  * if command is not supported), mark the lun as not supporting
3352  * EEDP and set the block size to 0.
3354 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3355 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3356 lun->eedp_formatted = FALSE;
3357 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte = protection enabled (EEDP formatted). */
3361 if (rcap_buf->protect & 0x01) {
3362 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3363 "%d is formatted for EEDP support.\n",
3364 done_ccb->ccb_h.target_lun,
3365 done_ccb->ccb_h.target_id);
3366 lun->eedp_formatted = TRUE;
3367 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3372 // Finished with this CCB and path.
3373 free(rcap_buf, M_MPR);
3374 xpt_free_path(done_ccb->ccb_h.path);
3375 xpt_free_ccb(done_ccb);
3377 #endif /* (__FreeBSD_version < 901503) || \
3378 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Prepare a target (and optionally a specific LUN) for a task
 * management request: allocate a CCB whose path will be used to
 * release the devq later, attach the target to the TM command, and
 * mark the target INRESET so no new I/O is started against it.
 *
 * NOTE(review): lossy excerpt -- return type, braces, the ccb == NULL
 * guard, and where the allocated CCB/path is stored on the TM command
 * are not visible.
 */
3381 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3382 struct mprsas_target *target, lun_id_t lun_id)
3388  * Set the INRESET flag for this target so that no I/O will be sent to
3389  * the target until the reset has completed. If an I/O request does
3390  * happen, the devq will be frozen. The CCB holds the path which is
3391  * used to release the devq. The devq is released and the CCB is freed
3392  * when the TM completes.
3394 ccb = xpt_alloc_ccb_nowait();
3396 path_id = cam_sim_path(sc->sassc->sim);
3397 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3398 target->tid, lun_id) != CAM_REQ_CMP) {
3402 tm->cm_targ = target;
3403 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off SAS discovery: set wait_for_port_enable (keeps the SIM
 * queue frozen until discovery finishes) and send the port enable
 * request to the IOC.
 */
3409 mprsas_startup(struct mpr_softc *sc)
3412  * Send the port enable message and set the wait_for_port_enable flag.
3413  * This flag helps to keep the simq frozen until all discovery events
3416 sc->wait_for_port_enable = 1;
3417 mprsas_send_portenable(sc);
/*
 * Build and queue an MPI2 Port Enable request, which tells the IOC to
 * start port discovery.  mprsas_portenable_complete() handles the
 * reply.  Returns an error when no command can be allocated
 * (visible guard truncated in this excerpt).
 */
3422 mprsas_send_portenable(struct mpr_softc *sc)
3424 MPI2_PORT_ENABLE_REQUEST *request;
3425 struct mpr_command *cm;
3429 if ((cm = mpr_alloc_command(sc)) == NULL)
3431 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3432 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3433 request->MsgFlags = 0;
3435 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3436 cm->cm_complete = mprsas_portenable_complete;
3440 mpr_map_command(sc, cm);
3441 mpr_dprint(sc, MPR_XINFO,
3442 "mpr_send_portenable finished cm %p req %p complete %p\n",
3443 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the Port Enable request.  Logs failures,
 * frees the command, tears down the config intrhook (boot is no
 * longer held up by this driver), and releases the startup hold so
 * the bus can be rescanned once discovery is complete.
 *
 * NOTE(review): lossy excerpt -- braces and the sassc assignment are
 * not visible.
 */
3448 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3450 MPI2_PORT_ENABLE_REPLY *reply;
3451 struct mprsas_softc *sassc;
3457  * Currently there should be no way we can hit this case. It only
3458  * happens when we have a failure to allocate chain frames, and
3459  * port enable commands don't have S/G lists.
3461 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3462 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3463 "This should not happen!\n", __func__, cm->cm_flags);
3466 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3468 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/*
 * NOTE(review): bug -- the mask is applied BEFORE the 16-bit byte
 * swap; it should read "le16toh(reply->IOCStatus) &
 * MPI2_IOCSTATUS_MASK" (as the other IOCStatus checks in this file
 * do).  Wrong result on big-endian hosts.
 */
3469 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3470 MPI2_IOCSTATUS_SUCCESS)
3471 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3473 mpr_free_command(sc, cm);
/* Boot was held via a config intrhook; release it now. */
3474 if (sc->mpr_ich.ich_arg != NULL) {
3475 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3476 config_intrhook_disestablish(&sc->mpr_ich);
3477 sc->mpr_ich.ich_arg = NULL;
3481  * Done waiting for port enable to complete. Decrement the refcount.
3482  * If refcount is 0, discovery is complete and a rescan of the bus can
3485 sc->wait_for_port_enable = 0;
3486 sc->port_enable_complete = 1;
3487 wakeup(&sc->port_enable_complete);
3488 mprsas_startup_decrement(sassc);
/*
 * Check whether a target ID appears in the comma-separated
 * "exclude_ids" list configured on the softc.  Empty tokens are
 * skipped; matching is numeric via strtol().
 *
 * NOTE(review): lossy excerpt -- return statements and braces are
 * not visible; presumably returns nonzero when the ID is excluded.
 */
3492 mprsas_check_id(struct mprsas_softc *sassc, int id)
3494 struct mpr_softc *sc = sassc->sc;
3498 ids = &sc->exclude_ids[0];
/* strsep() consumes the list in place, one token per iteration. */
3499 while((name = strsep(&ids, ",")) != NULL) {
3500 if (name[0] == '\0')
3502 if (strtol(name, NULL, 0) == (long)id)
3510 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3512 struct mprsas_softc *sassc;
3513 struct mprsas_lun *lun, *lun_tmp;
3514 struct mprsas_target *targ;
3519 * The number of targets is based on IOC Facts, so free all of
3520 * the allocated LUNs for each target and then the target buffer
3523 for (i=0; i< maxtargets; i++) {
3524 targ = &sassc->targets[i];
3525 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3529 free(sassc->targets, M_MPR);
3531 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3532 M_MPR, M_WAITOK|M_ZERO);
3533 if (!sassc->targets) {
3534 panic("%s failed to alloc targets with error %d\n",