2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 /* Communications core for Avago Technologies (LSI) MPT3 */
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
48 #include <sys/malloc.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
57 #include <machine/bus.h>
58 #include <machine/resource.h>
61 #include <machine/stdarg.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #include <cam/scsi/smp_all.h>
74 #include <dev/nvme/nvme.h>
76 #include <dev/mpr/mpi/mpi2_type.h>
77 #include <dev/mpr/mpi/mpi2.h>
78 #include <dev/mpr/mpi/mpi2_ioc.h>
79 #include <dev/mpr/mpi/mpi2_sas.h>
80 #include <dev/mpr/mpi/mpi2_pci.h>
81 #include <dev/mpr/mpi/mpi2_cnfg.h>
82 #include <dev/mpr/mpi/mpi2_init.h>
83 #include <dev/mpr/mpi/mpi2_tool.h>
84 #include <dev/mpr/mpr_ioctl.h>
85 #include <dev/mpr/mprvar.h>
86 #include <dev/mpr/mpr_table.h>
87 #include <dev/mpr/mpr_sas.h>
/* Discovery timeout tuning: single timeout length and how many are allowed. */
89 #define MPRSAS_DISCOVERY_TIMEOUT 20
90 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
93 * static array to check SCSI OpCode for EEDP protection bits
/*
 * EEDP (End-to-End Data Protection) flag shorthands: PRO_R for read-type
 * opcodes (check/remove protection info), PRO_W/PRO_V for write/verify
 * opcodes (insert protection info).
 */
95 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
96 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
97 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Table indexed by SCSI opcode (0x00-0xFF); a nonzero entry gives the EEDP
 * flags to apply for that command.  Nonzero rows cover READ(6/10/12/16),
 * WRITE(6/10/12/16), WRITE AND VERIFY, and related opcodes.
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this extracted chunk.
 */
98 static uint8_t op_code_prot[256] = {
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
108 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* malloc(9) type used for all SAS-layer allocations in this driver. */
117 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
/*
 * Forward declarations for file-local (static) functions: CAM action
 * dispatch, completion handlers, and SMP/abort/portenable helpers.
 * NOTE(review): some continuation lines of these prototypes are missing
 * from this extracted view (e.g. the trailing parameter of
 * mprsas_send_smpcmd).
 */
119 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
120 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
122 static void mprsas_poll(struct cam_sim *sim);
123 static void mprsas_scsiio_timeout(void *data);
124 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
125 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
126 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
127 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
128 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
129 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
130 struct mpr_command *cm);
131 static void mprsas_async(void *callback_arg, uint32_t code,
132 struct cam_path *path, void *arg);
133 static int mprsas_send_portenable(struct mpr_softc *sc);
134 static void mprsas_portenable_complete(struct mpr_softc *sc,
135 struct mpr_command *cm);
137 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
138 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
140 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
142 struct mprsas_target *
143 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
146 struct mprsas_target *target;
149 for (i = start; i < sassc->maxtargets; i++) {
150 target = &sassc->targets[i];
151 if (target->handle == handle)
158 /* we need to freeze the simq during attach and diag reset, to avoid failing
159 * commands before device handles have been found by discovery. Since
160 * discovery involves reading config pages and possibly sending commands,
161 * discovery actions may continue even after we receive the end of discovery
162 * event, so refcount discovery actions instead of assuming we can unfreeze
163 * the simq when we get the event.
/*
 * Take one reference on in-progress discovery work.  The first reference
 * taken while MPRSAS_IN_STARTUP is set freezes the CAM SIM queue so no
 * I/O is issued before device handles have been discovered (see the
 * comment block above for the rationale).
 * NOTE(review): one original line between the dprint and
 * xpt_freeze_simq() is absent from this extracted view.
 */
166 mprsas_startup_increment(struct mprsas_softc *sassc)
168 MPR_FUNCTRACE(sassc->sc);
170 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
171 if (sassc->startup_refcount++ == 0) {
172 /* just starting, freeze the simq */
173 mpr_dprint(sassc->sc, MPR_INIT,
174 "%s freezing simq\n", __func__);
176 xpt_freeze_simq(sassc->sim, 1);
178 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
179 sassc->startup_refcount);
184 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
186 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
187 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
188 xpt_release_simq(sassc->sim, 1);
189 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop one discovery reference (pairs with mprsas_startup_increment).
 * When the count reaches zero, startup/discovery is finished: clear
 * MPRSAS_IN_STARTUP and release the SIM queue so CAM may send I/O.
 */
194 mprsas_startup_decrement(struct mprsas_softc *sassc)
196 MPR_FUNCTRACE(sassc->sc);
198 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
199 if (--sassc->startup_refcount == 0) {
200 /* finished all discovery-related actions, release
201 * the simq and rescan for the latest topology.
203 mpr_dprint(sassc->sc, MPR_INIT,
204 "%s releasing simq\n", __func__);
205 sassc->flags &= ~MPRSAS_IN_STARTUP;
206 xpt_release_simq(sassc->sim, 1);
209 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
210 sassc->startup_refcount);
215 * The firmware requires us to stop sending commands when we're doing task
218 * XXX The logic for serializing the device has been made lazy and moved to
219 * mprsas_prepare_for_tm().
222 mprsas_alloc_tm(struct mpr_softc *sc)
224 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
225 struct mpr_command *tm;
228 tm = mpr_alloc_high_priority_command(sc);
232 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
233 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
/*
 * Release a task-management command allocated by mprsas_alloc_tm().
 * Undoes the per-device freeze done for the TM: clears the target's
 * INRESET flag (otherwise SCSI I/O to the target stays blocked),
 * releases the frozen devq, and frees the path/CCB used to freeze it,
 * before returning the command to the high-priority pool.
 * target_id starts as a 0xFFFFFFFF sentinel for the log message when
 * the TM has no associated target.
 */
238 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
240 int target_id = 0xFFFFFFFF;
247 * For TM's the devq is frozen for the device. Unfreeze it here and
248 * free the resources used for freezing the devq. Must clear the
249 * INRESET flag as well or scsi I/O will not work.
251 if (tm->cm_targ != NULL) {
252 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
253 target_id = tm->cm_targ->tid;
256 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
258 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
259 xpt_free_path(tm->cm_ccb->ccb_h.path);
260 xpt_free_ccb(tm->cm_ccb);
263 mpr_free_high_priority_command(sc, tm);
/*
 * Queue an asynchronous CAM rescan of one target, or of the whole bus
 * when targ selects the wildcard.  The target id is derived by pointer
 * arithmetic into the sassc->targets array.  Allocation is nowait; on
 * CCB or path allocation failure the rescan is simply logged and
 * dropped.
 */
267 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
269 struct mprsas_softc *sassc = sc->sassc;
271 target_id_t targetid;
275 pathid = cam_sim_path(sassc->sim);
277 targetid = CAM_TARGET_WILDCARD;
279 targetid = targ - sassc->targets;
282 * Allocate a CCB and schedule a rescan.
284 ccb = xpt_alloc_ccb_nowait();
286 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
290 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
291 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
292 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
/* Wildcard -> full bus scan; otherwise scan just the one target. */
297 if (targetid == CAM_TARGET_WILDCARD)
298 ccb->ccb_h.func_code = XPT_SCAN_BUS;
300 ccb->ccb_h.func_code = XPT_SCAN_TGT;
302 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logger for a command.  Emits nothing unless the
 * requested debug level is enabled in sc->mpr_debug.  Builds the message
 * in an sbuf: the CAM path (or a "noperiph" sim/bus/target tuple when no
 * CCB is attached), the SCSI CDB and transfer length for XPT_SCSI_IO,
 * the SMID, and finally the caller's formatted text.
 */
307 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
317 /* No need to be in here if debugging isn't enabled */
318 if ((cm->cm_sc->mpr_debug & level) == 0)
321 sbuf_new(&sb, str, sizeof(str), 0);
325 if (cm->cm_ccb != NULL) {
326 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
328 sbuf_cat(&sb, path_str);
329 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
330 scsi_command_string(&cm->cm_ccb->csio, &sb);
331 sbuf_printf(&sb, "length %d ",
332 cm->cm_ccb->csio.dxfer_len);
335 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
336 cam_sim_name(cm->cm_sc->sassc->sim),
337 cam_sim_unit(cm->cm_sc->sassc->sim),
338 cam_sim_bus(cm->cm_sc->sassc->sim),
339 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
343 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
344 sbuf_vprintf(&sb, fmt, ap);
346 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  The device handle travels in
 * cm_complete_data.  A NULL reply (diag reset in progress) just frees
 * the TM.  On IOCStatus success the target's enclosure/link metadata is
 * cleared; devname/sasaddr are deliberately preserved (see comment
 * below) so the same target id can be reused if the device returns.
 * Unlike bare-drive removal, no SAS_OP_REMOVE_DEVICE follows for
 * volumes (see the comment after this function).
 */
352 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
354 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
355 struct mprsas_target *targ;
360 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
361 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
365 /* XXX retry the remove after the diag reset completes? */
366 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
367 "0x%04x\n", __func__, handle);
368 mprsas_free_tm(sc, tm);
372 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
373 MPI2_IOCSTATUS_SUCCESS) {
374 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
375 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
378 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
379 le32toh(reply->TerminationCount));
380 mpr_free_reply(sc, tm->cm_reply_data);
381 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
383 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
387 * Don't clear target if remove fails because things will get confusing.
388 * Leave the devname and sasaddr intact so that we know to avoid reusing
389 * this target id if possible, and so we can assign the same target id
390 * to this device if it comes back in the future.
392 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
393 MPI2_IOCSTATUS_SUCCESS) {
396 targ->encl_handle = 0x0;
397 targ->encl_level_valid = 0x0;
398 targ->encl_level = 0x0;
399 targ->connector_name[0] = ' ';
400 targ->connector_name[1] = ' ';
401 targ->connector_name[2] = ' ';
402 targ->connector_name[3] = ' ';
403 targ->encl_slot = 0x0;
404 targ->exp_dev_handle = 0x0;
406 targ->linkrate = 0x0;
409 targ->scsi_req_desc_type = 0;
412 mprsas_free_tm(sc, tm);
417 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
418 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by its device handle: mark
 * the target INREMOVAL, allocate a TM command, and send a target reset.
 * For NVMe devices with firmware that lacks custom TM handling, a PCIe
 * protocol-level reset is requested instead of a SAS/SATA link reset.
 * Completion continues in mprsas_remove_volume().
 * NOTE(review): req->DevHandle is assigned without htole16() here, while
 * the sibling mprsas_prepare_remove() uses htole16() — looks like an
 * endianness inconsistency on big-endian hosts; confirm against
 * upstream before changing.
 */
421 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
423 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
424 struct mpr_softc *sc;
425 struct mpr_command *cm;
426 struct mprsas_target *targ = NULL;
428 MPR_FUNCTRACE(sassc->sc);
431 targ = mprsas_find_target_by_handle(sassc, 0, handle);
433 /* FIXME: what is the action? */
434 /* We don't know about this device? */
435 mpr_dprint(sc, MPR_ERROR,
436 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
440 targ->flags |= MPRSAS_TARGET_INREMOVAL;
442 cm = mprsas_alloc_tm(sc);
444 mpr_dprint(sc, MPR_ERROR,
445 "%s: command alloc failure\n", __func__);
449 mprsas_rescan_target(sc, targ);
451 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
452 req->DevHandle = targ->handle;
453 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
455 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
456 /* SAS Hard Link Reset / SATA Link Reset */
457 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
459 /* PCIe Protocol Level Reset*/
461 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
466 cm->cm_complete = mprsas_remove_volume;
467 cm->cm_complete_data = (void *)(uintptr_t)handle;
469 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
470 __func__, targ->tid);
471 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
473 mpr_map_command(sc, cm);
477 * The firmware performs debounce on the link to avoid transient link errors
478 * and false removals. When it does decide that link has been lost and a
479 * device needs to go away, it expects that the host will perform a target reset
480 * and then an op remove. The reset has the side-effect of aborting any
481 * outstanding requests for the device, which is required for the op-remove to
482 * succeed. It's not clear if the host should check for the device coming back
483 * alive after the reset.
/*
 * Begin removal of a bare (non-volume) device: mark the target
 * INREMOVAL, allocate a TM and send a target reset with a SAS hard link
 * reset / SATA link reset.  The reset aborts outstanding I/O to the
 * device, which the firmware requires before the subsequent op-remove
 * (see the comment block above).  Completion continues in
 * mprsas_remove_device(), which issues SAS_OP_REMOVE_DEVICE.
 */
486 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
488 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
489 struct mpr_softc *sc;
490 struct mpr_command *tm;
491 struct mprsas_target *targ = NULL;
493 MPR_FUNCTRACE(sassc->sc);
497 targ = mprsas_find_target_by_handle(sassc, 0, handle);
499 /* FIXME: what is the action? */
500 /* We don't know about this device? */
501 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
506 targ->flags |= MPRSAS_TARGET_INREMOVAL;
508 tm = mprsas_alloc_tm(sc);
510 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
515 mprsas_rescan_target(sc, targ);
517 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
518 req->DevHandle = htole16(targ->handle);
519 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
521 /* SAS Hard Link Reset / SATA Link Reset */
522 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
526 tm->cm_complete = mprsas_remove_device;
527 tm->cm_complete_data = (void *)(uintptr_t)handle;
529 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
530 __func__, targ->tid);
531 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
533 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  After validating the reply (NULL reply ->
 * diag reset; error cm_flags should be impossible for TMs since they
 * carry no S/G list), the same command is reused as a
 * SAS_IO_UNIT_CONTROL / REMOVE_DEVICE request.  That request is mapped
 * immediately if the target has no pending commands, otherwise it is
 * parked in targ->pending_remove_tm to be kicked off once the last
 * command clears.  Completion continues in mprsas_remove_complete().
 */
537 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
539 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
540 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
541 struct mprsas_target *targ;
546 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
547 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
551 * Currently there should be no way we can hit this case. It only
552 * happens when we have a failure to allocate chain frames, and
553 * task management commands don't have S/G lists.
555 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
556 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
557 "handle %#04x! This should not happen!\n", __func__,
558 tm->cm_flags, handle);
562 /* XXX retry the remove after the diag reset completes? */
563 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
564 "0x%04x\n", __func__, handle);
565 mprsas_free_tm(sc, tm);
569 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
570 MPI2_IOCSTATUS_SUCCESS) {
571 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
572 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
575 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
576 le32toh(reply->TerminationCount));
577 mpr_free_reply(sc, tm->cm_reply_data);
578 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
580 /* Reuse the existing command */
581 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
582 memset(req, 0, sizeof(*req));
583 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
584 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
585 req->DevHandle = htole16(handle);
587 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
588 tm->cm_complete = mprsas_remove_complete;
589 tm->cm_complete_data = (void *)(uintptr_t)handle;
592 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
593 * They should be aborted or time out and we'll kick thus off there
596 if (TAILQ_FIRST(&targ->commands) == NULL) {
597 mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n");
598 mpr_map_command(sc, tm);
599 targ->pending_remove_tm = NULL;
601 targ->pending_remove_tm = tm;
604 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
606 if (targ->encl_level_valid) {
607 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
608 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
609 targ->connector_name);
/*
 * Completion handler for the SAS_OP_REMOVE_DEVICE request issued by
 * mprsas_remove_device().  At this point the target must have no
 * pending commands (asserted).  On IOCStatus success the target's
 * enclosure/link metadata is cleared and any per-LUN records are torn
 * down; devname/sasaddr are kept so the target id can be re-associated
 * if the device comes back (see comment below).  The TM is freed on
 * every path.
 */
614 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
616 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
618 struct mprsas_target *targ;
619 struct mprsas_lun *lun;
623 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
624 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
629 * At this point, we should have no pending commands for the target.
630 * The remove target has just completed.
632 KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
633 ("%s: no commands should be pending\n", __func__));
636 * Currently there should be no way we can hit this case. It only
637 * happens when we have a failure to allocate chain frames, and
638 * task management commands don't have S/G lists.
640 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
641 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
642 "handle %#04x! This should not happen!\n", __func__,
643 tm->cm_flags, handle);
644 mprsas_free_tm(sc, tm);
649 /* most likely a chip reset */
650 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
651 "0x%04x\n", __func__, handle);
652 mprsas_free_tm(sc, tm);
656 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
657 __func__, handle, le16toh(reply->IOCStatus));
660 * Don't clear target if remove fails because things will get confusing.
661 * Leave the devname and sasaddr intact so that we know to avoid reusing
662 * this target id if possible, and so we can assign the same target id
663 * to this device if it comes back in the future.
665 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
666 MPI2_IOCSTATUS_SUCCESS) {
668 targ->encl_handle = 0x0;
669 targ->encl_level_valid = 0x0;
670 targ->encl_level = 0x0;
671 targ->connector_name[0] = ' ';
672 targ->connector_name[1] = ' ';
673 targ->connector_name[2] = ' ';
674 targ->connector_name[3] = ' ';
675 targ->encl_slot = 0x0;
676 targ->exp_dev_handle = 0x0;
678 targ->linkrate = 0x0;
681 targ->scsi_req_desc_type = 0;
/* Tear down all LUN records attached to this target. */
683 while (!SLIST_EMPTY(&targ->luns)) {
684 lun = SLIST_FIRST(&targ->luns);
685 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
690 mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (device status,
 * discovery, topology, IR/RAID, temperature, cable exceptions) and
 * register mprsas_evt_handler for them.  PCIe/NVMe events are only
 * enabled on MPI 2.6+ firmware with a Gen3.5 IOC.
 */
694 mprsas_register_events(struct mpr_softc *sc)
699 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 setbit(events, MPI2_EVENT_IR_VOLUME);
708 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
712 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
713 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
714 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
715 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
716 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
717 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
721 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
722 &sc->sassc->mprsas_eh);
/*
 * Attach the CAM/SAS layer: allocate the mprsas_softc and targets[]
 * array, create the SIMQ/SIM sized to the non-high-priority request
 * pool, start the firmware-event taskqueue, register one fake SCSI bus,
 * freeze the simq until discovery completes, and register async CAM
 * notifications (for EEDP detection) plus firmware events.  EEDP
 * registration failure is deliberately non-fatal.
 */
728 mpr_attach_sas(struct mpr_softc *sc)
730 struct mprsas_softc *sassc;
732 int unit, error = 0, reqs;
735 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
737 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
740 * XXX MaxTargets could change during a reinit. Since we don't
741 * resize the targets[] array during such an event, cache the value
742 * of MaxTargets here so that we don't get into trouble later. This
743 * should move into the reinit logic.
745 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
746 sassc->targets = malloc(sizeof(struct mprsas_target) *
747 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
/* One slot is reserved; high-priority requests never enter the SIMQ. */
751 reqs = sc->num_reqs - sc->num_prireqs - 1;
752 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
753 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
758 unit = device_get_unit(sc->mpr_dev);
759 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
760 unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
761 if (sassc->sim == NULL) {
762 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
767 TAILQ_INIT(&sassc->ev_queue);
769 /* Initialize taskqueue for Event Handling */
770 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
771 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
772 taskqueue_thread_enqueue, &sassc->ev_tq);
773 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
774 device_get_nameunit(sc->mpr_dev));
779 * XXX There should be a bus for every port on the adapter, but since
780 * we're just going to fake the topology for now, we'll pretend that
781 * everything is just a target on a single bus.
783 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
784 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
785 "Error %d registering SCSI bus\n", error);
791 * Assume that discovery events will start right away.
793 * Hold off boot until discovery is complete.
795 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
796 sc->sassc->startup_refcount = 0;
797 mprsas_startup_increment(sassc);
799 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
802 * Register for async events so we can determine the EEDP
803 * capabilities of devices.
805 status = xpt_create_path(&sassc->path, /*periph*/NULL,
806 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
808 if (status != CAM_REQ_CMP) {
809 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
810 "Error %#x creating sim path\n", status);
815 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
816 status = xpt_register_async(event, mprsas_async, sc,
819 if (status != CAM_REQ_CMP) {
820 mpr_dprint(sc, MPR_ERROR,
821 "Error %#x registering async handler for "
822 "AC_ADVINFO_CHANGED events\n", status);
823 xpt_free_path(sassc->path);
827 if (status != CAM_REQ_CMP) {
829 * EEDP use is the exception, not the rule.
830 * Warn the user, but do not fail to attach.
832 mpr_printf(sc, "EEDP capabilities disabled.\n");
837 mprsas_register_events(sc);
842 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
/*
 * Tear down the CAM/SAS layer in reverse of mpr_attach_sas():
 * deregister firmware events, drain/free the event taskqueue (with the
 * driver lock dropped to avoid deadlock with running tasks), unwind any
 * outstanding startup simq-freeze references, remove the async handler
 * and its path, deregister the bus, free SIM/SIMQ, and finally free the
 * per-target LUN lists and the targets[] array.
 */
847 mpr_detach_sas(struct mpr_softc *sc)
849 struct mprsas_softc *sassc;
850 struct mprsas_lun *lun, *lun_tmp;
851 struct mprsas_target *targ;
856 if (sc->sassc == NULL)
860 mpr_deregister_events(sc, sassc->mprsas_eh);
863 * Drain and free the event handling taskqueue with the lock
864 * unheld so that any parallel processing tasks drain properly
865 * without deadlocking.
867 if (sassc->ev_tq != NULL)
868 taskqueue_free(sassc->ev_tq);
870 /* Make sure CAM doesn't wedge if we had to bail out early. */
873 while (sassc->startup_refcount != 0)
874 mprsas_startup_decrement(sassc);
876 /* Deregister our async handler */
877 if (sassc->path != NULL) {
/* Passing an empty event mask deregisters the callback. */
878 xpt_register_async(0, mprsas_async, sc, sassc->path);
879 xpt_free_path(sassc->path);
883 if (sassc->flags & MPRSAS_IN_STARTUP)
884 xpt_release_simq(sassc->sim, 1);
886 if (sassc->sim != NULL) {
887 xpt_bus_deregister(cam_sim_path(sassc->sim));
888 cam_sim_free(sassc->sim, FALSE);
893 if (sassc->devq != NULL)
894 cam_simq_free(sassc->devq);
896 for (i = 0; i < sassc->maxtargets; i++) {
897 targ = &sassc->targets[i];
898 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
902 free(sassc->targets, M_MPR);
/*
 * Called when the firmware reports end of discovery: cancel any pending
 * discovery timeout, and if mapping-event tracking is enabled, arm the
 * delayed device_check_callout that updates missing-device counts (see
 * the long comment below for why this is deferred).
 */
910 mprsas_discovery_end(struct mprsas_softc *sassc)
912 struct mpr_softc *sc = sassc->sc;
916 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
917 callout_stop(&sassc->discovery_callout);
920 * After discovery has completed, check the mapping table for any
921 * missing devices and update their missing counts. Only do this once
922 * whenever the driver is initialized so that missing counts aren't
923 * updated unnecessarily. Note that just because discovery has
924 * completed doesn't mean that events have been processed yet. The
925 * check_devices function is a callout timer that checks if ALL devices
926 * are missing. If so, it will wait a little longer for events to
927 * complete and keep resetting itself until some device in the mapping
928 * table is not missing, meaning that event processing has started.
930 if (sc->track_mapping_events) {
931 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
932 "completed. Check for missing devices in the mapping "
934 callout_reset(&sc->device_check_callout,
935 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
/*
 * Main CAM action dispatcher for the SIM.  Runs with the driver mutex
 * held (asserted).  Handles path inquiry (XPT_PATH_INQ), transport
 * settings, geometry, device reset, SCSI I/O and SMP I/O; abort/reset
 * CCBs are acknowledged with fake success; anything else is
 * CAM_FUNC_NOTAVAIL.
 */
941 mprsas_action(struct cam_sim *sim, union ccb *ccb)
943 struct mprsas_softc *sassc;
945 sassc = cam_sim_softc(sim);
947 MPR_FUNCTRACE(sassc->sc);
948 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
949 ccb->ccb_h.func_code);
950 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
952 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: report controller capabilities to CAM. */
955 struct ccb_pathinq *cpi = &ccb->cpi;
956 struct mpr_softc *sc = sassc->sc;
958 cpi->version_num = 1;
959 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
960 cpi->target_sprt = 0;
961 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
962 cpi->hba_eng_cnt = 0;
963 cpi->max_target = sassc->maxtargets - 1;
967 * initiator_id is set here to an ID outside the set of valid
968 * target IDs (including volumes).
970 cpi->initiator_id = sassc->maxtargets;
971 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
972 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
973 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
974 cpi->unit_number = cam_sim_unit(sim);
975 cpi->bus_id = cam_sim_bus(sim);
977 * XXXSLM-I think this needs to change based on config page or
978 * something instead of hardcoded to 150000.
980 cpi->base_transfer_speed = 150000;
981 cpi->transport = XPORT_SAS;
982 cpi->transport_version = 0;
983 cpi->protocol = PROTO_SCSI;
984 cpi->protocol_version = SCSI_REV_SPC;
985 cpi->maxio = sc->maxio;
986 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
989 case XPT_GET_TRAN_SETTINGS:
991 struct ccb_trans_settings *cts;
992 struct ccb_trans_settings_sas *sas;
993 struct ccb_trans_settings_scsi *scsi;
994 struct mprsas_target *targ;
997 sas = &cts->xport_specific.sas;
998 scsi = &cts->proto_specific.scsi;
1000 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1001 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1002 cts->ccb_h.target_id));
1003 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle 0 means the slot holds no discovered device. */
1004 if (targ->handle == 0x0) {
1005 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1009 cts->protocol_version = SCSI_REV_SPC2;
1010 cts->transport = XPORT_SAS;
1011 cts->transport_version = 0;
1013 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the firmware link-rate code to a bitrate in kb/s. */
1014 switch (targ->linkrate) {
1016 sas->bitrate = 150000;
1019 sas->bitrate = 300000;
1022 sas->bitrate = 600000;
1025 sas->bitrate = 1200000;
1031 cts->protocol = PROTO_SCSI;
1032 scsi->valid = CTS_SCSI_VALID_TQ;
1033 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1035 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1038 case XPT_CALC_GEOMETRY:
1039 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1040 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1043 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1045 mprsas_action_resetdev(sassc, ccb);
1050 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1051 "for abort or reset\n");
1052 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1055 mprsas_action_scsiio(sassc, ccb);
1058 mprsas_action_smpio(sassc, ccb);
1061 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Broadcast a CAM async notification (ac_code, e.g. AC_BUS_RESET or
 * AC_SENT_BDR) for the given target/lun.  Builds a temporary path,
 * fires xpt_async(), and frees the path; a path-creation failure is
 * logged and the announcement dropped.
 */
1069 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1070 target_id_t target_id, lun_id_t lun_id)
1072 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1073 struct cam_path *path;
1075 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1076 ac_code, target_id, (uintmax_t)lun_id);
1078 if (xpt_create_path(&path, NULL,
1079 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1080 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1085 xpt_async(ac_code, path, NULL);
1086 xpt_free_path(path);
/*
 * Diag-reset helper: force completion of every non-free command with a
 * NULL reply.  Each in-flight command is marked BUSY, has any
 * SATA-identify timeout buffer freed, and is then finished via its
 * completion callback or by waking a sleeping waiter; polled commands
 * just get COMPLETE set.  Commands that end up neither completed nor
 * free are logged as an anomaly.  Resets io_cmds_active when done.
 * Requires the driver mutex (asserted).
 */
1090 mprsas_complete_all_commands(struct mpr_softc *sc)
1092 struct mpr_command *cm;
1097 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1099 /* complete all commands with a NULL reply */
1100 for (i = 1; i < sc->num_reqs; i++) {
1101 cm = &sc->commands[i];
1102 if (cm->cm_state == MPR_CM_STATE_FREE)
1105 cm->cm_state = MPR_CM_STATE_BUSY;
1106 cm->cm_reply = NULL;
1109 if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
1111 free(cm->cm_data, M_MPR);
1115 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1116 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1118 if (cm->cm_complete != NULL) {
1119 mprsas_log_command(cm, MPR_RECOVERY,
1120 "completing cm %p state %x ccb %p for diag reset\n",
1121 cm, cm->cm_state, cm->cm_ccb);
1122 cm->cm_complete(sc, cm);
1124 } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1125 mprsas_log_command(cm, MPR_RECOVERY,
1126 "waking up cm %p state %x ccb %p for diag reset\n",
1127 cm, cm->cm_state, cm->cm_ccb);
1132 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1133 /* this should never happen, but if it does, log */
1134 mprsas_log_command(cm, MPR_RECOVERY,
1135 "cm %p state %x flags 0x%x ccb %p during diag "
1136 "reset\n", cm, cm->cm_state, cm->cm_flags,
1141 sc->io_cmds_active = 0;
/*
 * Post-diag-reset recovery: re-enter startup mode (freezing the simq so
 * CAM holds I/O until rediscovery provides fresh device handles),
 * announce a bus reset to CAM, force-complete all outstanding commands,
 * then invalidate every target's firmware handles and mark each target
 * INDIAGRESET — handles may change across the reset.
 */
1145 mprsas_handle_reinit(struct mpr_softc *sc)
1149 /* Go back into startup mode and freeze the simq, so that CAM
1150 * doesn't send any commands until after we've rediscovered all
1151 * targets and found the proper device handles for them.
1153 * After the reset, portenable will trigger discovery, and after all
1154 * discovery-related activities have finished, the simq will be
1157 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1158 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1159 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1160 mprsas_startup_increment(sc->sassc);
1162 /* notify CAM of a bus reset */
1163 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1166 /* complete and cleanup after all outstanding commands */
1167 mprsas_complete_all_commands(sc);
1169 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1170 __func__, sc->sassc->startup_refcount);
1172 /* zero all the target handles, since they may change after the
1173 * reset, and we have to rediscover all the targets and use the new
1176 for (i = 0; i < sc->sassc->maxtargets; i++) {
1177 if (sc->sassc->targets[i].outstanding != 0)
1178 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1179 i, sc->sassc->targets[i].outstanding);
1180 sc->sassc->targets[i].handle = 0x0;
1181 sc->sassc->targets[i].exp_dev_handle = 0x0;
1182 sc->sassc->targets[i].outstanding = 0;
1183 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler for a task-management (TM) request that never completed.
 * Armed by mprsas_send_reset()/mprsas_send_abort().  Logs the stuck TM and
 * marks it busy; the escalation (presumably a controller reinit) is in lines
 * not visible in this excerpt — TODO confirm.
 */
1187 mprsas_tm_timeout(void *data)
1189 struct mpr_command *tm = data;
1190 struct mpr_softc *sc = tm->cm_sc;
/* Callout was scheduled with the softc mutex, so it is held here. */
1192 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1194 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/* A timed-out TM must still be queued to the firmware. */
1197 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1198 ("command not inqueue\n"));
/* Mark busy so a late completion won't treat it as an in-queue command. */
1200 tm->cm_state = MPR_CM_STATE_BUSY;
/*
 * Completion callback for a LOGICAL_UNIT_RESET task-management request.
 * Cancels the TM timeout, then: on internal command error or NULL reply
 * during a diag reset, frees the TM; on NULL reply otherwise, escalates
 * (controller reset path not visible here).  With a valid reply, counts
 * commands still outstanding on the LUN — zero means recovery succeeded
 * (announce BDR, continue aborting other LUNs' timed-out commands or free
 * the TM); non-zero means the LUN reset effectively failed and we escalate
 * to a target reset.
 */
1205 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1207 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1208 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1209 unsigned int cm_count = 0;
1210 struct mpr_command *cm;
1211 struct mprsas_target *targ;
/* The TM completed, so stop the escalation timer armed at send time. */
1213 callout_stop(&tm->cm_callout);
1215 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1216 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1220 * Currently there should be no way we can hit this case. It only
1221 * happens when we have a failure to allocate chain frames, and
1222 * task management commands don't have S/G lists.
1224 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1225 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1226 "%s: cm_flags = %#x for LUN reset! "
1227 "This should not happen!\n", __func__, tm->cm_flags);
1228 mprsas_free_tm(sc, tm);
1232 if (reply == NULL) {
1233 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1235 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1236 /* this completion was due to a reset, just cleanup */
1237 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1238 "reset, ignoring NULL LUN reset reply\n");
1240 mprsas_free_tm(sc, tm);
1243 /* we should have gotten a reply. */
1244 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1245 "LUN reset attempt, resetting controller\n");
1251 mpr_dprint(sc, MPR_RECOVERY,
1252 "logical unit reset status 0x%x code 0x%x count %u\n",
1253 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1254 le32toh(reply->TerminationCount));
1257 * See if there are any outstanding commands for this LUN.
1258 * This could be made more efficient by using a per-LU data
1259 * structure of some sort.
1261 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1262 if (cm->cm_lun == tm->cm_lun)
1266 if (cm_count == 0) {
1267 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1268 "Finished recovery after LUN reset for target %u\n",
/* Tell CAM a "bus device reset" was delivered to this target. */
1271 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1275 * We've finished recovery for this logical unit. check and
1276 * see if some other logical unit has a timedout command
1277 * that needs to be processed.
1279 cm = TAILQ_FIRST(&targ->timedout_commands);
1281 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1282 "More commands to abort for target %u\n", targ->tid);
/* Reuse this TM frame to abort the next timed-out command. */
1283 mprsas_send_abort(sc, tm, cm);
1286 mprsas_free_tm(sc, tm);
1289 /* if we still have commands for this LUN, the reset
1290 * effectively failed, regardless of the status reported.
1291 * Escalate to a target reset.
1293 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1294 "logical unit reset complete for target %u, but still "
1295 "have %u command(s), sending target reset\n", targ->tid,
/* NVMe targets skip target reset unless custom TM handling is enabled. */
1297 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1298 mprsas_send_reset(sc, tm,
1299 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion callback for a TARGET_RESET task-management request.  Mirrors
 * mprsas_logical_unit_reset_complete(): stop the TM timeout, bail out on
 * internal command error or NULL reply (cleanup if a diag reset explains it,
 * escalate otherwise).  With a valid reply, an outstanding count of zero
 * means recovery for the whole target is done (announce BDR, free the TM);
 * otherwise the target reset effectively failed and the driver escalates
 * to a full controller reset (escalation lines not visible in this excerpt).
 */
1306 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1308 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1309 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1310 struct mprsas_target *targ;
/* The TM completed, so stop the escalation timer armed at send time. */
1312 callout_stop(&tm->cm_callout);
1314 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1315 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1319 * Currently there should be no way we can hit this case. It only
1320 * happens when we have a failure to allocate chain frames, and
1321 * task management commands don't have S/G lists.
1323 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1324 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1325 "reset! This should not happen!\n", __func__, tm->cm_flags);
1326 mprsas_free_tm(sc, tm);
1330 if (reply == NULL) {
1331 mpr_dprint(sc, MPR_RECOVERY,
1332 "NULL target reset reply for tm %p TaskMID %u\n",
1333 tm, le16toh(req->TaskMID));
1334 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1335 /* this completion was due to a reset, just cleanup */
1336 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1337 "reset, ignoring NULL target reset reply\n");
1339 mprsas_free_tm(sc, tm);
1342 /* we should have gotten a reply. */
1343 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1344 "target reset attempt, resetting controller\n");
1350 mpr_dprint(sc, MPR_RECOVERY,
1351 "target reset status 0x%x code 0x%x count %u\n",
1352 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1353 le32toh(reply->TerminationCount));
1355 if (targ->outstanding == 0) {
1357 * We've finished recovery for this target and all
1358 * of its logical units.
1360 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1361 "Finished reset recovery for target %u\n", targ->tid);
/* Tell CAM a "bus device reset" was delivered to this target. */
1363 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1367 mprsas_free_tm(sc, tm);
1370 * After a target reset, if this target still has
1371 * outstanding commands, the reset effectively failed,
1372 * regardless of the status reported. escalate.
1374 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1375 "Target reset complete for target %u, but still have %u "
1376 "command(s), resetting controller\n", targ->tid,
1382 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a reset task-management request of the given type
 * (logical-unit reset or target reset) on the pre-allocated TM command.
 * Chooses SAS hard-link/SATA-link reset flags for SCSI targets, or a PCIe
 * protocol-level reset (with a per-target configurable timeout) for NVMe
 * targets, arms the TM timeout callout, and maps the command to the
 * hardware.  Returns an error for a zero devhandle or an unknown type
 * (exact return statements fall on lines not visible in this excerpt).
 */
1385 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1387 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1388 struct mprsas_target *target;
1391 target = tm->cm_targ;
/* Handle 0 means the target is gone/not yet discovered; nothing to reset. */
1392 if (target->handle == 0) {
1393 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1394 "%d\n", __func__, target->tid);
1398 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1399 req->DevHandle = htole16(target->handle);
1400 req->TaskType = type;
1402 if (!target->is_nvme || sc->custom_nvme_tm_handling) {
1403 timeout = MPR_RESET_TIMEOUT;
1405 * Target reset method =
1406 * SAS Hard Link Reset / SATA Link Reset
1408 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* NVMe path: honor a per-target timeout override if configured. */
1410 timeout = (target->controller_reset_timeout) ? (
1411 target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
1412 /* PCIe Protocol Level Reset*/
1414 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
1417 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1418 /* XXX Need to handle invalid LUNs */
1419 MPR_SET_LUN(req->LUN, tm->cm_lun);
1420 tm->cm_targ->logical_unit_resets++;
1421 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1422 "Sending logical unit reset to target %u lun %d\n",
1423 target->tid, tm->cm_lun);
1424 tm->cm_complete = mprsas_logical_unit_reset_complete;
1425 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1426 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1427 tm->cm_targ->target_resets++;
1428 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1429 "Sending target reset to target %u\n", target->tid);
1430 tm->cm_complete = mprsas_target_reset_complete;
/* Target reset covers all LUNs, so prepare with the LUN wildcard. */
1431 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1434 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1438 if (target->encl_level_valid) {
1439 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1440 "At enclosure level %d, slot %d, connector name (%4s)\n",
1441 target->encl_level, target->encl_slot,
1442 target->connector_name);
1446 tm->cm_complete_data = (void *)tm;
/* Arm the TM timeout before handing the request to the hardware. */
1448 callout_reset(&tm->cm_callout, timeout * hz,
1449 mprsas_tm_timeout, tm);
1451 err = mpr_map_command(sc, tm);
1453 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1454 "error %d sending reset type %u\n", err, type);
/*
 * Completion callback for an ABORT_TASK task-management request.  Stops the
 * TM timeout, handles internal error / NULL reply like the other TM
 * completions, then inspects the target's timed-out command list: empty
 * means abort recovery is done (free the TM); a different SMID at the head
 * means this abort succeeded and the next timed-out command is aborted with
 * the same TM; the same SMID means the abort did not complete the command,
 * so recovery escalates to a logical-unit reset.
 */
1461 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1463 struct mpr_command *cm;
1464 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1465 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1466 struct mprsas_target *targ;
/* The TM completed, so stop the escalation timer armed at send time. */
1468 callout_stop(&tm->cm_callout);
1470 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1471 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1475 * Currently there should be no way we can hit this case. It only
1476 * happens when we have a failure to allocate chain frames, and
1477 * task management commands don't have S/G lists.
1479 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1480 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1481 "cm_flags = %#x for abort %p TaskMID %u!\n",
1482 tm->cm_flags, tm, le16toh(req->TaskMID));
1483 mprsas_free_tm(sc, tm);
1487 if (reply == NULL) {
1488 mpr_dprint(sc, MPR_RECOVERY,
1489 "NULL abort reply for tm %p TaskMID %u\n",
1490 tm, le16toh(req->TaskMID));
1491 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1492 /* this completion was due to a reset, just cleanup */
1493 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1494 "reset, ignoring NULL abort reply\n");
1496 mprsas_free_tm(sc, tm);
1498 /* we should have gotten a reply. */
1499 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1500 "abort attempt, resetting controller\n");
1506 mpr_dprint(sc, MPR_RECOVERY,
1507 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1508 le16toh(req->TaskMID),
1509 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1510 le32toh(reply->TerminationCount));
1512 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1515 * if there are no more timedout commands, we're done with
1516 * error recovery for this target.
1518 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1519 "Finished abort recovery for target %u\n", targ->tid);
1521 mprsas_free_tm(sc, tm);
/* Head of the list changed => the aborted command was dequeued: success. */
1522 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1523 /* abort success, but we have more timedout commands to abort */
1524 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1525 "Continuing abort recovery for target %u\n", targ->tid);
1526 mprsas_send_abort(sc, tm, cm);
1529 * we didn't get a command completion, so the abort
1530 * failed as far as we're concerned. escalate.
1532 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1533 "Abort failed for target %u, sending logical unit reset\n",
1536 mprsas_send_reset(sc, tm,
1537 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1541 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT_TASK task-management request for the timed-out
 * command 'cm' on the pre-allocated TM command 'tm'.  Sets the target
 * devhandle, LUN, and the SMID of the command to abort, wires up
 * mprsas_abort_complete() as the completion, arms the abort timeout
 * (NVMe targets may use a device-reported abort timeout instead of the
 * default), and maps the TM to the hardware.
 */
1544 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1545 struct mpr_command *cm)
1547 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1548 struct mprsas_target *targ;
/* Handle 0 means the target is gone; there is nothing to abort against. */
1552 if (targ->handle == 0) {
1553 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1554 "%s null devhandle for target_id %d\n",
1555 __func__, cm->cm_ccb->ccb_h.target_id);
1559 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1560 "Aborting command %p\n", cm);
1562 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1563 req->DevHandle = htole16(targ->handle);
1564 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1566 /* XXX Need to handle invalid LUNs */
1567 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID selects exactly which outstanding request the firmware aborts. */
1569 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1572 tm->cm_complete = mprsas_abort_complete;
1573 tm->cm_complete_data = (void *)tm;
1574 tm->cm_targ = cm->cm_targ;
1575 tm->cm_lun = cm->cm_lun;
1577 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1578 timeout = MPR_ABORT_TIMEOUT;
/* NVMe path: use the abort timeout learned from the device/controller. */
1580 timeout = sc->nvme_abort_timeout;
1582 callout_reset(&tm->cm_callout, timeout * hz,
1583 mprsas_tm_timeout, tm);
1587 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1589 err = mpr_map_command(sc, tm);
1591 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1592 "error %d sending abort for cm %p SMID %u\n",
1593 err, cm, req->TaskMID);
/*
 * Callout handler for an individual SCSI I/O that exceeded its CAM timeout.
 * First polls the interrupt handler in case the completion is merely
 * pending, then marks the command timed out, queues it on the target's
 * timed-out list, and either piggybacks on an in-progress recovery TM or
 * allocates a new TM and starts recovery by aborting this command.
 */
1598 mprsas_scsiio_timeout(void *data)
1600 sbintime_t elapsed, now;
1602 struct mpr_softc *sc;
1603 struct mpr_command *cm;
1604 struct mprsas_target *targ;
1606 cm = (struct mpr_command *)data;
/* Callout was scheduled with the softc mutex, so it is held here. */
1612 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1614 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1617 * Run the interrupt handler to make sure it's not pending. This
1618 * isn't perfect because the command could have already completed
1619 * and been re-used, though this is unlikely.
1621 mpr_intr_locked(sc);
/* Flag set by the poll above => the command completed just in time. */
1622 if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
1623 mprsas_log_command(cm, MPR_XINFO,
1624 "SCSI command %p almost timed out\n", cm);
1628 if (cm->cm_ccb == NULL) {
1629 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
/* sim_data was stamped with sbinuptime() at submission time. */
1636 elapsed = now - ccb->ccb_h.qos.sim_data;
1637 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1638 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1639 targ->tid, targ->handle, ccb->ccb_h.timeout,
1640 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1641 if (targ->encl_level_valid) {
1642 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1643 "At enclosure level %d, slot %d, connector name (%4s)\n",
1644 targ->encl_level, targ->encl_slot, targ->connector_name);
1647 /* XXX first, check the firmware state, to see if it's still
1648 * operational. if not, do a diag reset.
1650 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1651 cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
1652 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1654 if (targ->tm != NULL) {
1655 /* target already in recovery, just queue up another
1656 * timedout command to be processed later.
1658 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1659 "processing by tm %p\n", cm, targ->tm);
1661 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1663 /* start recovery by aborting the first timedout command */
1664 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1665 "Sending abort to target %u for SMID %d\n", targ->tid,
1666 cm->cm_desc.Default.SMID);
1667 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1669 mprsas_send_abort(sc, targ->tm, cm);
1672 /* XXX queue this target up for recovery once a TM becomes
1673 * available. The firmware only has a limited number of
1674 * HighPriority credits for the high priority requests used
1675 * for task management, and we ran out.
1677 * Isilon: don't worry about this for now, since we have
1678 * more credits than disks in an enclosure, and limit
1679 * ourselves to one TM per target for recovery.
1681 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1682 "timedout cm %p failed to allocate a tm\n", cm);
1687 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1689 * Return 0 - for success,
1690 * 1 - to immediately return the command to CAM with success status
1691 * negative value - to fallback to firmware path i.e. issue scsi unmap
1692 * to FW without any translation.
/*
 * Translate a SCSI UNMAP CDB into a native NVMe Dataset Management
 * (deallocate) command wrapped in an MPI2.6 NVMe Encapsulated request.
 * See the header comment above for the 0 / 1 / negative return contract.
 * On the 0 path the command is queued to the hardware and completes via
 * mprsas_scsiio_complete().
 */
1695 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1696 union ccb *ccb, struct mprsas_target *targ)
1698 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1699 struct ccb_scsiio *csio;
1700 struct unmap_parm_list *plist;
1701 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1702 struct nvme_command *c;
1704 uint16_t ndesc, list_len, data_length;
1705 struct mpr_prp_page *prp_page_info;
1706 uint64_t nvme_dsm_ranges_dma_handle;
/* UNMAP CDB bytes 7-8 hold the big-endian parameter list length. */
1709 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1711 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
/* M_NOWAIT: called under the softc lock; must not sleep here. */
1715 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1717 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1718 "save UNMAP data\n");
1722 /* Copy SCSI unmap data to a local buffer */
1723 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1725 /* return the unmap command to CAM with success status,
1726 * if the number of descriptors is zero.
/* Each UNMAP block descriptor is 16 bytes, hence the shift by 4. */
1728 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1730 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1731 "UNMAP cmd is Zero\n");
1736 data_length = ndesc * sizeof(struct nvme_dsm_range);
/* Honor the device's Maximum Data Transfer Size for the DSM payload. */
1737 if (data_length > targ->MDTS) {
1738 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1739 "Device's MDTS: %d\n", data_length, targ->MDTS);
1744 prp_page_info = mpr_alloc_prp_page(sc);
1745 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1746 "UNMAP command.\n", __func__));
1749 * Insert the allocated PRP page into the command's PRP page list. This
1750 * will be freed when the command is freed.
1752 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1754 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1755 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1757 bzero(nvme_dsm_ranges, data_length);
1759 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1760 * for each descriptors contained in SCSI UNMAP data.
/* SCSI UNMAP descriptors are big-endian; NVMe DSM ranges are little-endian. */
1762 for (i = 0; i < ndesc; i++) {
1763 nvme_dsm_ranges[i].length =
1764 htole32(be32toh(plist->desc[i].nlb));
1765 nvme_dsm_ranges[i].starting_lba =
1766 htole64(be64toh(plist->desc[i].slba));
1767 nvme_dsm_ranges[i].attributes = 0;
1770 /* Build MPI2.6's NVMe Encapsulated Request Message */
1771 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1772 bzero(req, sizeof(*req));
1773 req->DevHandle = htole16(targ->handle);
1774 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1775 req->Flags = MPI26_NVME_FLAGS_WRITE;
/* Any NVMe error completion is DMA'd into the command's sense buffer. */
1776 req->ErrorResponseBaseAddress.High =
1777 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1778 req->ErrorResponseBaseAddress.Low =
1779 htole32(cm->cm_sense_busaddr);
1780 req->ErrorResponseAllocationLength =
1781 htole16(sizeof(struct nvme_completion));
1782 req->EncapsulatedCommandLength =
1783 htole16(sizeof(struct nvme_command));
1784 req->DataLength = htole32(data_length);
1786 /* Build NVMe DSM command */
1787 c = (struct nvme_command *) req->NVMe_Command;
1788 c->opc = NVME_OPC_DATASET_MANAGEMENT;
/* NVMe namespace IDs are 1-based, CAM LUNs 0-based — hence the +1. */
1789 c->nsid = htole32(csio->ccb_h.target_lun + 1);
/* cdw10: number of ranges is encoded as a 0-based value. */
1790 c->cdw10 = htole32(ndesc - 1);
1791 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1793 cm->cm_length = data_length;
1796 cm->cm_complete = mprsas_scsiio_complete;
1797 cm->cm_complete_data = ccb;
1799 cm->cm_lun = csio->ccb_h.target_lun;
1802 cm->cm_desc.Default.RequestFlags =
1803 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
/* Stamp submission time for the timeout handler's elapsed calculation. */
1805 csio->ccb_h.qos.sim_data = sbinuptime();
1806 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1807 mprsas_scsiio_timeout, cm, 0);
1810 targ->outstanding++;
1811 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1812 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1814 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1815 __func__, cm, ccb, targ->outstanding);
1817 mpr_build_nvme_prp(sc, cm, req,
1818 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1819 mpr_map_command(sc, cm);
/*
 * CAM XPT_SCSI_IO action handler: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI IO request (or divert a
 * SCSI UNMAP to the native NVMe DSM path), apply tagging, LUN, and EEDP
 * (protection information) settings, arm the per-IO timeout, and submit
 * the request to the hardware.
 */
1827 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1829 MPI2_SCSI_IO_REQUEST *req;
1830 struct ccb_scsiio *csio;
1831 struct mpr_softc *sc;
1832 struct mprsas_target *targ;
1833 struct mprsas_lun *lun;
1834 struct mpr_command *cm;
1835 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1836 uint16_t eedp_flags;
1837 uint32_t mpi_control;
1842 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1845 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1846 ("Target %d out of bounds in XPT_SCSI_IO\n",
1847 csio->ccb_h.target_id));
1848 targ = &sassc->targets[csio->ccb_h.target_id];
1849 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0 means the target slot is empty (never discovered or removed). */
1850 if (targ->handle == 0x0) {
1851 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1852 __func__, csio->ccb_h.target_id);
1853 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID member disks are owned by the IR firmware; no direct SCSI IO. */
1857 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1858 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1859 "supported %u\n", __func__, csio->ccb_h.target_id);
1860 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1865 * Sometimes, it is possible to get a command that is not "In
1866 * Progress" and was actually aborted by the upper layer. Check for
1867 * this here and complete the command without error.
1869 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1870 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1871 "target %u\n", __func__, csio->ccb_h.target_id);
1876 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1877 * that the volume has timed out. We want volumes to be enumerated
1878 * until they are deleted/removed, not just failed. In either event,
1879 * we're removing the target due to a firmware event telling us
1880 * the device is now gone (as opposed to some transient event). Since
1881 * we're opting to remove failed devices from the OS's view, we need
1882 * to propagate that status up the stack.
1884 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1885 if (targ->devinfo == 0)
1886 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1888 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1893 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1894 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1895 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1901 * If target has a reset in progress, freeze the devq and return. The
1902 * devq will be released when the TM reset is finished.
1904 if (targ->flags & MPRSAS_TARGET_INRESET) {
1905 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1906 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1907 __func__, targ->tid);
1908 xpt_freeze_devq(ccb->ccb_h.path, 1);
1913 cm = mpr_alloc_command(sc);
/* Out of commands (or mid diag reset): freeze the simq and requeue. */
1914 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1916 mpr_free_command(sc, cm);
1918 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1919 xpt_freeze_simq(sassc->sim, 1);
1920 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1922 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1923 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1928 /* For NVME device's issue UNMAP command directly to NVME drives by
1929 * constructing equivalent native NVMe DataSetManagement command.
1931 scsi_opcode = scsiio_cdb_ptr(csio)[0];
1932 if (scsi_opcode == UNMAP &&
1934 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
1935 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
1936 if (rc == 1) { /* return command to CAM with success status */
1937 mpr_free_command(sc, cm);
1938 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
/* rc < 0 falls through to the normal SCSI IO path below. */
1941 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
1945 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1946 bzero(req, sizeof(*req));
1947 req->DevHandle = htole16(targ->handle);
1948 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1950 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1951 req->SenseBufferLength = MPR_SENSE_LEN;
1953 req->ChainOffset = 0;
1954 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1959 req->DataLength = htole32(csio->dxfer_len);
1960 req->BidirectionalDataLength = 0;
1961 req->IoFlags = htole16(csio->cdb_len);
1964 /* Note: BiDirectional transfers are not supported */
1965 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1967 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1968 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
1971 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1972 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
1976 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field in Control. */
1980 if (csio->cdb_len == 32)
1981 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1983 * It looks like the hardware doesn't require an explicit tag
1984 * number for each transaction. SAM Task Management not supported
1987 switch (csio->tag_action) {
1988 case MSG_HEAD_OF_Q_TAG:
1989 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1991 case MSG_ORDERED_Q_TAG:
1992 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1995 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1997 case CAM_TAG_ACTION_NONE:
1998 case MSG_SIMPLE_Q_TAG:
2000 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Apply per-device Transport Layer Retries setting from the map table. */
2003 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2004 req->Control = htole32(mpi_control);
2006 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2007 mpr_free_command(sc, cm);
2008 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2013 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2014 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2016 KASSERT(csio->cdb_len <= IOCDBLEN,
2017 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2018 "is not set", csio->cdb_len));
2019 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2021 req->IoFlags = htole16(csio->cdb_len);
2024 * Check if EEDP is supported and enabled. If it is then check if the
2025 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2026 * is formatted for EEDP support. If all of this is true, set CDB up
2027 * for EEDP transfer.
2029 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2030 if (sc->eedp_enabled && eedp_flags) {
2031 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2032 if (lun->lun_id == csio->ccb_h.target_lun) {
2037 if ((lun != NULL) && (lun->eedp_formatted)) {
2038 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2039 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2040 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2041 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2042 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2044 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2046 req->EEDPFlags = htole16(eedp_flags);
2049 * If CDB less than 32, fill in Primary Ref Tag with
2050 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2051 * already there. Also, set protection bit. FreeBSD
2052 * currently does not support CDBs bigger than 16, but
2053 * the code doesn't hurt, and will be here for the
2056 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 for 16-byte CDBs, byte 2 otherwise. */
2057 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2058 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2059 PrimaryReferenceTag;
2060 for (i = 0; i < 4; i++) {
2062 req->CDB.CDB32[lba_byte + i];
2065 req->CDB.EEDP32.PrimaryReferenceTag =
2067 CDB.EEDP32.PrimaryReferenceTag);
2068 req->CDB.EEDP32.PrimaryApplicationTagMask =
/* Set the RDPROTECT/WRPROTECT field (bit 5) in CDB byte 1. */
2071 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2074 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2075 req->EEDPFlags = htole16(eedp_flags);
2076 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2082 cm->cm_length = csio->dxfer_len;
2083 if (cm->cm_length != 0) {
2085 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2089 cm->cm_sge = &req->SGL;
/* SGL occupies the request frame from word 24 to word 32. */
2090 cm->cm_sglsize = (32 - 24) * 4;
2091 cm->cm_complete = mprsas_scsiio_complete;
2092 cm->cm_complete_data = ccb;
2094 cm->cm_lun = csio->ccb_h.target_lun;
2097 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2098 * and set descriptor type.
2100 if (targ->scsi_req_desc_type ==
2101 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2102 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2103 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2104 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2105 if (!sc->atomic_desc_capable) {
2106 cm->cm_desc.FastPathSCSIIO.DevHandle =
2107 htole16(targ->handle);
2110 cm->cm_desc.SCSIIO.RequestFlags =
2111 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2112 if (!sc->atomic_desc_capable)
2113 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Stamp submission time for the timeout handler's elapsed calculation. */
2116 csio->ccb_h.qos.sim_data = sbinuptime();
2117 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2118 mprsas_scsiio_timeout, cm, 0);
2121 targ->outstanding++;
2122 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2123 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2125 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2126 __func__, cm, ccb, targ->outstanding);
2128 mpr_map_command(sc, cm);
2133 * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
/*
 * Log diagnostic details for a SCSI IO that completed unsuccessfully:
 * IOC status, SCSI status/state (decoded via the driver's describe
 * tables), enclosure location if known, autosense data when valid, and
 * the transport response code when present.  Purely informational; does
 * not alter command state.
 */
2136 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2137 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2141 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2142 MPI2_IOCSTATUS_MASK;
2143 u8 scsi_state = mpi_reply->SCSIState;
2144 u8 scsi_status = mpi_reply->SCSIStatus;
2145 char *desc_ioc_state = NULL;
2146 char *desc_scsi_status = NULL;
2147 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is treated specially here — presumably a benign/noisy
 * loginfo code that is filtered; TODO confirm against LSI loginfo docs. */
2149 if (log_info == 0x31170000)
2152 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2154 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2157 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2158 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2159 if (targ->encl_level_valid) {
2160 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2161 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2162 targ->connector_name);
2166 * We can add more detail about underflow data here
2169 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2170 "scsi_state %b\n", desc_scsi_status, scsi_status,
2171 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2172 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2174 if (sc->mpr_debug & MPR_XINFO &&
2175 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2176 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2177 scsi_sense_print(csio);
2178 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2181 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2182 response_info = le32toh(mpi_reply->ResponseInfo);
2183 response_bytes = (u8 *)&response_info;
2184 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2186 mpr_describe_table(mpr_scsi_taskmgmt_string,
2187 response_bytes[0]));
2191 /** mprsas_nvme_trans_status_code
2193 * Convert Native NVMe command error status to
2194 * equivalent SCSI error status.
2196 * Returns appropriate scsi_status
/*
 * Map an NVMe completion status (SCT + SC fields) to the closest SCSI
 * status plus fixed-format sense data (sense key / ASC / ASCQ), following
 * the NVMe-to-SCSI translation conventions.  The sense data is written
 * into the CCB and CAM_AUTOSNS_VALID is set; the mapped SCSI status is
 * returned (return statement falls past this excerpt).  Unrecognized
 * codes fall back to CHECK CONDITION / ILLEGAL REQUEST.
 */
2199 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2200 struct mpr_command *cm)
2202 u8 status = MPI2_SCSI_STATUS_GOOD;
2203 int skey, asc, ascq;
2204 union ccb *ccb = cm->cm_complete_data;
2205 int returned_sense_len;
/* Split the NVMe status into Status Code Type and Status Code. */
2208 sct = NVME_STATUS_GET_SCT(nvme_status);
2209 sc = NVME_STATUS_GET_SC(nvme_status);
/* Defaults used when no specific mapping below matches. */
2211 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2212 skey = SSD_KEY_ILLEGAL_REQUEST;
2213 asc = SCSI_ASC_NO_SENSE;
2214 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2217 case NVME_SCT_GENERIC:
2219 case NVME_SC_SUCCESS:
2220 status = MPI2_SCSI_STATUS_GOOD;
2221 skey = SSD_KEY_NO_SENSE;
2222 asc = SCSI_ASC_NO_SENSE;
2223 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2225 case NVME_SC_INVALID_OPCODE:
2226 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2227 skey = SSD_KEY_ILLEGAL_REQUEST;
2228 asc = SCSI_ASC_ILLEGAL_COMMAND;
2229 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2231 case NVME_SC_INVALID_FIELD:
2232 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2233 skey = SSD_KEY_ILLEGAL_REQUEST;
2234 asc = SCSI_ASC_INVALID_CDB;
2235 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2237 case NVME_SC_DATA_TRANSFER_ERROR:
2238 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2239 skey = SSD_KEY_MEDIUM_ERROR;
2240 asc = SCSI_ASC_NO_SENSE;
2241 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2243 case NVME_SC_ABORTED_POWER_LOSS:
2244 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2245 skey = SSD_KEY_ABORTED_COMMAND;
2246 asc = SCSI_ASC_WARNING;
2247 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2249 case NVME_SC_INTERNAL_DEVICE_ERROR:
2250 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2251 skey = SSD_KEY_HARDWARE_ERROR;
2252 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2253 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* All abort variants collapse to TASK ABORTED / ABORTED COMMAND. */
2255 case NVME_SC_ABORTED_BY_REQUEST:
2256 case NVME_SC_ABORTED_SQ_DELETION:
2257 case NVME_SC_ABORTED_FAILED_FUSED:
2258 case NVME_SC_ABORTED_MISSING_FUSED:
2259 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2260 skey = SSD_KEY_ABORTED_COMMAND;
2261 asc = SCSI_ASC_NO_SENSE;
2262 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2264 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2265 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2266 skey = SSD_KEY_ILLEGAL_REQUEST;
2267 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2268 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2270 case NVME_SC_LBA_OUT_OF_RANGE:
2271 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2272 skey = SSD_KEY_ILLEGAL_REQUEST;
2273 asc = SCSI_ASC_ILLEGAL_BLOCK;
2274 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2276 case NVME_SC_CAPACITY_EXCEEDED:
2277 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2278 skey = SSD_KEY_MEDIUM_ERROR;
2279 asc = SCSI_ASC_NO_SENSE;
2280 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2282 case NVME_SC_NAMESPACE_NOT_READY:
2283 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2284 skey = SSD_KEY_NOT_READY;
2285 asc = SCSI_ASC_LUN_NOT_READY;
2286 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2290 case NVME_SCT_COMMAND_SPECIFIC:
2292 case NVME_SC_INVALID_FORMAT:
2293 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2294 skey = SSD_KEY_ILLEGAL_REQUEST;
2295 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2296 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2298 case NVME_SC_CONFLICTING_ATTRIBUTES:
2299 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2300 skey = SSD_KEY_ILLEGAL_REQUEST;
2301 asc = SCSI_ASC_INVALID_CDB;
2302 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2306 case NVME_SCT_MEDIA_ERROR:
2308 case NVME_SC_WRITE_FAULTS:
2309 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2310 skey = SSD_KEY_MEDIUM_ERROR;
2311 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2312 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2314 case NVME_SC_UNRECOVERED_READ_ERROR:
2315 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2316 skey = SSD_KEY_MEDIUM_ERROR;
2317 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2318 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* End-to-end protection (PI) check failures map to their dedicated
 * ASC/ASCQ pairs so upper layers can distinguish guard/app/ref tag. */
2320 case NVME_SC_GUARD_CHECK_ERROR:
2321 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2322 skey = SSD_KEY_MEDIUM_ERROR;
2323 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2324 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2326 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2327 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2328 skey = SSD_KEY_MEDIUM_ERROR;
2329 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2330 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2332 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2333 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2334 skey = SSD_KEY_MEDIUM_ERROR;
2335 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2336 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2338 case NVME_SC_COMPARE_FAILURE:
2339 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2340 skey = SSD_KEY_MISCOMPARE;
2341 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2342 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2344 case NVME_SC_ACCESS_DENIED:
2345 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2346 skey = SSD_KEY_ILLEGAL_REQUEST;
2347 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2348 ascq = SCSI_ASCQ_INVALID_LUN_ID;
/* Compute the sense residual against the caller's sense buffer size. */
2354 returned_sense_len = sizeof(struct scsi_sense_data);
2355 if (returned_sense_len < ccb->csio.sense_len)
2356 ccb->csio.sense_resid = ccb->csio.sense_len -
2359 ccb->csio.sense_resid = 0;
2361 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2362 1, skey, asc, ascq, SSD_ELEM_NONE);
2363 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2368 /** mprsas_complete_nvme_unmap
2370 * Complete native NVMe command issued using NVMe Encapsulated
2374 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2376 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2377 struct nvme_completion *nvme_completion = NULL;
2378 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2380 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2381 if (le16toh(mpi_reply->ErrorResponseCount)){
2382 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2383 scsi_status = mprsas_nvme_trans_status_code(
2384 nvme_completion->status, cm);
2390 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2392 MPI2_SCSI_IO_REPLY *rep;
2394 struct ccb_scsiio *csio;
2395 struct mprsas_softc *sassc;
2396 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2397 u8 *TLR_bits, TLR_on, *scsi_cdb;
2400 struct mprsas_target *target;
2401 target_id_t target_id;
2404 mpr_dprint(sc, MPR_TRACE,
2405 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2406 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2407 cm->cm_targ->outstanding);
2409 callout_stop(&cm->cm_callout);
2410 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2413 ccb = cm->cm_complete_data;
2415 target_id = csio->ccb_h.target_id;
2416 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2418 * XXX KDM if the chain allocation fails, does it matter if we do
2419 * the sync and unload here? It is simpler to do it in every case,
2420 * assuming it doesn't cause problems.
2422 if (cm->cm_data != NULL) {
2423 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2424 dir = BUS_DMASYNC_POSTREAD;
2425 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2426 dir = BUS_DMASYNC_POSTWRITE;
2427 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2428 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2431 cm->cm_targ->completed++;
2432 cm->cm_targ->outstanding--;
2433 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2434 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2436 if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2437 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2438 KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2439 ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2440 cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2441 if (cm->cm_reply != NULL)
2442 mprsas_log_command(cm, MPR_RECOVERY,
2443 "completed timedout cm %p ccb %p during recovery "
2444 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2445 le16toh(rep->IOCStatus), rep->SCSIStatus,
2446 rep->SCSIState, le32toh(rep->TransferCount));
2448 mprsas_log_command(cm, MPR_RECOVERY,
2449 "completed timedout cm %p ccb %p during recovery\n",
2451 } else if (cm->cm_targ->tm != NULL) {
2452 if (cm->cm_reply != NULL)
2453 mprsas_log_command(cm, MPR_RECOVERY,
2454 "completed cm %p ccb %p during recovery "
2455 "ioc %x scsi %x state %x xfer %u\n",
2456 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2457 rep->SCSIStatus, rep->SCSIState,
2458 le32toh(rep->TransferCount));
2460 mprsas_log_command(cm, MPR_RECOVERY,
2461 "completed cm %p ccb %p during recovery\n",
2463 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2464 mprsas_log_command(cm, MPR_RECOVERY,
2465 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2468 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2470 * We ran into an error after we tried to map the command,
2471 * so we're getting a callback without queueing the command
2472 * to the hardware. So we set the status here, and it will
2473 * be retained below. We'll go through the "fast path",
2474 * because there can be no reply when we haven't actually
2475 * gone out to the hardware.
2477 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2480 * Currently the only error included in the mask is
2481 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2482 * chain frames. We need to freeze the queue until we get
2483 * a command that completed without this error, which will
2484 * hopefully have some chain frames attached that we can
2485 * use. If we wanted to get smarter about it, we would
2486 * only unfreeze the queue in this condition when we're
2487 * sure that we're getting some chain frames back. That's
2488 * probably unnecessary.
2490 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2491 xpt_freeze_simq(sassc->sim, 1);
2492 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2493 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2494 "freezing SIM queue\n");
2499 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2500 * flag, and use it in a few places in the rest of this function for
2501 * convenience. Use the macro if available.
2503 scsi_cdb = scsiio_cdb_ptr(csio);
2506 * If this is a Start Stop Unit command and it was issued by the driver
2507 * during shutdown, decrement the refcount to account for all of the
2508 * commands that were sent. All SSU commands should be completed before
2509 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2512 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2513 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2517 /* Take the fast path to completion */
2518 if (cm->cm_reply == NULL) {
2519 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2520 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2521 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2523 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2524 csio->scsi_status = SCSI_STATUS_OK;
2526 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2527 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2528 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2529 mpr_dprint(sc, MPR_XINFO,
2530 "Unfreezing SIM queue\n");
2535 * There are two scenarios where the status won't be
2536 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2537 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2539 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2541 * Freeze the dev queue so that commands are
2542 * executed in the correct order after error
2545 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2546 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2548 mpr_free_command(sc, cm);
2553 target = &sassc->targets[target_id];
2554 if (scsi_cdb[0] == UNMAP &&
2556 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2557 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2558 csio->scsi_status = rep->SCSIStatus;
2561 mprsas_log_command(cm, MPR_XINFO,
2562 "ioc %x scsi %x state %x xfer %u\n",
2563 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2564 le32toh(rep->TransferCount));
2566 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2567 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2568 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2570 case MPI2_IOCSTATUS_SUCCESS:
2571 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2572 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2573 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2574 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2576 /* Completion failed at the transport level. */
2577 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2578 MPI2_SCSI_STATE_TERMINATED)) {
2579 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2583 /* In a modern packetized environment, an autosense failure
2584 * implies that there's not much else that can be done to
2585 * recover the command.
2587 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2588 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2593 * CAM doesn't care about SAS Response Info data, but if this is
2594 * the state check if TLR should be done. If not, clear the
2595 * TLR_bits for the target.
2597 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2598 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2599 == MPR_SCSI_RI_INVALID_FRAME)) {
2600 sc->mapping_table[target_id].TLR_bits =
2601 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2605 * Intentionally override the normal SCSI status reporting
2606 * for these two cases. These are likely to happen in a
2607 * multi-initiator environment, and we want to make sure that
2608 * CAM retries these commands rather than fail them.
2610 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2611 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2612 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2616 /* Handle normal status and sense */
2617 csio->scsi_status = rep->SCSIStatus;
2618 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2619 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2621 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2623 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2624 int sense_len, returned_sense_len;
2626 returned_sense_len = min(le32toh(rep->SenseCount),
2627 sizeof(struct scsi_sense_data));
2628 if (returned_sense_len < csio->sense_len)
2629 csio->sense_resid = csio->sense_len -
2632 csio->sense_resid = 0;
2634 sense_len = min(returned_sense_len,
2635 csio->sense_len - csio->sense_resid);
2636 bzero(&csio->sense_data, sizeof(csio->sense_data));
2637 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2638 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2642 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2643 * and it's page code 0 (Supported Page List), and there is
2644 * inquiry data, and this is for a sequential access device, and
2645 * the device is an SSP target, and TLR is supported by the
2646 * controller, turn the TLR_bits value ON if page 0x90 is
2649 if ((scsi_cdb[0] == INQUIRY) &&
2650 (scsi_cdb[1] & SI_EVPD) &&
2651 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2652 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2653 (csio->data_ptr != NULL) &&
2654 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2655 (sc->control_TLR) &&
2656 (sc->mapping_table[target_id].device_info &
2657 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2658 vpd_list = (struct scsi_vpd_supported_page_list *)
2660 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2661 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2662 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2663 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2664 alloc_len -= csio->resid;
2665 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2666 if (vpd_list->list[i] == 0x90) {
2674 * If this is a SATA direct-access end device, mark it so that
2675 * a SCSI StartStopUnit command will be sent to it when the
2676 * driver is being shutdown.
2678 if ((scsi_cdb[0] == INQUIRY) &&
2679 (csio->data_ptr != NULL) &&
2680 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2681 (sc->mapping_table[target_id].device_info &
2682 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2683 ((sc->mapping_table[target_id].device_info &
2684 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2685 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2686 target = &sassc->targets[target_id];
2687 target->supports_SSU = TRUE;
2688 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2692 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2693 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2695 * If devinfo is 0 this will be a volume. In that case don't
2696 * tell CAM that the volume is not there. We want volumes to
2697 * be enumerated until they are deleted/removed, not just
2700 if (cm->cm_targ->devinfo == 0)
2701 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2703 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2705 case MPI2_IOCSTATUS_INVALID_SGL:
2706 mpr_print_scsiio_cmd(sc, cm);
2707 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2709 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2711 * This is one of the responses that comes back when an I/O
2712 * has been aborted. If it is because of a timeout that we
2713 * initiated, just set the status to CAM_CMD_TIMEOUT.
2714 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2715 * command is the same (it gets retried, subject to the
2716 * retry counter), the only difference is what gets printed
2719 if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2720 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2722 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2724 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2725 /* resid is ignored for this condition */
2727 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2729 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2730 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2732 * These can sometimes be transient transport-related
2733 * errors, and sometimes persistent drive-related errors.
2734 * We used to retry these without decrementing the retry
2735 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2736 * we hit a persistent drive problem that returns one of
2737 * these error codes, we would retry indefinitely. So,
2738 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2739 * count and avoid infinite retries. We're taking the
2740 * potential risk of flagging false failures in the event
2741 * of a topology-related error (e.g. a SAS expander problem
2742 * causes a command addressed to a drive to fail), but
2743 * avoiding getting into an infinite retry loop. However,
2744 * if we get them while were moving a device, we should
2745 * fail the request as 'not there' because the device
2746 * is effectively gone.
2748 if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2749 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2751 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2752 mpr_dprint(sc, MPR_INFO,
2753 "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2754 mpr_describe_table(mpr_iocstatus_string,
2755 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2756 target_id, cm->cm_desc.Default.SMID,
2757 le32toh(rep->IOCLogInfo),
2758 (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2759 mpr_dprint(sc, MPR_XINFO,
2760 "SCSIStatus %x SCSIState %x xfercount %u\n",
2761 rep->SCSIStatus, rep->SCSIState,
2762 le32toh(rep->TransferCount));
2764 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2765 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2766 case MPI2_IOCSTATUS_INVALID_VPID:
2767 case MPI2_IOCSTATUS_INVALID_FIELD:
2768 case MPI2_IOCSTATUS_INVALID_STATE:
2769 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2770 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2771 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2772 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2773 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2775 mprsas_log_command(cm, MPR_XINFO,
2776 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2777 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2778 rep->SCSIStatus, rep->SCSIState,
2779 le32toh(rep->TransferCount));
2780 csio->resid = cm->cm_length;
2782 if (scsi_cdb[0] == UNMAP &&
2784 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2785 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2787 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2792 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2794 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2795 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2796 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2797 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2801 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2802 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2803 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2807 * Check to see if we're removing the device. If so, and this is the
2808 * last command on the queue, proceed with the deferred removal of the
2809 * device. Note, for removing a volume, this won't trigger because
2810 * pending_remove_tm will be NULL.
2812 if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2813 if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2814 cm->cm_targ->pending_remove_tm != NULL) {
2815 mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n");
2816 mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2817 cm->cm_targ->pending_remove_tm = NULL;
2821 mpr_free_command(sc, cm);
2826 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2828 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2829 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2833 ccb = cm->cm_complete_data;
2836 * Currently there should be no way we can hit this case. It only
2837 * happens when we have a failure to allocate chain frames, and SMP
2838 * commands require two S/G elements only. That should be handled
2839 * in the standard request size.
2841 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2842 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2843 "request!\n", __func__, cm->cm_flags);
2844 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2848 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2850 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2851 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2855 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2856 sasaddr = le32toh(req->SASAddress.Low);
2857 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2859 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2860 MPI2_IOCSTATUS_SUCCESS ||
2861 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2862 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2863 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2864 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2868 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2869 "completed successfully\n", __func__, (uintmax_t)sasaddr);
2871 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2872 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2874 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2878 * We sync in both directions because we had DMAs in the S/G list
2879 * in both directions.
2881 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2882 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2883 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2884 mpr_free_command(sc, cm);
2889 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2891 struct mpr_command *cm;
2892 uint8_t *request, *response;
2893 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2894 struct mpr_softc *sc;
2902 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2903 case CAM_DATA_PADDR:
2904 case CAM_DATA_SG_PADDR:
2906 * XXX We don't yet support physical addresses here.
2908 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2909 "supported\n", __func__);
2910 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2915 * The chip does not support more than one buffer for the
2916 * request or response.
2918 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2919 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2920 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2921 "response buffer segments not supported for SMP\n",
2923 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2929 * The CAM_SCATTER_VALID flag was originally implemented
2930 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2931 * We have two. So, just take that flag to mean that we
2932 * might have S/G lists, and look at the S/G segment count
2933 * to figure out whether that is the case for each individual
2936 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2937 bus_dma_segment_t *req_sg;
2939 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2940 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2942 request = ccb->smpio.smp_request;
2944 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2945 bus_dma_segment_t *rsp_sg;
2947 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2948 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2950 response = ccb->smpio.smp_response;
2952 case CAM_DATA_VADDR:
2953 request = ccb->smpio.smp_request;
2954 response = ccb->smpio.smp_response;
2957 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2962 cm = mpr_alloc_command(sc);
2964 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
2966 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2971 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2972 bzero(req, sizeof(*req));
2973 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2975 /* Allow the chip to use any route to this SAS address. */
2976 req->PhysicalPort = 0xff;
2978 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2980 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2982 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
2983 "%#jx\n", __func__, (uintmax_t)sasaddr);
2985 mpr_init_sge(cm, req, &req->SGL);
2988 * Set up a uio to pass into mpr_map_command(). This allows us to
2989 * do one map command, and one busdma call in there.
2991 cm->cm_uio.uio_iov = cm->cm_iovec;
2992 cm->cm_uio.uio_iovcnt = 2;
2993 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2996 * The read/write flag isn't used by busdma, but set it just in
2997 * case. This isn't exactly accurate, either, since we're going in
3000 cm->cm_uio.uio_rw = UIO_WRITE;
3002 cm->cm_iovec[0].iov_base = request;
3003 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3004 cm->cm_iovec[1].iov_base = response;
3005 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3007 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3008 cm->cm_iovec[1].iov_len;
3011 * Trigger a warning message in mpr_data_cb() for the user if we
3012 * wind up exceeding two S/G segments. The chip expects one
3013 * segment for the request and another for the response.
3015 cm->cm_max_segs = 2;
3017 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3018 cm->cm_complete = mprsas_smpio_complete;
3019 cm->cm_complete_data = ccb;
3022 * Tell the mapping code that we're using a uio, and that this is
3023 * an SMP passthrough request. There is a little special-case
3024 * logic there (in mpr_data_cb()) to handle the bidirectional
3027 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3028 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3030 /* The chip data format is little endian. */
3031 req->SASAddress.High = htole32(sasaddr >> 32);
3032 req->SASAddress.Low = htole32(sasaddr);
3035 * XXX Note that we don't have a timeout/abort mechanism here.
3036 * From the manual, it looks like task management requests only
3037 * work for SCSI IO and SATA passthrough requests. We may need to
3038 * have a mechanism to retry requests in the event of a chip reset
3039 * at least. Hopefully the chip will insure that any errors short
3040 * of that are relayed back to the driver.
3042 error = mpr_map_command(sc, cm);
3043 if ((error != 0) && (error != EINPROGRESS)) {
3044 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3045 "mpr_map_command()\n", __func__, error);
3052 mpr_free_command(sc, cm);
3053 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3059 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3061 struct mpr_softc *sc;
3062 struct mprsas_target *targ;
3063 uint64_t sasaddr = 0;
3068 * Make sure the target exists.
3070 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3071 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3072 targ = &sassc->targets[ccb->ccb_h.target_id];
3073 if (targ->handle == 0x0) {
3074 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3075 __func__, ccb->ccb_h.target_id);
3076 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3082 * If this device has an embedded SMP target, we'll talk to it
3084 * figure out what the expander's address is.
3086 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3087 sasaddr = targ->sasaddr;
3090 * If we don't have a SAS address for the expander yet, try
3091 * grabbing it from the page 0x83 information cached in the
3092 * transport layer for this target. LSI expanders report the
3093 * expander SAS address as the port-associated SAS address in
3094 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3097 * XXX KDM disable this for now, but leave it commented out so that
3098 * it is obvious that this is another possible way to get the SAS
3101 * The parent handle method below is a little more reliable, and
3102 * the other benefit is that it works for devices other than SES
3103 * devices. So you can send a SMP request to a da(4) device and it
3104 * will get routed to the expander that device is attached to.
3105 * (Assuming the da(4) device doesn't contain an SMP target...)
3109 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3113 * If we still don't have a SAS address for the expander, look for
3114 * the parent device of this device, which is probably the expander.
3117 #ifdef OLD_MPR_PROBE
3118 struct mprsas_target *parent_target;
3121 if (targ->parent_handle == 0x0) {
3122 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3123 "a valid parent handle!\n", __func__, targ->handle);
3124 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3127 #ifdef OLD_MPR_PROBE
3128 parent_target = mprsas_find_target_by_handle(sassc, 0,
3129 targ->parent_handle);
3131 if (parent_target == NULL) {
3132 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3133 "a valid parent target!\n", __func__, targ->handle);
3134 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3138 if ((parent_target->devinfo &
3139 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3140 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3141 "does not have an SMP target!\n", __func__,
3142 targ->handle, parent_target->handle);
3143 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3147 sasaddr = parent_target->sasaddr;
3148 #else /* OLD_MPR_PROBE */
3149 if ((targ->parent_devinfo &
3150 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3151 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3152 "does not have an SMP target!\n", __func__,
3153 targ->handle, targ->parent_handle);
3154 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3158 if (targ->parent_sasaddr == 0x0) {
3159 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3160 "%d does not have a valid SAS address!\n", __func__,
3161 targ->handle, targ->parent_handle);
3162 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3166 sasaddr = targ->parent_sasaddr;
3167 #endif /* OLD_MPR_PROBE */
3172 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3173 "handle %d\n", __func__, targ->handle);
3174 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3177 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3187 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3189 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3190 struct mpr_softc *sc;
3191 struct mpr_command *tm;
3192 struct mprsas_target *targ;
3194 MPR_FUNCTRACE(sassc->sc);
3195 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3197 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3198 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3200 tm = mprsas_alloc_tm(sc);
3202 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3203 "mprsas_action_resetdev\n");
3204 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3209 targ = &sassc->targets[ccb->ccb_h.target_id];
3210 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3211 req->DevHandle = htole16(targ->handle);
3212 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3214 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3215 /* SAS Hard Link Reset / SATA Link Reset */
3216 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3218 /* PCIe Protocol Level Reset*/
3220 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3224 tm->cm_complete = mprsas_resetdev_complete;
3225 tm->cm_complete_data = ccb;
3227 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3228 __func__, targ->tid);
3231 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3232 mpr_map_command(sc, tm);
3236 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3238 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3242 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3244 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3245 ccb = tm->cm_complete_data;
3248 * Currently there should be no way we can hit this case. It only
3249 * happens when we have a failure to allocate chain frames, and
3250 * task management commands don't have S/G lists.
3252 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3253 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3255 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3257 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3258 "handle %#04x! This should not happen!\n", __func__,
3259 tm->cm_flags, req->DevHandle);
3260 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3264 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3265 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3267 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3268 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3269 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3273 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3277 mprsas_free_tm(sc, tm);
3282 mprsas_poll(struct cam_sim *sim)
3284 struct mprsas_softc *sassc;
3286 sassc = cam_sim_softc(sim);
3288 if (sassc->sc->mpr_debug & MPR_TRACE) {
3289 /* frequent debug messages during a panic just slow
3290 * everything down too much.
3292 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3294 sassc->sc->mpr_debug &= ~MPR_TRACE;
3297 mpr_intr_locked(sassc->sc);
/*
 * CAM asynchronous-event callback for this SIM.  callback_arg is the
 * mpr_softc of the controller that registered the handler; the visible
 * fragment handles AC_ADVINFO_CHANGED (read-capacity / EEDP updates).
 */
3301 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3304 struct mpr_softc *sc;
3306 sc = (struct mpr_softc *)callback_arg;
3309 case AC_ADVINFO_CHANGED: {
3310 struct mprsas_target *target;
3311 struct mprsas_softc *sassc;
3312 struct scsi_read_capacity_data_long rcap_buf;
3313 struct ccb_dev_advinfo cdai;
3314 struct mprsas_lun *lun;
/* The advinfo buffer type is passed through the opaque async arg. */
3319 buftype = (uintptr_t)arg;
3325 * We're only interested in read capacity data changes.
3327 if (buftype != CDAI_TYPE_RCAPLONG)
3331 * We should have a handle for this, but check to make sure.
3333 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3334 ("Target %d out of bounds in mprsas_async\n",
3335 xpt_path_target_id(path)));
3336 target = &sassc->targets[xpt_path_target_id(path)];
/* A zero handle means the target is gone or never attached; bail out. */
3337 if (target->handle == 0)
/* Locate the LUN this event refers to; allocate it on first sight. */
3340 lunid = xpt_path_lun_id(path);
3342 SLIST_FOREACH(lun, &target->luns, lun_link) {
3343 if (lun->lun_id == lunid) {
3349 if (found_lun == 0) {
3350 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3353 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3354 "LUN for EEDP support.\n");
3357 lun->lun_id = lunid;
3358 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/*
 * Issue an immediate XPT_DEV_ADVINFO request to fetch the long
 * read-capacity data so the LUN's protection (EEDP) settings and
 * block size can be learned.
 */
3361 bzero(&rcap_buf, sizeof(rcap_buf));
3362 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3363 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3364 cdai.ccb_h.flags = CAM_DIR_IN;
3365 cdai.buftype = CDAI_TYPE_RCAPLONG;
3366 cdai.flags = CDAI_FLAG_NONE;
3367 cdai.bufsiz = sizeof(rcap_buf);
3368 cdai.buf = (uint8_t *)&rcap_buf;
3369 xpt_action((union ccb *)&cdai);
/* Release the device queue if the advinfo request left it frozen. */
3370 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3371 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/*
 * On success with protection enabled, record the protection type and
 * logical block size; otherwise clear the EEDP state so stale
 * settings are never used for this LUN.
 */
3373 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3374 && (rcap_buf.prot & SRC16_PROT_EN)) {
3375 switch (rcap_buf.prot & SRC16_P_TYPE) {
3378 lun->eedp_formatted = TRUE;
3379 lun->eedp_block_size =
3380 scsi_4btoul(rcap_buf.length);
3384 lun->eedp_formatted = FALSE;
3385 lun->eedp_block_size = 0;
3389 lun->eedp_formatted = FALSE;
3390 lun->eedp_block_size = 0;
3394 case AC_FOUND_DEVICE:
3401 * Set the INRESET flag for this target so that no I/O will be sent to
3402 * the target until the reset has completed. If an I/O request does
3403 * happen, the devq will be frozen. The CCB holds the path which is
3404 * used to release the devq. The devq is released and the CCB is freed
3405 * when the TM completes.
3408 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3409 struct mprsas_target *target, lun_id_t lun_id)
/* Non-sleeping allocation: task management can be set up from contexts
 * that must not block waiting for memory. */
3414 ccb = xpt_alloc_ccb_nowait();
3416 path_id = cam_sim_path(sc->sassc->sim);
/* Build a path to the target/LUN under reset; this path is what the TM
 * completion later uses to release the frozen devq. */
3417 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3418 target->tid, lun_id) != CAM_REQ_CMP) {
/* Tie the TM command to the target and block further I/O to it until
 * the reset finishes. */
3422 tm->cm_targ = target;
3423 target->flags |= MPRSAS_TARGET_INRESET;
3429 mprsas_startup(struct mpr_softc *sc)
3432 * Send the port enable message and set the wait_for_port_enable flag.
3433 * This flag helps to keep the simq frozen until all discovery events
3436 sc->wait_for_port_enable = 1;
3437 mprsas_send_portenable(sc);
/*
 * Queue an MPI2 PORT_ENABLE request to the IOC.  Port enable kicks off
 * device discovery; the reply is handled asynchronously by
 * mprsas_portenable_complete().
 */
3442 mprsas_send_portenable(struct mpr_softc *sc)
3444 MPI2_PORT_ENABLE_REQUEST *request;
3445 struct mpr_command *cm;
/* No free command frames available; caller treats this as a failure. */
3449 if ((cm = mpr_alloc_command(sc)) == NULL)
/* Build the MPI request in place inside the command's request frame. */
3451 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3452 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3453 request->MsgFlags = 0;
3455 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
/* Completion fires from interrupt context once the IOC replies. */
3456 cm->cm_complete = mprsas_portenable_complete;
3460 mpr_map_command(sc, cm);
3461 mpr_dprint(sc, MPR_XINFO,
3462 "mpr_send_portenable finished cm %p req %p complete %p\n",
3463 cm, cm->cm_req, cm->cm_complete);
3468 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3470 MPI2_PORT_ENABLE_REPLY *reply;
3471 struct mprsas_softc *sassc;
3477 * Currently there should be no way we can hit this case. It only
3478 * happens when we have a failure to allocate chain frames, and
3479 * port enable commands don't have S/G lists.
3481 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3482 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3483 "This should not happen!\n", __func__, cm->cm_flags);
3486 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3488 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
3489 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3490 MPI2_IOCSTATUS_SUCCESS)
3491 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3493 mpr_free_command(sc, cm);
3495 * Done waiting for port enable to complete. Decrement the refcount.
3496 * If refcount is 0, discovery is complete and a rescan of the bus can
3499 sc->wait_for_port_enable = 0;
3500 sc->port_enable_complete = 1;
3501 wakeup(&sc->port_enable_complete);
3502 mprsas_startup_decrement(sassc);
/*
 * Return non-zero if target "id" appears in the driver's exclude-ids
 * tunable, a comma-separated list of target ids that should not be
 * exposed to CAM.
 */
3506 mprsas_check_id(struct mprsas_softc *sassc, int id)
3508 struct mpr_softc *sc = sassc->sc;
/*
 * NOTE(review): strsep() writes NUL bytes into the buffer it parses, so
 * iterating directly over sc->exclude_ids consumes the list destructively
 * on the first call -- confirm whether later calls are expected to see
 * the full list again, or parse a local copy instead.
 */
3512 ids = &sc->exclude_ids[0];
3513 while((name = strsep(&ids, ",")) != NULL) {
/* Skip empty tokens produced by consecutive commas. */
3514 if (name[0] == '\0')
/* Base 0 lets the tunable use decimal, octal, or 0x-prefixed hex ids. */
3516 if (strtol(name, NULL, 0) == (long)id)
3524 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3526 struct mprsas_softc *sassc;
3527 struct mprsas_lun *lun, *lun_tmp;
3528 struct mprsas_target *targ;
3533 * The number of targets is based on IOC Facts, so free all of
3534 * the allocated LUNs for each target and then the target buffer
3537 for (i=0; i< maxtargets; i++) {
3538 targ = &sassc->targets[i];
3539 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3543 free(sassc->targets, M_MPR);
3545 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3546 M_MPR, M_WAITOK|M_ZERO);