2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 /* Communications core for Avago Technologies (LSI) MPT3 */
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
48 #include <sys/malloc.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
57 #include <machine/bus.h>
58 #include <machine/resource.h>
61 #include <machine/stdarg.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/nvme/nvme.h>
78 #include <dev/mpr/mpi/mpi2_type.h>
79 #include <dev/mpr/mpi/mpi2.h>
80 #include <dev/mpr/mpi/mpi2_ioc.h>
81 #include <dev/mpr/mpi/mpi2_sas.h>
82 #include <dev/mpr/mpi/mpi2_pci.h>
83 #include <dev/mpr/mpi/mpi2_cnfg.h>
84 #include <dev/mpr/mpi/mpi2_init.h>
85 #include <dev/mpr/mpi/mpi2_tool.h>
86 #include <dev/mpr/mpr_ioctl.h>
87 #include <dev/mpr/mprvar.h>
88 #include <dev/mpr/mpr_table.h>
89 #include <dev/mpr/mpr_sas.h>
91 #define MPRSAS_DISCOVERY_TIMEOUT 20
92 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
95 * static array to check SCSI OpCode for EEDP protection bits
97 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
100 static uint8_t op_code_prot[256] = {
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
116 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
119 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
121 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
123 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mprsas_poll(struct cam_sim *sim);
125 static void mprsas_scsiio_timeout(void *data);
126 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
127 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
128 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
129 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
130 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
131 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
132 struct mpr_command *cm);
133 static void mprsas_async(void *callback_arg, uint32_t code,
134 struct cam_path *path, void *arg);
135 #if (__FreeBSD_version < 901503) || \
136 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
137 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
138 struct ccb_getdev *cgd);
139 static void mprsas_read_cap_done(struct cam_periph *periph,
140 union ccb *done_ccb);
142 static int mprsas_send_portenable(struct mpr_softc *sc);
143 static void mprsas_portenable_complete(struct mpr_softc *sc,
144 struct mpr_command *cm);
146 #if __FreeBSD_version >= 900026
147 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
148 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
150 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151 #endif //FreeBSD_version >= 900026
/*
 * Linear scan of sassc->targets[] for the entry whose device handle
 * matches 'handle', starting at index 'start'.
 * NOTE(review): this extract elides the match-return and not-found
 * return paths of the original function.
 */
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
157 	struct mprsas_target *target;
160 	for (i = start; i < sassc->maxtargets; i++) {
161 		target = &sassc->targets[i];
162 		if (target->handle == handle)
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170 * commands before device handles have been found by discovery. Since
171 * discovery involves reading config pages and possibly sending commands,
172 * discovery actions may continue even after we receive the end of discovery
173 * event, so refcount discovery actions instead of assuming we can unfreeze
174 * the simq when we get the event.
/*
 * Bump the discovery refcount; on the 0 -> 1 transition while in
 * startup, freeze the SIM queue so CAM holds off I/O until discovery
 * settles (see the block comment above on why this is refcounted).
 */
177 mprsas_startup_increment(struct mprsas_softc *sassc)
179 	MPR_FUNCTRACE(sassc->sc);
181 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 		if (sassc->startup_refcount++ == 0) {
183 			/* just starting, freeze the simq */
184 			mpr_dprint(sassc->sc, MPR_INIT,
185 			    "%s freezing simq\n", __func__);
186 #if (__FreeBSD_version >= 1000039) || \
187     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
190 		xpt_freeze_simq(sassc->sim, 1);
192 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
193 		    sassc->startup_refcount);
/*
 * If the SIM queue was frozen by this driver (MPRSAS_QUEUE_FROZEN),
 * clear the flag and release the simq so CAM resumes dispatching.
 */
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
200 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 		xpt_release_simq(sassc->sim, 1);
203 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop the discovery refcount; on the 1 -> 0 transition, leave startup
 * mode, release the simq frozen by mprsas_startup_increment(), and
 * (on new enough CAM) rescan the whole bus for the final topology.
 */
208 mprsas_startup_decrement(struct mprsas_softc *sassc)
210 	MPR_FUNCTRACE(sassc->sc);
212 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
213 		if (--sassc->startup_refcount == 0) {
214 			/* finished all discovery-related actions, release
215 			 * the simq and rescan for the latest topology.
217 			mpr_dprint(sassc->sc, MPR_INIT,
218 			    "%s releasing simq\n", __func__);
219 			sassc->flags &= ~MPRSAS_IN_STARTUP;
220 			xpt_release_simq(sassc->sim, 1);
221 #if (__FreeBSD_version >= 1000039) || \
222     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
225 			mprsas_rescan_target(sassc->sc, NULL);
228 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
229 		    sassc->startup_refcount);
234 * The firmware requires us to stop sending commands when we're doing task
237 * XXX The logic for serializing the device has been made lazy and moved to
238 * mprsas_prepare_for_tm().
/*
 * Allocate a high-priority command pre-initialized as a SCSI task
 * management request (Function = SCSI_TASK_MGMT).
 * NOTE(review): the NULL-check of 'tm' and the return statement are
 * elided in this extract.
 */
241 mprsas_alloc_tm(struct mpr_softc *sc)
243 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
244 	struct mpr_command *tm;
247 	tm = mpr_alloc_high_priority_command(sc);
251 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
252 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
/*
 * Release a task-management command: clear the target's INRESET flag,
 * release/free the devq freeze state (path, CCB) associated with the
 * TM, then return the command to the high-priority pool.
 */
257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
259 	int target_id = 0xFFFFFFFF;
266 	 * For TM's the devq is frozen for the device. Unfreeze it here and
267 	 * free the resources used for freezing the devq. Must clear the
268 	 * INRESET flag as well or scsi I/O will not work.
270 	if (tm->cm_targ != NULL) {
271 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
272 		target_id = tm->cm_targ->tid;
275 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
277 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
278 		xpt_free_path(tm->cm_ccb->ccb_h.path);
279 		xpt_free_ccb(tm->cm_ccb);
282 	mpr_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of a single target, or of the whole bus when
 * targ == NULL (targetid becomes CAM_TARGET_WILDCARD -> XPT_SCAN_BUS).
 * Allocates the CCB with the nowait variant since we may hold the
 * driver mutex; failure paths just log and bail.
 */
286 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
288 	struct mprsas_softc *sassc = sc->sassc;
290 	target_id_t targetid;
294 	pathid = cam_sim_path(sassc->sim);
296 		targetid = CAM_TARGET_WILDCARD;
298 		targetid = targ - sassc->targets;
301 	 * Allocate a CCB and schedule a rescan.
303 	ccb = xpt_alloc_ccb_nowait();
305 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
309 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
310 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
311 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
316 	if (targetid == CAM_TARGET_WILDCARD)
317 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
319 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
321 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style logging helper for a command: builds a one-line message
 * in an sbuf with the CAM path / CDB (for SCSI I/O CCBs) or a
 * "(noperiph:...)" prefix, appends the SMID and the caller's format,
 * then emits it.  Returns early when the requested debug level is not
 * enabled for this adapter.
 */
326 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
336 	/* No need to be in here if debugging isn't enabled */
337 	if ((cm->cm_sc->mpr_debug & level) == 0)
340 	sbuf_new(&sb, str, sizeof(str), 0);
344 	if (cm->cm_ccb != NULL) {
345 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
347 		sbuf_cat(&sb, path_str);
348 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
349 			scsi_command_string(&cm->cm_ccb->csio, &sb);
350 			sbuf_printf(&sb, "length %d ",
351 			    cm->cm_ccb->csio.dxfer_len);
354 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
355 		    cam_sim_name(cm->cm_sc->sassc->sim),
356 		    cam_sim_unit(cm->cm_sc->sassc->sim),
357 		    cam_sim_bus(cm->cm_sc->sassc->sim),
358 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
362 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
363 	sbuf_vprintf(&sb, fmt, ap);
365 	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  A NULL reply means a controller
 * reset intervened; otherwise, on IOC success, clear the target's
 * enclosure/link bookkeeping (devname/sasaddr are deliberately kept —
 * see comment below).  Volumes need no SAS_OP_REMOVE_DEVICE follow-up.
 */
371 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
373 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
374 	struct mprsas_target *targ;
379 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
380 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
384 		/* XXX retry the remove after the diag reset completes? */
385 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
386 		    "0x%04x\n", __func__, handle);
387 		mprsas_free_tm(sc, tm);
391 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
392 	    MPI2_IOCSTATUS_SUCCESS) {
393 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
394 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
397 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
398 	    le32toh(reply->TerminationCount));
399 	mpr_free_reply(sc, tm->cm_reply_data);
400 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
402 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
406 	 * Don't clear target if remove fails because things will get confusing.
407 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
408 	 * this target id if possible, and so we can assign the same target id
409 	 * to this device if it comes back in the future.
411 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
412 	    MPI2_IOCSTATUS_SUCCESS) {
415 		targ->encl_handle = 0x0;
416 		targ->encl_level_valid = 0x0;
417 		targ->encl_level = 0x0;
418 		targ->connector_name[0] = ' ';
419 		targ->connector_name[1] = ' ';
420 		targ->connector_name[2] = ' ';
421 		targ->connector_name[3] = ' ';
422 		targ->encl_slot = 0x0;
423 		targ->exp_dev_handle = 0x0;
425 		targ->linkrate = 0x0;
428 		targ->scsi_req_desc_type = 0;
431 	mprsas_free_tm(sc, tm);
436 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
437 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Kick off removal of a RAID volume: mark the target INREMOVAL,
 * rescan it so CAM notices, then issue a TARGET_RESET TM whose
 * completion routine (mprsas_remove_volume) finishes the cleanup.
 * MsgFlags select a link reset, or a PCIe protocol-level reset for
 * NVMe devices without custom TM handling.
 */
440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
442 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443 	struct mpr_softc *sc;
444 	struct mpr_command *cm;
445 	struct mprsas_target *targ = NULL;
447 	MPR_FUNCTRACE(sassc->sc);
450 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
452 		/* FIXME: what is the action? */
453 		/* We don't know about this device? */
454 		mpr_dprint(sc, MPR_ERROR,
455 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
459 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
461 	cm = mprsas_alloc_tm(sc);
463 		mpr_dprint(sc, MPR_ERROR,
464 		    "%s: command alloc failure\n", __func__);
468 	mprsas_rescan_target(sc, targ);
470 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471 	req->DevHandle = targ->handle;
472 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
474 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
475 		/* SAS Hard Link Reset / SATA Link Reset */
476 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
478 		/* PCIe Protocol Level Reset*/
480 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
485 	cm->cm_complete = mprsas_remove_volume;
486 	cm->cm_complete_data = (void *)(uintptr_t)handle;
488 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
489 	    __func__, targ->tid);
490 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
492 	mpr_map_command(sc, cm);
496 * The firmware performs debounce on the link to avoid transient link errors
497 * and false removals. When it does decide that link has been lost and a
498 * device needs to go away, it expects that the host will perform a target reset
499 * and then an op remove. The reset has the side-effect of aborting any
500 * outstanding requests for the device, which is required for the op-remove to
501 * succeed. It's not clear if the host should check for the device coming back
502 * alive after the reset.
/*
 * Kick off removal of a bare device (see the firmware-debounce comment
 * above): mark the target INREMOVAL, rescan it, then issue a
 * TARGET_RESET TM; its completion routine (mprsas_remove_device)
 * follows up with the SAS_OP_REMOVE_DEVICE the firmware expects.
 */
505 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
507 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
508 	struct mpr_softc *sc;
509 	struct mpr_command *tm;
510 	struct mprsas_target *targ = NULL;
512 	MPR_FUNCTRACE(sassc->sc);
516 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
518 		/* FIXME: what is the action? */
519 		/* We don't know about this device? */
520 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
525 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
527 	tm = mprsas_alloc_tm(sc);
529 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
534 	mprsas_rescan_target(sc, targ);
536 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
537 	memset(req, 0, sizeof(*req));
538 	req->DevHandle = htole16(targ->handle);
539 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
541 	/* SAS Hard Link Reset / SATA Link Reset */
542 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
546 	tm->cm_complete = mprsas_remove_device;
547 	tm->cm_complete_data = (void *)(uintptr_t)handle;
549 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
550 	    __func__, targ->tid);
551 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
553 	mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM from
 * mprsas_prepare_remove().  Validates the reply, then reuses the same
 * command as a SAS_IO_UNIT_CONTROL / OP_REMOVE_DEVICE request.  The
 * remove is only dispatched immediately when the target has no
 * outstanding commands; otherwise it is parked in pending_remove_tm
 * until the last command drains.
 */
557 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
559 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
560 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
561 	struct mprsas_target *targ;
566 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
567 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
571 	 * Currently there should be no way we can hit this case.  It only
572 	 * happens when we have a failure to allocate chain frames, and
573 	 * task management commands don't have S/G lists.
575 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
576 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
577 		    "handle %#04x! This should not happen!\n", __func__,
578 		    tm->cm_flags, handle);
582 		/* XXX retry the remove after the diag reset completes? */
583 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
584 		    "0x%04x\n", __func__, handle);
585 		mprsas_free_tm(sc, tm);
589 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
590 	    MPI2_IOCSTATUS_SUCCESS) {
591 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
592 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
595 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
596 	    le32toh(reply->TerminationCount));
597 	mpr_free_reply(sc, tm->cm_reply_data);
598 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
600 	/* Reuse the existing command */
601 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
602 	memset(req, 0, sizeof(*req));
603 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
604 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
605 	req->DevHandle = htole16(handle);
607 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
608 	tm->cm_complete = mprsas_remove_complete;
609 	tm->cm_complete_data = (void *)(uintptr_t)handle;
612 	 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
613 	 * They should be aborted or time out and we'll kick thus off there
616 	if (TAILQ_FIRST(&targ->commands) == NULL) {
617 		mpr_dprint(sc, MPR_INFO, "No pending commands: starting remove_device\n");
618 		mpr_map_command(sc, tm);
619 		targ->pending_remove_tm = NULL;
621 		targ->pending_remove_tm = tm;
624 	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
626 	if (targ->encl_level_valid) {
627 		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
628 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
629 		    targ->connector_name);
/*
 * Final stage of device removal: completion of the OP_REMOVE_DEVICE
 * request.  By now the target must have no pending commands
 * (asserted below).  On IOC success, clear the target's enclosure and
 * link bookkeeping and free any per-LUN state; devname/sasaddr are
 * kept so the target id can be re-associated if the device returns.
 */
634 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
636 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
638 	struct mprsas_target *targ;
639 	struct mprsas_lun *lun;
643 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
644 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
649 	 * At this point, we should have no pending commands for the target.
650 	 * The remove target has just completed.
652 	KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
653 	    ("%s: no commands should be pending\n", __func__));
656 	 * Currently there should be no way we can hit this case.  It only
657 	 * happens when we have a failure to allocate chain frames, and
658 	 * task management commands don't have S/G lists.
660 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
661 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
662 		    "handle %#04x! This should not happen!\n", __func__,
663 		    tm->cm_flags, handle);
664 		mprsas_free_tm(sc, tm);
669 		/* most likely a chip reset */
670 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
671 		    "0x%04x\n", __func__, handle);
672 		mprsas_free_tm(sc, tm);
676 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
677 	    __func__, handle, le16toh(reply->IOCStatus));
680 	 * Don't clear target if remove fails because things will get confusing.
681 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
682 	 * this target id if possible, and so we can assign the same target id
683 	 * to this device if it comes back in the future.
685 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
686 	    MPI2_IOCSTATUS_SUCCESS) {
688 		targ->encl_handle = 0x0;
689 		targ->encl_level_valid = 0x0;
690 		targ->encl_level = 0x0;
691 		targ->connector_name[0] = ' ';
692 		targ->connector_name[1] = ' ';
693 		targ->connector_name[2] = ' ';
694 		targ->connector_name[3] = ' ';
695 		targ->encl_slot = 0x0;
696 		targ->exp_dev_handle = 0x0;
698 		targ->linkrate = 0x0;
701 		targ->scsi_req_desc_type = 0;
703 		while (!SLIST_EMPTY(&targ->luns)) {
704 			lun = SLIST_FIRST(&targ->luns);
705 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
710 	mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (SAS topology,
 * discovery, IR/RAID, temperature, ...) and register mprsas_evt_handler
 * for it.  PCIe/NVMe events are added only for MPI 2.6+ firmware on
 * Gen3.5 IOCs.
 */
714 mprsas_register_events(struct mpr_softc *sc)
719 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
720 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
721 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
722 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
723 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
724 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
725 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
726 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
727 	setbit(events, MPI2_EVENT_IR_VOLUME);
728 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
729 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
730 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
731 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
732 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
733 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
734 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
735 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
736 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
737 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
741 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
742 	    &sc->sassc->mprsas_eh);
/*
 * Attach the CAM/SAS subsystem for this adapter: allocate the per-SAS
 * softc and target array, create the SIM queue and SIM, start the
 * firmware-event taskqueue, register the (single, faked) SCSI bus,
 * enter startup/discovery mode with the simq frozen, and register for
 * async events used to probe per-device EEDP capability.
 * NOTE(review): error-unwinding labels and several returns are elided
 * in this extract.
 */
748 mpr_attach_sas(struct mpr_softc *sc)
750 	struct mprsas_softc *sassc;
752 	int unit, error = 0, reqs;
755 	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
757 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
759 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
760 		    "Cannot allocate SAS subsystem memory\n");
765 	 * XXX MaxTargets could change during a reinit.  Since we don't
766 	 * resize the targets[] array during such an event, cache the value
767 	 * of MaxTargets here so that we don't get into trouble later.  This
768 	 * should move into the reinit logic.
770 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
771 	sassc->targets = malloc(sizeof(struct mprsas_target) *
772 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
773 	if (!sassc->targets) {
774 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
775 		    "Cannot allocate SAS target memory\n");
782 	reqs = sc->num_reqs - sc->num_prireqs - 1;
783 	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
784 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
789 	unit = device_get_unit(sc->mpr_dev);
790 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
791 	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
792 	if (sassc->sim == NULL) {
793 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
798 	TAILQ_INIT(&sassc->ev_queue);
800 	/* Initialize taskqueue for Event Handling */
801 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
802 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
803 	    taskqueue_thread_enqueue, &sassc->ev_tq);
804 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
805 	    device_get_nameunit(sc->mpr_dev));
810 	 * XXX There should be a bus for every port on the adapter, but since
811 	 * we're just going to fake the topology for now, we'll pretend that
812 	 * everything is just a target on a single bus.
814 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
815 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
816 		    "Error %d registering SCSI bus\n", error);
822 	 * Assume that discovery events will start right away.
824 	 * Hold off boot until discovery is complete.
826 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
827 	sc->sassc->startup_refcount = 0;
828 	mprsas_startup_increment(sassc);
830 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
833 	 * Register for async events so we can determine the EEDP
834 	 * capabilities of devices.
836 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
837 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
839 	if (status != CAM_REQ_CMP) {
840 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
841 		    "Error %#x creating sim path\n", status);
846 #if (__FreeBSD_version >= 1000006) || \
847     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
848 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
850 		event = AC_FOUND_DEVICE;
854 		 * Prior to the CAM locking improvements, we can't call
855 		 * xpt_register_async() with a particular path specified.
857 		 * If a path isn't specified, xpt_register_async() will
858 		 * generate a wildcard path and acquire the XPT lock while
859 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
860 		 * It will then drop the XPT lock once that is done.
862 		 * If a path is specified for xpt_register_async(), it will
863 		 * not acquire and drop the XPT lock around the call to
864 		 * xpt_action().  xpt_action() asserts that the caller
865 		 * holds the SIM lock, so the SIM lock has to be held when
866 		 * calling xpt_register_async() when the path is specified.
868 		 * But xpt_register_async calls xpt_for_all_devices(),
869 		 * which calls xptbustraverse(), which will acquire each
870 		 * SIM lock.  When it traverses our particular bus, it will
871 		 * necessarily acquire the SIM lock, which will lead to a
872 		 * recursive lock acquisition.
874 		 * The CAM locking changes fix this problem by acquiring
875 		 * the XPT topology lock around bus traversal in
876 		 * xptbustraverse(), so the caller can hold the SIM lock
877 		 * and it does not cause a recursive lock acquisition.
879 		 * These __FreeBSD_version values are approximate, especially
880 		 * for stable/10, which is two months later than the actual
884 #if (__FreeBSD_version < 1000703) || \
885     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
887 		status = xpt_register_async(event, mprsas_async, sc,
891 		status = xpt_register_async(event, mprsas_async, sc,
895 		if (status != CAM_REQ_CMP) {
896 			mpr_dprint(sc, MPR_ERROR,
897 			    "Error %#x registering async handler for "
898 			    "AC_ADVINFO_CHANGED events\n", status);
899 			xpt_free_path(sassc->path);
903 	if (status != CAM_REQ_CMP) {
905 		 * EEDP use is the exception, not the rule.
906 		 * Warn the user, but do not fail to attach.
908 		mpr_printf(sc, "EEDP capabilities disabled.\n");
913 	mprsas_register_events(sc);
918 	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
/*
 * Tear down the CAM/SAS subsystem in reverse of mpr_attach_sas():
 * deregister events, drain/free the event taskqueue (lock unheld to
 * avoid deadlock), unwind any startup refcounts and simq freezes,
 * remove the async handler and path, deregister the bus, free the SIM
 * and SIMQ, then free per-target LUN state and the targets array.
 */
923 mpr_detach_sas(struct mpr_softc *sc)
925 	struct mprsas_softc *sassc;
926 	struct mprsas_lun *lun, *lun_tmp;
927 	struct mprsas_target *targ;
932 	if (sc->sassc == NULL)
936 	mpr_deregister_events(sc, sassc->mprsas_eh);
939 	 * Drain and free the event handling taskqueue with the lock
940 	 * unheld so that any parallel processing tasks drain properly
941 	 * without deadlocking.
943 	if (sassc->ev_tq != NULL)
944 		taskqueue_free(sassc->ev_tq);
946 	/* Make sure CAM doesn't wedge if we had to bail out early. */
949 	while (sassc->startup_refcount != 0)
950 		mprsas_startup_decrement(sassc);
952 	/* Deregister our async handler */
953 	if (sassc->path != NULL) {
954 		xpt_register_async(0, mprsas_async, sc, sassc->path);
955 		xpt_free_path(sassc->path);
959 	if (sassc->flags & MPRSAS_IN_STARTUP)
960 		xpt_release_simq(sassc->sim, 1);
962 	if (sassc->sim != NULL) {
963 		xpt_bus_deregister(cam_sim_path(sassc->sim));
964 		cam_sim_free(sassc->sim, FALSE);
969 	if (sassc->devq != NULL)
970 		cam_simq_free(sassc->devq);
972 	for (i = 0; i < sassc->maxtargets; i++) {
973 		targ = &sassc->targets[i];
974 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
978 	free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery completes: cancel any pending
 * discovery-timeout callout, and (once per init, gated by
 * track_mapping_events) arm the delayed device_check callout that
 * updates missing-device counts in the mapping table.
 */
986 mprsas_discovery_end(struct mprsas_softc *sassc)
988 	struct mpr_softc *sc = sassc->sc;
992 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
993 		callout_stop(&sassc->discovery_callout);
996 	 * After discovery has completed, check the mapping table for any
997 	 * missing devices and update their missing counts.  Only do this once
998 	 * whenever the driver is initialized so that missing counts aren't
999 	 * updated unnecessarily.  Note that just because discovery has
1000 	 * completed doesn't mean that events have been processed yet.  The
1001 	 * check_devices function is a callout timer that checks if ALL devices
1002 	 * are missing.  If so, it will wait a little longer for events to
1003 	 * complete and keep resetting itself until some device in the mapping
1004 	 * table is not missing, meaning that event processing has started.
1006 	if (sc->track_mapping_events) {
1007 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
1008 		    "completed. Check for missing devices in the mapping "
1010 		callout_reset(&sc->device_check_callout,
1011 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
/*
 * Main CAM action entry point for the SIM: dispatch on the CCB
 * func_code.  Handles XPT_PATH_INQ / XPT_GET_TRAN_SETTINGS /
 * XPT_CALC_GEOMETRY inline, routes reset/SCSI-I/O/SMP CCBs to their
 * handlers, fakes success for abort/term-io, and rejects everything
 * else with CAM_FUNC_NOTAVAIL.  Caller must hold the driver mutex
 * (asserted below).  NOTE(review): several case labels and the
 * ccb_done/break lines are elided in this extract.
 */
1017 mprsas_action(struct cam_sim *sim, union ccb *ccb)
1019 	struct mprsas_softc *sassc;
1021 	sassc = cam_sim_softc(sim);
1023 	MPR_FUNCTRACE(sassc->sc);
1024 	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1025 	    ccb->ccb_h.func_code);
1026 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
1028 	switch (ccb->ccb_h.func_code) {
1031 		struct ccb_pathinq *cpi = &ccb->cpi;
1032 		struct mpr_softc *sc = sassc->sc;
1034 		cpi->version_num = 1;
1035 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1036 		cpi->target_sprt = 0;
1037 #if (__FreeBSD_version >= 1000039) || \
1038     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1039 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1041 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1043 		cpi->hba_eng_cnt = 0;
1044 		cpi->max_target = sassc->maxtargets - 1;
1048 		 * initiator_id is set here to an ID outside the set of valid
1049 		 * target IDs (including volumes).
1051 		cpi->initiator_id = sassc->maxtargets;
1052 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1053 		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1054 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1055 		cpi->unit_number = cam_sim_unit(sim);
1056 		cpi->bus_id = cam_sim_bus(sim);
1058 		 * XXXSLM-I think this needs to change based on config page or
1059 		 * something instead of hardcoded to 150000.
1061 		cpi->base_transfer_speed = 150000;
1062 		cpi->transport = XPORT_SAS;
1063 		cpi->transport_version = 0;
1064 		cpi->protocol = PROTO_SCSI;
1065 		cpi->protocol_version = SCSI_REV_SPC;
1066 		cpi->maxio = sc->maxio;
1067 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1070 	case XPT_GET_TRAN_SETTINGS:
1072 		struct ccb_trans_settings	*cts;
1073 		struct ccb_trans_settings_sas	*sas;
1074 		struct ccb_trans_settings_scsi	*scsi;
1075 		struct mprsas_target *targ;
1078 		sas = &cts->xport_specific.sas;
1079 		scsi = &cts->proto_specific.scsi;
1081 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1082 		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1083 		    cts->ccb_h.target_id));
1084 		targ = &sassc->targets[cts->ccb_h.target_id];
1085 		if (targ->handle == 0x0) {
1086 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1090 		cts->protocol_version = SCSI_REV_SPC2;
1091 		cts->transport = XPORT_SAS;
1092 		cts->transport_version = 0;
1094 		sas->valid = CTS_SAS_VALID_SPEED;
1095 		switch (targ->linkrate) {
1097 			sas->bitrate = 150000;
1100 			sas->bitrate = 300000;
1103 			sas->bitrate = 600000;
1106 			sas->bitrate = 1200000;
1112 		cts->protocol = PROTO_SCSI;
1113 		scsi->valid = CTS_SCSI_VALID_TQ;
1114 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1116 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1119 	case XPT_CALC_GEOMETRY:
1120 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1121 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1124 		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1126 		mprsas_action_resetdev(sassc, ccb);
1131 		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1132 		    "for abort or reset\n");
1133 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1136 		mprsas_action_scsiio(sassc, ccb);
1138 #if __FreeBSD_version >= 900026
1140 		mprsas_action_smpio(sassc, ccb);
1144 		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Broadcast an async reset notification (ac_code, e.g. AC_BUS_RESET or
 * AC_SENT_BDR) to CAM for the given target/lun: build a transient path
 * on our SIM, xpt_async() it, then free the path.
 */
1152 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1153     target_id_t target_id, lun_id_t lun_id)
1155 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1156 	struct cam_path *path;
1158 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1159 	    ac_code, target_id, (uintmax_t)lun_id);
1161 	if (xpt_create_path(&path, NULL,
1162 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1163 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1168 	xpt_async(ac_code, path, NULL);
1169 	xpt_free_path(path);
/*
 * Force-complete every non-free command after a diag reset.  Each command
 * is completed with cm_reply forced to NULL; depending on its flags it is
 * run through its completion callback, woken up (for sleeping waiters),
 * or flagged COMPLETE (for pollers).  Requires the softc mutex held.
 */
1173 mprsas_complete_all_commands(struct mpr_softc *sc)
1175 struct mpr_command *cm;
1180 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1182 /* complete all commands with a NULL reply */
1183 for (i = 1; i < sc->num_reqs; i++) {
1184 cm = &sc->commands[i];
1185 if (cm->cm_state == MPR_CM_STATE_FREE)
1188 cm->cm_state = MPR_CM_STATE_BUSY;
1189 cm->cm_reply = NULL;
/* Timed-out SATA-identify commands own their data buffer; free it here. */
1192 if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
1194 free(cm->cm_data, M_MPR);
/* Polled commands only need the COMPLETE flag so the poll loop exits. */
1198 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1199 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1201 if (cm->cm_complete != NULL) {
1202 mprsas_log_command(cm, MPR_RECOVERY,
1203 "completing cm %p state %x ccb %p for diag reset\n",
1204 cm, cm->cm_state, cm->cm_ccb);
1205 cm->cm_complete(sc, cm);
1207 } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1208 mprsas_log_command(cm, MPR_RECOVERY,
1209 "waking up cm %p state %x ccb %p for diag reset\n",
1210 cm, cm->cm_state, cm->cm_ccb);
/* A command that is still not free here was not handled above: log it. */
1215 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1216 /* this should never happen, but if it does, log */
1217 mprsas_log_command(cm, MPR_RECOVERY,
1218 "cm %p state %x flags 0x%x ccb %p during diag "
1219 "reset\n", cm, cm->cm_state, cm->cm_flags,
/* Everything has been torn down; no I/O commands remain active. */
1224 sc->io_cmds_active = 0;
/*
 * Re-initialize SAS-layer state after a controller diag reset: re-enter
 * startup/discovery mode, announce a bus reset to CAM, force-complete all
 * outstanding commands, and invalidate every target's device handles so
 * the subsequent rediscovery assigns fresh ones.
 */
1228 mprsas_handle_reinit(struct mpr_softc *sc)
1232 /* Go back into startup mode and freeze the simq, so that CAM
1233 * doesn't send any commands until after we've rediscovered all
1234 * targets and found the proper device handles for them.
1236 * After the reset, portenable will trigger discovery, and after all
1237 * discovery-related activities have finished, the simq will be
1240 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1241 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1242 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1243 mprsas_startup_increment(sc->sassc);
1245 /* notify CAM of a bus reset */
1246 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1249 /* complete and cleanup after all outstanding commands */
1250 mprsas_complete_all_commands(sc);
1252 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1253 __func__, sc->sassc->startup_refcount);
1255 /* zero all the target handles, since they may change after the
1256 * reset, and we have to rediscover all the targets and use the new
1259 for (i = 0; i < sc->sassc->maxtargets; i++) {
/* outstanding should be 0 after mprsas_complete_all_commands(); log if not. */
1260 if (sc->sassc->targets[i].outstanding != 0)
1261 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1262 i, sc->sassc->targets[i].outstanding);
1263 sc->sassc->targets[i].handle = 0x0;
1264 sc->sassc->targets[i].exp_dev_handle = 0x0;
1265 sc->sassc->targets[i].outstanding = 0;
/* Mark target as being in diag reset until it is rediscovered. */
1266 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management (TM) command itself times
 * out.  Logs the event, asserts the TM is still in the firmware queue,
 * and marks it busy.  NOTE(review): the elided remainder presumably
 * escalates to a controller diag reset -- confirm against full source.
 */
1270 mprsas_tm_timeout(void *data)
1272 struct mpr_command *tm = data;
1273 struct mpr_softc *sc = tm->cm_sc;
1275 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1277 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/* The TM must still be queued with the IOC when its timer fires. */
1280 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1281 ("command not inqueue\n"));
1283 tm->cm_state = MPR_CM_STATE_BUSY;
/*
 * Completion callback for a LUN-reset task-management command.  Stops the
 * TM timeout callout, handles error/NULL-reply cases, then checks whether
 * any commands remain outstanding on the LUN: if none, recovery for the
 * LU is done (announce AC_SENT_BDR, continue with any other timed-out
 * command or free the TM); otherwise escalate to a target reset.
 */
1288 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1290 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1291 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1292 unsigned int cm_count = 0;
1293 struct mpr_command *cm;
1294 struct mprsas_target *targ;
/* The TM completed; cancel its mprsas_tm_timeout callout. */
1296 callout_stop(&tm->cm_callout);
1298 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1299 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1303 * Currently there should be no way we can hit this case. It only
1304 * happens when we have a failure to allocate chain frames, and
1305 * task management commands don't have S/G lists.
1307 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1308 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1309 "%s: cm_flags = %#x for LUN reset! "
1310 "This should not happen!\n", __func__, tm->cm_flags);
1311 mprsas_free_tm(sc, tm);
/* NULL reply: either a diag reset is already in progress (cleanup) or
 * the reply truly went missing, which warrants a controller reset. */
1315 if (reply == NULL) {
1316 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1318 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1319 /* this completion was due to a reset, just cleanup */
1320 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1321 "reset, ignoring NULL LUN reset reply\n");
1323 mprsas_free_tm(sc, tm);
1326 /* we should have gotten a reply. */
1327 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1328 "LUN reset attempt, resetting controller\n");
1334 mpr_dprint(sc, MPR_RECOVERY,
1335 "logical unit reset status 0x%x code 0x%x count %u\n",
1336 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1337 le32toh(reply->TerminationCount));
1340 * See if there are any outstanding commands for this LUN.
1341 * This could be made more efficient by using a per-LU data
1342 * structure of some sort.
1344 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1345 if (cm->cm_lun == tm->cm_lun)
1349 if (cm_count == 0) {
1350 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1351 "Finished recovery after LUN reset for target %u\n",
/* Tell CAM a BDR-equivalent was delivered to this target/LUN. */
1354 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1358 * We've finished recovery for this logical unit. check and
1359 * see if some other logical unit has a timedout command
1360 * that needs to be processed.
1362 cm = TAILQ_FIRST(&targ->timedout_commands);
1364 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1365 "More commands to abort for target %u\n", targ->tid);
/* Reuse this TM to abort the next timed-out command. */
1366 mprsas_send_abort(sc, tm, cm);
1369 mprsas_free_tm(sc, tm);
1372 /* if we still have commands for this LUN, the reset
1373 * effectively failed, regardless of the status reported.
1374 * Escalate to a target reset.
1376 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1377 "logical unit reset complete for target %u, but still "
1378 "have %u command(s), sending target reset\n", targ->tid,
/* NVMe devices without custom TM handling cannot take a target reset. */
1380 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1381 mprsas_send_reset(sc, tm,
1382 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion callback for a target-reset task-management command.  Stops
 * the TM timeout callout and handles error/NULL-reply cases.  If the
 * target has no commands outstanding, recovery succeeded (announce
 * AC_SENT_BDR and free the TM); otherwise the reset effectively failed
 * and the driver escalates to a controller reset.
 */
1389 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1391 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1392 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1393 struct mprsas_target *targ;
/* The TM completed; cancel its mprsas_tm_timeout callout. */
1395 callout_stop(&tm->cm_callout);
1397 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1398 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1402 * Currently there should be no way we can hit this case. It only
1403 * happens when we have a failure to allocate chain frames, and
1404 * task management commands don't have S/G lists.
1406 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1407 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1408 "reset! This should not happen!\n", __func__, tm->cm_flags);
1409 mprsas_free_tm(sc, tm);
/* NULL reply: cleanup if a diag reset is in progress, else escalate. */
1413 if (reply == NULL) {
1414 mpr_dprint(sc, MPR_RECOVERY,
1415 "NULL target reset reply for tm %p TaskMID %u\n",
1416 tm, le16toh(req->TaskMID));
1417 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1418 /* this completion was due to a reset, just cleanup */
1419 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1420 "reset, ignoring NULL target reset reply\n");
1422 mprsas_free_tm(sc, tm);
1425 /* we should have gotten a reply. */
1426 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1427 "target reset attempt, resetting controller\n");
1433 mpr_dprint(sc, MPR_RECOVERY,
1434 "target reset status 0x%x code 0x%x count %u\n",
1435 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1436 le32toh(reply->TerminationCount));
1438 if (targ->outstanding == 0) {
1440 * We've finished recovery for this target and all
1441 * of its logical units.
1443 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1444 "Finished reset recovery for target %u\n", targ->tid);
/* Tell CAM a BDR was delivered to this target. */
1446 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1450 mprsas_free_tm(sc, tm);
1453 * After a target reset, if this target still has
1454 * outstanding commands, the reset effectively failed,
1455 * regardless of the status reported. escalate.
1457 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1458 "Target reset complete for target %u, but still have %u "
1459 "command(s), resetting controller\n", targ->tid,
/* Seconds to wait for a target/LUN reset TM before mprsas_tm_timeout fires. */
1465 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a reset task-management request of the given MPI type
 * (logical-unit reset or target reset) for tm's target.  Selects the
 * reset method and timeout based on whether the target is NVMe, installs
 * the matching completion callback, arms the TM timeout callout, and
 * maps/submits the command to the IOC.
 */
1468 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1470 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1471 struct mprsas_target *target;
1474 target = tm->cm_targ;
/* No device handle means the target is gone; nothing to reset. */
1475 if (target->handle == 0) {
1476 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1477 "%d\n", __func__, target->tid);
1481 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1482 req->DevHandle = htole16(target->handle);
1483 req->TaskType = type;
1485 if (!target->is_nvme || sc->custom_nvme_tm_handling) {
1486 timeout = MPR_RESET_TIMEOUT;
1488 * Target reset method =
1489 * SAS Hard Link Reset / SATA Link Reset
1491 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* NVMe path: use the per-target timeout override when one is set. */
1493 timeout = (target->controller_reset_timeout) ? (
1494 target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
1495 /* PCIe Protocol Level Reset*/
1497 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
1500 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1501 /* XXX Need to handle invalid LUNs */
1502 MPR_SET_LUN(req->LUN, tm->cm_lun);
1503 tm->cm_targ->logical_unit_resets++;
1504 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1505 "Sending logical unit reset to target %u lun %d\n",
1506 target->tid, tm->cm_lun);
1507 tm->cm_complete = mprsas_logical_unit_reset_complete;
1508 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1509 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1510 tm->cm_targ->target_resets++;
1511 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1512 "Sending target reset to target %u\n", target->tid);
1513 tm->cm_complete = mprsas_target_reset_complete;
1514 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
/* Unknown TM type: log and (per elided code, presumably) bail out. */
1517 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1521 if (target->encl_level_valid) {
1522 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1523 "At enclosure level %d, slot %d, connector name (%4s)\n",
1524 target->encl_level, target->encl_slot,
1525 target->connector_name);
1529 tm->cm_complete_data = (void *)tm;
/* Arm the TM timeout before submitting, so a lost TM is detected. */
1531 callout_reset(&tm->cm_callout, timeout * hz,
1532 mprsas_tm_timeout, tm);
1534 err = mpr_map_command(sc, tm);
1536 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1537 "error %d sending reset type %u\n", err, type);
/*
 * Completion callback for an ABORT_TASK task-management command.  Stops
 * the TM timeout callout and handles error/NULL-reply cases.  Then looks
 * at the target's timed-out command list: empty means recovery is done;
 * a different command at the head means the abort worked and the next
 * one is aborted; the same command still pending means the abort failed,
 * so recovery escalates to a logical-unit reset.
 */
1544 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1546 struct mpr_command *cm;
1547 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1548 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1549 struct mprsas_target *targ;
/* The TM completed; cancel its mprsas_tm_timeout callout. */
1551 callout_stop(&tm->cm_callout);
1553 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1554 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1558 * Currently there should be no way we can hit this case. It only
1559 * happens when we have a failure to allocate chain frames, and
1560 * task management commands don't have S/G lists.
1562 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1563 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1564 "cm_flags = %#x for abort %p TaskMID %u!\n",
1565 tm->cm_flags, tm, le16toh(req->TaskMID));
1566 mprsas_free_tm(sc, tm);
/* NULL reply: cleanup if a diag reset is in progress, else escalate. */
1570 if (reply == NULL) {
1571 mpr_dprint(sc, MPR_RECOVERY,
1572 "NULL abort reply for tm %p TaskMID %u\n",
1573 tm, le16toh(req->TaskMID));
1574 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1575 /* this completion was due to a reset, just cleanup */
1576 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1577 "reset, ignoring NULL abort reply\n");
1579 mprsas_free_tm(sc, tm);
1581 /* we should have gotten a reply. */
1582 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1583 "abort attempt, resetting controller\n");
1589 mpr_dprint(sc, MPR_RECOVERY,
1590 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1591 le16toh(req->TaskMID),
1592 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1593 le32toh(reply->TerminationCount));
/* Inspect the head of the target's timed-out command queue. */
1595 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1598 * if there are no more timedout commands, we're done with
1599 * error recovery for this target.
1601 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1602 "Finished abort recovery for target %u\n", targ->tid);
1604 mprsas_free_tm(sc, tm);
1605 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1606 /* abort success, but we have more timedout commands to abort */
1607 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1608 "Continuing abort recovery for target %u\n", targ->tid);
1609 mprsas_send_abort(sc, tm, cm);
1612 * we didn't get a command completion, so the abort
1613 * failed as far as we're concerned. escalate.
1615 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1616 "Abort failed for target %u, sending logical unit reset\n",
1619 mprsas_send_reset(sc, tm,
1620 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to wait for an ABORT_TASK TM before mprsas_tm_timeout fires. */
1624 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT_TASK task-management request (tm) targeting
 * the timed-out command cm.  Fills in the device handle, LUN and the
 * victim's SMID (TaskMID), installs mprsas_abort_complete as the
 * completion callback, arms the TM timeout callout, and submits the TM.
 */
1627 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1628 struct mpr_command *cm)
1630 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1631 struct mprsas_target *targ;
/* No device handle means the target is gone; nothing to abort against. */
1635 if (targ->handle == 0) {
1636 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1637 "%s null devhandle for target_id %d\n",
1638 __func__, cm->cm_ccb->ccb_h.target_id);
1642 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1643 "Aborting command %p\n", cm);
1645 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1646 req->DevHandle = htole16(targ->handle);
1647 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1649 /* XXX Need to handle invalid LUNs */
1650 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the command the firmware should abort. */
1652 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1655 tm->cm_complete = mprsas_abort_complete;
1656 tm->cm_complete_data = (void *)tm;
1657 tm->cm_targ = cm->cm_targ;
1658 tm->cm_lun = cm->cm_lun;
/* NVMe targets (without custom TM handling) use their own abort timeout. */
1660 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1661 timeout = MPR_ABORT_TIMEOUT;
1663 timeout = sc->nvme_abort_timeout;
/* Arm the TM timeout before submitting, so a lost TM is detected. */
1665 callout_reset(&tm->cm_callout, timeout * hz,
1666 mprsas_tm_timeout, tm);
1670 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1672 err = mpr_map_command(sc, tm);
1674 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1675 "error %d sending abort for cm %p SMID %u\n",
1676 err, cm, req->TaskMID);
/*
 * Callout handler fired when a SCSI I/O command exceeds its CAM timeout.
 * First drains any pending interrupts (the command may have just
 * completed), then marks the command timed out, queues it on the
 * target's timedout_commands list, and starts recovery by sending an
 * ABORT_TASK TM -- unless the target is already in recovery, in which
 * case the command simply waits its turn.
 */
1681 mprsas_scsiio_timeout(void *data)
1683 sbintime_t elapsed, now;
1685 struct mpr_softc *sc;
1686 struct mpr_command *cm;
1687 struct mprsas_target *targ;
1689 cm = (struct mpr_command *)data;
1695 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1697 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1700 * Run the interrupt handler to make sure it's not pending. This
1701 * isn't perfect because the command could have already completed
1702 * and been re-used, though this is unlikely.
1704 mpr_intr_locked(sc);
/* Already picked up by recovery (or just completed): nothing to do. */
1705 if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
1706 mprsas_log_command(cm, MPR_XINFO,
1707 "SCSI command %p almost timed out\n", cm);
1711 if (cm->cm_ccb == NULL) {
1712 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
/* qos.sim_data holds the sbinuptime() stamp set at submission. */
1719 elapsed = now - ccb->ccb_h.qos.sim_data;
1720 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1721 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1722 targ->tid, targ->handle, ccb->ccb_h.timeout,
1723 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1724 if (targ->encl_level_valid) {
1725 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1726 "At enclosure level %d, slot %d, connector name (%4s)\n",
1727 targ->encl_level, targ->encl_slot, targ->connector_name);
1730 /* XXX first, check the firmware state, to see if it's still
1731 * operational. if not, do a diag reset.
1733 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1734 cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
1735 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1737 if (targ->tm != NULL) {
1738 /* target already in recovery, just queue up another
1739 * timedout command to be processed later.
1741 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1742 "processing by tm %p\n", cm, targ->tm);
1744 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1746 /* start recovery by aborting the first timedout command */
1747 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1748 "Sending abort to target %u for SMID %d\n", targ->tid,
1749 cm->cm_desc.Default.SMID);
1750 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1752 mprsas_send_abort(sc, targ->tm, cm);
1755 /* XXX queue this target up for recovery once a TM becomes
1756 * available. The firmware only has a limited number of
1757 * HighPriority credits for the high priority requests used
1758 * for task management, and we ran out.
1760 * Isilon: don't worry about this for now, since we have
1761 * more credits than disks in an enclosure, and limit
1762 * ourselves to one TM per target for recovery.
1764 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1765 "timedout cm %p failed to allocate a tm\n", cm);
1770 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1772 * Return 0 - for success,
1773 * 1 - to immediately return back the command with success status to CAM
1774 * negative value - to fallback to firmware path i.e. issue scsi unmap
1775 * to FW without any translation.
/*
 * Translate a SCSI UNMAP CCB into a native NVMe Dataset Management
 * (deallocate) command wrapped in an MPI2.6 NVMe Encapsulated request.
 * Copies the UNMAP parameter list, converts each block descriptor into an
 * NVMe DSM range stored in a driver PRP page, builds the encapsulated
 * request plus the DSM command, arms the I/O timeout, and submits it.
 * Return contract (per the header comment above): 0 = issued, 1 = complete
 * the CCB with success, negative = fall back to the firmware SCSI path.
 */
1778 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1779 union ccb *ccb, struct mprsas_target *targ)
1781 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1782 struct ccb_scsiio *csio;
1783 struct unmap_parm_list *plist;
1784 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1785 struct nvme_command *c;
1787 uint16_t ndesc, list_len, data_length;
1788 struct mpr_prp_page *prp_page_info;
1789 uint64_t nvme_dsm_ranges_dma_handle;
/* UNMAP CDB bytes 7-8 carry the big-endian parameter list length. */
1792 #if __FreeBSD_version >= 1100103
1793 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1795 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1796 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1797 ccb->csio.cdb_io.cdb_ptr[8]);
1799 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1800 ccb->csio.cdb_io.cdb_bytes[8]);
1804 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
/* Stage the caller's UNMAP parameter data in a private buffer. */
1808 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1810 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1811 "save UNMAP data\n");
1815 /* Copy SCSI unmap data to a local buffer */
1816 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1818 /* return back the unmap command to CAM with success status,
1819 * if number of descripts is zero.
/* Each UNMAP block descriptor is 16 bytes, hence the >> 4. */
1821 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1823 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1824 "UNMAP cmd is Zero\n");
/* Refuse ranges larger than the device's max data transfer size. */
1829 data_length = ndesc * sizeof(struct nvme_dsm_range);
1830 if (data_length > targ->MDTS) {
1831 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1832 "Device's MDTS: %d\n", data_length, targ->MDTS);
1837 prp_page_info = mpr_alloc_prp_page(sc);
1838 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1839 "UNMAP command.\n", __func__));
1842 * Insert the allocated PRP page into the command's PRP page list. This
1843 * will be freed when the command is freed.
1845 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1847 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1848 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1850 bzero(nvme_dsm_ranges, data_length);
1852 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1853 * for each descriptors contained in SCSI UNMAP data.
/* SCSI fields are big-endian; NVMe expects little-endian. */
1855 for (i = 0; i < ndesc; i++) {
1856 nvme_dsm_ranges[i].length =
1857 htole32(be32toh(plist->desc[i].nlb));
1858 nvme_dsm_ranges[i].starting_lba =
1859 htole64(be64toh(plist->desc[i].slba));
1860 nvme_dsm_ranges[i].attributes = 0;
1863 /* Build MPI2.6's NVMe Encapsulated Request Message */
1864 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1865 bzero(req, sizeof(*req));
1866 req->DevHandle = htole16(targ->handle);
1867 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1868 req->Flags = MPI26_NVME_FLAGS_WRITE;
/* The sense buffer doubles as the NVMe error-response landing area. */
1869 req->ErrorResponseBaseAddress.High =
1870 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1871 req->ErrorResponseBaseAddress.Low =
1872 htole32(cm->cm_sense_busaddr);
1873 req->ErrorResponseAllocationLength =
1874 htole16(sizeof(struct nvme_completion));
1875 req->EncapsulatedCommandLength =
1876 htole16(sizeof(struct nvme_command));
1877 req->DataLength = htole32(data_length);
1879 /* Build NVMe DSM command */
1880 c = (struct nvme_command *) req->NVMe_Command;
1881 c->opc = NVME_OPC_DATASET_MANAGEMENT;
/* NVMe namespaces are 1-based; LUNs are 0-based. */
1882 c->nsid = htole32(csio->ccb_h.target_lun + 1);
/* cdw10 = number of ranges, 0-based per the NVMe spec. */
1883 c->cdw10 = htole32(ndesc - 1);
1884 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1886 cm->cm_length = data_length;
1889 cm->cm_complete = mprsas_scsiio_complete;
1890 cm->cm_complete_data = ccb;
1892 cm->cm_lun = csio->ccb_h.target_lun;
1895 cm->cm_desc.Default.RequestFlags =
1896 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
/* Stamp submission time and arm the same timeout path as regular I/O. */
1898 csio->ccb_h.qos.sim_data = sbinuptime();
1899 #if __FreeBSD_version >= 1000029
1900 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1901 mprsas_scsiio_timeout, cm, 0);
1902 #else //__FreeBSD_version < 1000029
1903 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1904 mprsas_scsiio_timeout, cm);
1905 #endif //__FreeBSD_version >= 1000029
1908 targ->outstanding++;
1909 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1910 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1912 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1913 __func__, cm, ccb, targ->outstanding);
/* Describe the DSM range buffer to the IOC via a PRP, then submit. */
1915 mpr_build_nvme_prp(sc, cm, req,
1916 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1917 mpr_map_command(sc, cm);
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, optionally divert UNMAP to the native NVMe DSM path,
 * then build an MPI2 SCSI_IO request (direction, queue tag, LUN, CDB,
 * optional EEDP/DIF setup, request-descriptor type), arm the I/O timeout
 * and submit the command to the IOC.
 */
1925 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1927 MPI2_SCSI_IO_REQUEST *req;
1928 struct ccb_scsiio *csio;
1929 struct mpr_softc *sc;
1930 struct mprsas_target *targ;
1931 struct mprsas_lun *lun;
1932 struct mpr_command *cm;
1933 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1934 uint16_t eedp_flags;
1935 uint32_t mpi_control;
1940 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1943 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1944 ("Target %d out of bounds in XPT_SCSI_IO\n",
1945 csio->ccb_h.target_id));
1946 targ = &sassc->targets[csio->ccb_h.target_id];
1947 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0 means no firmware device handle: the target is not present. */
1948 if (targ->handle == 0x0) {
1949 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1950 __func__, csio->ccb_h.target_id);
1951 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component members are not addressable for direct SCSI I/O. */
1955 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1956 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1957 "supported %u\n", __func__, csio->ccb_h.target_id);
1958 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1963 * Sometimes, it is possible to get a command that is not "In
1964 * Progress" and was actually aborted by the upper layer. Check for
1965 * this here and complete the command without error.
1967 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1968 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1969 "target %u\n", __func__, csio->ccb_h.target_id);
1974 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1975 * that the volume has timed out. We want volumes to be enumerated
1976 * until they are deleted/removed, not just failed. In either event,
1977 * we're removing the target due to a firmware event telling us
1978 * the device is now gone (as opposed to some transient event). Since
1979 * we're opting to remove failed devices from the OS's view, we need
1980 * to propagate that status up the stack.
1982 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1983 if (targ->devinfo == 0)
1984 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1986 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Driver is detaching: fail new I/O immediately. */
1991 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1992 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1993 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1999 * If target has a reset in progress, freeze the devq and return. The
2000 * devq will be released when the TM reset is finished.
2002 if (targ->flags & MPRSAS_TARGET_INRESET) {
2003 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
2004 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
2005 __func__, targ->tid);
2006 xpt_freeze_devq(ccb->ccb_h.path, 1);
/* Out of commands (or mid-diag-reset): freeze the simq and requeue. */
2011 cm = mpr_alloc_command(sc);
2012 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
2014 mpr_free_command(sc, cm);
2016 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2017 xpt_freeze_simq(sassc->sim, 1);
2018 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2020 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2021 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2026 /* For NVME device's issue UNMAP command directly to NVME drives by
2027 * constructing equivalent native NVMe DataSetManagement command.
2029 #if __FreeBSD_version >= 1100103
2030 scsi_opcode = scsiio_cdb_ptr(csio)[0];
2032 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2033 scsi_opcode = csio->cdb_io.cdb_ptr[0];
2035 scsi_opcode = csio->cdb_io.cdb_bytes[0];
2037 if (scsi_opcode == UNMAP &&
2039 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2040 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2041 if (rc == 1) { /* return command to CAM with success status */
2042 mpr_free_command(sc, cm);
2043 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2046 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
/* Standard path: build the MPI2 SCSI_IO request from scratch. */
2050 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2051 bzero(req, sizeof(*req));
2052 req->DevHandle = htole16(targ->handle);
2053 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2055 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2056 req->SenseBufferLength = MPR_SENSE_LEN;
2058 req->ChainOffset = 0;
2059 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2064 req->DataLength = htole32(csio->dxfer_len);
2065 req->BidirectionalDataLength = 0;
2066 req->IoFlags = htole16(csio->cdb_len);
2069 /* Note: BiDirectional transfers are not supported */
2070 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2072 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2073 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2076 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2077 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2081 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field set (units of 4). */
2085 if (csio->cdb_len == 32)
2086 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2088 * It looks like the hardware doesn't require an explicit tag
2089 * number for each transaction. SAM Task Management not supported
2092 switch (csio->tag_action) {
2093 case MSG_HEAD_OF_Q_TAG:
2094 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2096 case MSG_ORDERED_Q_TAG:
2097 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2100 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2102 case CAM_TAG_ACTION_NONE:
2103 case MSG_SIMPLE_Q_TAG:
2105 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Merge in the per-target Transport Layer Retries setting. */
2108 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2109 req->Control = htole32(mpi_control);
2111 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2112 mpr_free_command(sc, cm);
2113 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from whichever form CAM supplied (pointer vs. inline). */
2118 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2119 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2121 KASSERT(csio->cdb_len <= IOCDBLEN,
2122 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2123 "is not set", csio->cdb_len));
2124 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2126 req->IoFlags = htole16(csio->cdb_len);
2129 * Check if EEDP is supported and enabled. If it is then check if the
2130 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2131 * is formatted for EEDP support. If all of this is true, set CDB up
2132 * for EEDP transfer.
2134 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2135 if (sc->eedp_enabled && eedp_flags) {
2136 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2137 if (lun->lun_id == csio->ccb_h.target_lun) {
2142 if ((lun != NULL) && (lun->eedp_formatted)) {
2143 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2144 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2145 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2146 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2147 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2149 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2151 req->EEDPFlags = htole16(eedp_flags);
2154 * If CDB less than 32, fill in Primary Ref Tag with
2155 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2156 * already there. Also, set protection bit. FreeBSD
2157 * currently does not support CDBs bigger than 16, but
2158 * the code doesn't hurt, and will be here for the
2161 if (csio->cdb_len != 32) {
/* The LBA starts at byte 6 in a 16-byte CDB, byte 2 otherwise. */
2162 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2163 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2164 PrimaryReferenceTag;
2165 for (i = 0; i < 4; i++) {
2167 req->CDB.CDB32[lba_byte + i];
2170 req->CDB.EEDP32.PrimaryReferenceTag =
2172 CDB.EEDP32.PrimaryReferenceTag);
2173 req->CDB.EEDP32.PrimaryApplicationTagMask =
2176 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2179 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2180 req->EEDPFlags = htole16(eedp_flags);
2181 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2187 cm->cm_length = csio->dxfer_len;
2188 if (cm->cm_length != 0) {
2190 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
/* The embedded SGL occupies the request frame past the 24-word header. */
2194 cm->cm_sge = &req->SGL;
2195 cm->cm_sglsize = (32 - 24) * 4;
2196 cm->cm_complete = mprsas_scsiio_complete;
2197 cm->cm_complete_data = ccb;
2199 cm->cm_lun = csio->ccb_h.target_lun;
2202 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2203 * and set descriptor type.
2205 if (targ->scsi_req_desc_type ==
2206 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2207 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2208 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2209 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2210 if (!sc->atomic_desc_capable) {
2211 cm->cm_desc.FastPathSCSIIO.DevHandle =
2212 htole16(targ->handle);
2215 cm->cm_desc.SCSIIO.RequestFlags =
2216 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2217 if (!sc->atomic_desc_capable)
2218 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Stamp submission time and arm the per-command timeout callout. */
2221 csio->ccb_h.qos.sim_data = sbinuptime();
2222 #if __FreeBSD_version >= 1000029
2223 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2224 mprsas_scsiio_timeout, cm, 0);
2225 #else //__FreeBSD_version < 1000029
2226 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2227 mprsas_scsiio_timeout, cm);
2228 #endif //__FreeBSD_version >= 1000029
2231 targ->outstanding++;
2232 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2233 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2235 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2236 __func__, cm, ccb, targ->outstanding);
2238 mpr_map_command(sc, cm);
2243 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Log diagnostic details for a failed SCSI_IO: decoded IOC status, SCSI
 * status/state (as a bit-field string via %b), enclosure location when
 * known, autosense data when valid, and the SAS response code when the
 * response-info field is valid.  Debug output only; no state is changed.
 */
2246 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2247 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2251 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2252 MPI2_IOCSTATUS_MASK;
2253 u8 scsi_state = mpi_reply->SCSIState;
2254 u8 scsi_status = mpi_reply->SCSIStatus;
2255 char *desc_ioc_state = NULL;
2256 char *desc_scsi_status = NULL;
2257 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is special-cased; NOTE(review): elided branch -- confirm. */
2259 if (log_info == 0x31170000)
/* Translate numeric status codes into human-readable strings. */
2262 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2264 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2267 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2268 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2269 if (targ->encl_level_valid) {
2270 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2271 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2272 targ->connector_name);
2276 * We can add more detail about underflow data here
2279 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2280 "scsi_state %b\n", desc_scsi_status, scsi_status,
2281 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2282 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
/* Dump the autosense buffer only when debug level includes XINFO. */
2284 if (sc->mpr_debug & MPR_XINFO &&
2285 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2286 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2287 scsi_sense_print(csio);
2288 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2291 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2292 response_info = le32toh(mpi_reply->ResponseInfo);
2293 response_bytes = (u8 *)&response_info;
2294 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2296 mpr_describe_table(mpr_scsi_taskmgmt_string,
2297 response_bytes[0]));
/*
 * Translate a native NVMe completion status (SCT/SC pair) into the
 * equivalent SCSI status plus fixed-format sense data (skey/asc/ascq),
 * then attach that sense to the CCB and flag CAM_AUTOSNS_VALID.
 *
 * NOTE(review): the extraction dropped interior lines — the `switch (sct)`
 * header, the per-case `break;` statements, some declarations (sct, sc)
 * and closing braces are not visible.  The visible structure is a default
 * ILLEGAL_REQUEST sense overridden per status-code class below.
 */
2301 /** mprsas_nvme_trans_status_code
2303 * Convert Native NVMe command error status to
2304 * equivalent SCSI error status.
2306 * Returns appropriate scsi_status
2309 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2310 struct mpr_command *cm)
2312 u8 status = MPI2_SCSI_STATUS_GOOD;
2313 int skey, asc, ascq;
2314 union ccb *ccb = cm->cm_complete_data;
2315 int returned_sense_len;
/* Split the NVMe status into Status Code Type and Status Code. */
2318 sct = NVME_STATUS_GET_SCT(nvme_status);
2319 sc = NVME_STATUS_GET_SC(nvme_status);
/* Default translation: CHECK CONDITION / ILLEGAL REQUEST. */
2321 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2322 skey = SSD_KEY_ILLEGAL_REQUEST;
2323 asc = SCSI_ASC_NO_SENSE;
2324 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* --- Generic command status codes (SCT 0) --- */
2327 case NVME_SCT_GENERIC:
2329 case NVME_SC_SUCCESS:
2330 status = MPI2_SCSI_STATUS_GOOD;
2331 skey = SSD_KEY_NO_SENSE;
2332 asc = SCSI_ASC_NO_SENSE;
2333 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2335 case NVME_SC_INVALID_OPCODE:
2336 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2337 skey = SSD_KEY_ILLEGAL_REQUEST;
2338 asc = SCSI_ASC_ILLEGAL_COMMAND;
2339 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2341 case NVME_SC_INVALID_FIELD:
2342 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2343 skey = SSD_KEY_ILLEGAL_REQUEST;
2344 asc = SCSI_ASC_INVALID_CDB;
2345 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2347 case NVME_SC_DATA_TRANSFER_ERROR:
2348 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2349 skey = SSD_KEY_MEDIUM_ERROR;
2350 asc = SCSI_ASC_NO_SENSE;
2351 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2353 case NVME_SC_ABORTED_POWER_LOSS:
2354 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2355 skey = SSD_KEY_ABORTED_COMMAND;
2356 asc = SCSI_ASC_WARNING;
2357 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2359 case NVME_SC_INTERNAL_DEVICE_ERROR:
2360 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2361 skey = SSD_KEY_HARDWARE_ERROR;
2362 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2363 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* All abort variants collapse into TASK ABORTED / ABORTED COMMAND. */
2365 case NVME_SC_ABORTED_BY_REQUEST:
2366 case NVME_SC_ABORTED_SQ_DELETION:
2367 case NVME_SC_ABORTED_FAILED_FUSED:
2368 case NVME_SC_ABORTED_MISSING_FUSED:
2369 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2370 skey = SSD_KEY_ABORTED_COMMAND;
2371 asc = SCSI_ASC_NO_SENSE;
2372 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2374 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2375 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2376 skey = SSD_KEY_ILLEGAL_REQUEST;
2377 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2378 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2380 case NVME_SC_LBA_OUT_OF_RANGE:
2381 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2382 skey = SSD_KEY_ILLEGAL_REQUEST;
2383 asc = SCSI_ASC_ILLEGAL_BLOCK;
2384 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2386 case NVME_SC_CAPACITY_EXCEEDED:
2387 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2388 skey = SSD_KEY_MEDIUM_ERROR;
2389 asc = SCSI_ASC_NO_SENSE;
2390 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2392 case NVME_SC_NAMESPACE_NOT_READY:
2393 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2394 skey = SSD_KEY_NOT_READY;
2395 asc = SCSI_ASC_LUN_NOT_READY;
2396 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* --- Command-specific status codes (SCT 1) --- */
2400 case NVME_SCT_COMMAND_SPECIFIC:
2402 case NVME_SC_INVALID_FORMAT:
2403 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2404 skey = SSD_KEY_ILLEGAL_REQUEST;
2405 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2406 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2408 case NVME_SC_CONFLICTING_ATTRIBUTES:
2409 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2410 skey = SSD_KEY_ILLEGAL_REQUEST;
2411 asc = SCSI_ASC_INVALID_CDB;
2412 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* --- Media and data-integrity status codes (SCT 2) --- */
2416 case NVME_SCT_MEDIA_ERROR:
2418 case NVME_SC_WRITE_FAULTS:
2419 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2420 skey = SSD_KEY_MEDIUM_ERROR;
2421 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2422 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2424 case NVME_SC_UNRECOVERED_READ_ERROR:
2425 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2426 skey = SSD_KEY_MEDIUM_ERROR;
2427 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2428 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* PI (Protection Information) check failures map to their SCSI analogs. */
2430 case NVME_SC_GUARD_CHECK_ERROR:
2431 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2432 skey = SSD_KEY_MEDIUM_ERROR;
2433 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2434 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2436 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2437 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2438 skey = SSD_KEY_MEDIUM_ERROR;
2439 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2440 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2442 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2443 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2444 skey = SSD_KEY_MEDIUM_ERROR;
2445 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2446 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2448 case NVME_SC_COMPARE_FAILURE:
2449 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2450 skey = SSD_KEY_MISCOMPARE;
2451 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2452 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2454 case NVME_SC_ACCESS_DENIED:
2455 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2456 skey = SSD_KEY_ILLEGAL_REQUEST;
2457 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2458 ascq = SCSI_ASCQ_INVALID_LUN_ID;
/*
 * Build fixed-format sense in the CCB.  Fixed sense is a full
 * struct scsi_sense_data here, so sense_resid accounts for any
 * caller-provided buffer larger than that.
 */
2464 returned_sense_len = sizeof(struct scsi_sense_data);
2465 if (returned_sense_len < ccb->csio.sense_len)
2466 ccb->csio.sense_resid = ccb->csio.sense_len -
2469 ccb->csio.sense_resid = 0;
2471 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2472 1, skey, asc, ascq, SSD_ELEM_NONE);
2473 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
/*
 * Completion handler for a SCSI UNMAP that was issued as an NVMe
 * Encapsulated request (NVMe DSM deallocate).  If the firmware reported
 * an NVMe error response, translate it into a SCSI status + sense via
 * mprsas_nvme_trans_status_code(); otherwise leave SCSI_STATUS_GOOD.
 *
 * NOTE(review): the function's return statement and closing brace fall in
 * lines elided by this extraction; callers (see mprsas_scsiio_complete)
 * use its return value as the SCSIStatus, so it presumably returns
 * scsi_status — confirm against the full source.
 */
2478 /** mprsas_complete_nvme_unmap
2480 * Complete native NVMe command issued using NVMe Encapsulated
2484 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2486 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2487 struct nvme_completion *nvme_completion = NULL;
2488 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2490 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
/* Non-zero ErrorResponseCount => NVMe error response landed in cm_sense. */
2491 if (le16toh(mpi_reply->ErrorResponseCount)){
2492 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2493 scsi_status = mprsas_nvme_trans_status_code(
2494 nvme_completion->status, cm);
/*
 * Main completion path for XPT_SCSI_IO commands: tear down DMA mappings,
 * unlink the command from the target, translate the firmware reply
 * (IOCStatus/SCSIStatus/SCSIState) into a CAM CCB status, harvest sense
 * data, maintain TLR/SSU bookkeeping, and hand the CCB back to CAM.
 *
 * NOTE(review): this extraction is missing many interior lines (blank
 * lines, some closing braces, `break;` statements, `xpt_done(ccb)` calls,
 * and several declarations such as sassc/dir/i/alloc_len) — gaps in the
 * embedded numbering mark where.  Comments below describe only what the
 * visible lines establish.
 */
2500 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2502 MPI2_SCSI_IO_REPLY *rep;
2504 struct ccb_scsiio *csio;
2505 struct mprsas_softc *sassc;
2506 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2507 u8 *TLR_bits, TLR_on, *scsi_cdb;
2510 struct mprsas_target *target;
2511 target_id_t target_id;
2514 mpr_dprint(sc, MPR_TRACE,
2515 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2516 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2517 cm->cm_targ->outstanding);
/* Cancel the per-command timeout; completion must hold the softc lock. */
2519 callout_stop(&cm->cm_callout);
2520 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2523 ccb = cm->cm_complete_data;
2525 target_id = csio->ccb_h.target_id;
2526 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2528 * XXX KDM if the chain allocation fails, does it matter if we do
2529 * the sync and unload here? It is simpler to do it in every case,
2530 * assuming it doesn't cause problems.
/* Sync and unload the data buffer DMA map before touching the payload. */
2532 if (cm->cm_data != NULL) {
2533 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2534 dir = BUS_DMASYNC_POSTREAD;
2535 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2536 dir = BUS_DMASYNC_POSTWRITE;
2537 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2538 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Unlink from the target's active list and clear SIM-queued state. */
2541 cm->cm_targ->completed++;
2542 cm->cm_targ->outstanding--;
2543 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2544 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
/* Recovery bookkeeping: command had timed out and was being recovered. */
2546 if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2547 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2548 KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2549 ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2550 cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2551 if (cm->cm_reply != NULL)
2552 mprsas_log_command(cm, MPR_RECOVERY,
2553 "completed timedout cm %p ccb %p during recovery "
2554 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2555 le16toh(rep->IOCStatus), rep->SCSIStatus,
2556 rep->SCSIState, le32toh(rep->TransferCount));
2558 mprsas_log_command(cm, MPR_RECOVERY,
2559 "completed timedout cm %p ccb %p during recovery\n",
/* Target has an outstanding task-management command in flight. */
2561 } else if (cm->cm_targ->tm != NULL) {
2562 if (cm->cm_reply != NULL)
2563 mprsas_log_command(cm, MPR_RECOVERY,
2564 "completed cm %p ccb %p during recovery "
2565 "ioc %x scsi %x state %x xfer %u\n",
2566 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2567 rep->SCSIStatus, rep->SCSIState,
2568 le32toh(rep->TransferCount));
2570 mprsas_log_command(cm, MPR_RECOVERY,
2571 "completed cm %p ccb %p during recovery\n",
2573 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2574 mprsas_log_command(cm, MPR_RECOVERY,
2575 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2578 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2580 * We ran into an error after we tried to map the command,
2581 * so we're getting a callback without queueing the command
2582 * to the hardware. So we set the status here, and it will
2583 * be retained below. We'll go through the "fast path",
2584 * because there can be no reply when we haven't actually
2585 * gone out to the hardware.
2587 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2590 * Currently the only error included in the mask is
2591 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2592 * chain frames. We need to freeze the queue until we get
2593 * a command that completed without this error, which will
2594 * hopefully have some chain frames attached that we can
2595 * use. If we wanted to get smarter about it, we would
2596 * only unfreeze the queue in this condition when we're
2597 * sure that we're getting some chain frames back. That's
2598 * probably unnecessary.
2600 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2601 xpt_freeze_simq(sassc->sim, 1);
2602 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2603 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2604 "freezing SIM queue\n");
2609 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2610 * flag, and use it in a few places in the rest of this function for
2611 * convenience. Use the macro if available.
2613 #if __FreeBSD_version >= 1100103
2614 scsi_cdb = scsiio_cdb_ptr(csio);
2616 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2617 scsi_cdb = csio->cdb_io.cdb_ptr;
2619 scsi_cdb = csio->cdb_io.cdb_bytes;
2623 * If this is a Start Stop Unit command and it was issued by the driver
2624 * during shutdown, decrement the refcount to account for all of the
2625 * commands that were sent. All SSU commands should be completed before
2626 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2629 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2630 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2634 /* Take the fast path to completion */
2635 if (cm->cm_reply == NULL) {
/* No reply frame => success unless a diag reset or mapping error intervened. */
2636 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2637 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2638 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2640 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2641 csio->scsi_status = SCSI_STATUS_OK;
2643 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2644 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2645 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2646 mpr_dprint(sc, MPR_XINFO,
2647 "Unfreezing SIM queue\n");
2652 * There are two scenarios where the status won't be
2653 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2654 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2656 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2658 * Freeze the dev queue so that commands are
2659 * executed in the correct order after error
2662 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2663 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2665 mpr_free_command(sc, cm);
/* Slow path: a full reply frame exists and must be decoded. */
2670 target = &sassc->targets[target_id];
/* UNMAP to an NVMe device was sent encapsulated; fix up its status first. */
2671 if (scsi_cdb[0] == UNMAP &&
2673 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2674 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2675 csio->scsi_status = rep->SCSIStatus;
2678 mprsas_log_command(cm, MPR_XINFO,
2679 "ioc %x scsi %x state %x xfer %u\n",
2680 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2681 le32toh(rep->TransferCount));
/* Dispatch on the firmware's IOC status code. */
2683 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2684 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2685 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
/* NOTE(review): underrun presumably falls through to SUCCESS handling —
 * the intervening line(s) are elided here; confirm in the full source. */
2687 case MPI2_IOCSTATUS_SUCCESS:
2688 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2689 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2690 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2691 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2693 /* Completion failed at the transport level. */
2694 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2695 MPI2_SCSI_STATE_TERMINATED)) {
2696 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2700 /* In a modern packetized environment, an autosense failure
2701 * implies that there's not much else that can be done to
2702 * recover the command.
2704 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2705 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2710 * CAM doesn't care about SAS Response Info data, but if this is
2711 * the state check if TLR should be done. If not, clear the
2712 * TLR_bits for the target.
2714 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2715 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2716 == MPR_SCSI_RI_INVALID_FRAME)) {
2717 sc->mapping_table[target_id].TLR_bits =
2718 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2722 * Intentionally override the normal SCSI status reporting
2723 * for these two cases. These are likely to happen in a
2724 * multi-initiator environment, and we want to make sure that
2725 * CAM retries these commands rather than fail them.
2727 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2728 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2729 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2733 /* Handle normal status and sense */
2734 csio->scsi_status = rep->SCSIStatus;
2735 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2736 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2738 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
/* Copy autosense data out of the command's sense buffer into the CCB. */
2740 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2741 int sense_len, returned_sense_len;
2743 returned_sense_len = min(le32toh(rep->SenseCount),
2744 sizeof(struct scsi_sense_data));
2745 if (returned_sense_len < csio->sense_len)
2746 csio->sense_resid = csio->sense_len -
2749 csio->sense_resid = 0;
2751 sense_len = min(returned_sense_len,
2752 csio->sense_len - csio->sense_resid);
2753 bzero(&csio->sense_data, sizeof(csio->sense_data));
2754 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2755 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2759 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2760 * and it's page code 0 (Supported Page List), and there is
2761 * inquiry data, and this is for a sequential access device, and
2762 * the device is an SSP target, and TLR is supported by the
2763 * controller, turn the TLR_bits value ON if page 0x90 is
2766 if ((scsi_cdb[0] == INQUIRY) &&
2767 (scsi_cdb[1] & SI_EVPD) &&
2768 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2769 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2770 (csio->data_ptr != NULL) &&
2771 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2772 (sc->control_TLR) &&
2773 (sc->mapping_table[target_id].device_info &
2774 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2775 vpd_list = (struct scsi_vpd_supported_page_list *)
2777 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2778 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2779 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
/* Only scan the bytes the device actually returned (allocation - resid). */
2780 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2781 alloc_len -= csio->resid;
2782 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2783 if (vpd_list->list[i] == 0x90) {
2791 * If this is a SATA direct-access end device, mark it so that
2792 * a SCSI StartStopUnit command will be sent to it when the
2793 * driver is being shutdown.
2795 if ((scsi_cdb[0] == INQUIRY) &&
2796 (csio->data_ptr != NULL) &&
2797 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2798 (sc->mapping_table[target_id].device_info &
2799 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2800 ((sc->mapping_table[target_id].device_info &
2801 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2802 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2803 target = &sassc->targets[target_id];
2804 target->supports_SSU = TRUE;
2805 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2809 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2810 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2812 * If devinfo is 0 this will be a volume. In that case don't
2813 * tell CAM that the volume is not there. We want volumes to
2814 * be enumerated until they are deleted/removed, not just
2817 if (cm->cm_targ->devinfo == 0)
2818 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
2820 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2822 case MPI2_IOCSTATUS_INVALID_SGL:
2823 mpr_print_scsiio_cmd(sc, cm);
2824 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2826 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2828 * This is one of the responses that comes back when an I/O
2829 * has been aborted. If it is because of a timeout that we
2830 * initiated, just set the status to CAM_CMD_TIMEOUT.
2831 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2832 * command is the same (it gets retried, subject to the
2833 * retry counter), the only difference is what gets printed
2836 if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2837 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2839 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2841 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2842 /* resid is ignored for this condition */
2844 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2846 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2847 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2849 * These can sometimes be transient transport-related
2850 * errors, and sometimes persistent drive-related errors.
2851 * We used to retry these without decrementing the retry
2852 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2853 * we hit a persistent drive problem that returns one of
2854 * these error codes, we would retry indefinitely. So,
2855 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2856 * count and avoid infinite retries. We're taking the
2857 * potential risk of flagging false failures in the event
2858 * of a topology-related error (e.g. a SAS expander problem
2859 * causes a command addressed to a drive to fail), but
2860 * avoiding getting into an infinite retry loop. However,
2861 * if we get them while were moving a device, we should
2862 * fail the request as 'not there' because the device
2863 * is effectively gone.
2865 if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL)
2866 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2868 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2869 mpr_dprint(sc, MPR_INFO,
2870 "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2871 mpr_describe_table(mpr_iocstatus_string,
2872 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2873 target_id, cm->cm_desc.Default.SMID,
2874 le32toh(rep->IOCLogInfo),
2875 (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) ? " departing" : "");
2876 mpr_dprint(sc, MPR_XINFO,
2877 "SCSIStatus %x SCSIState %x xfercount %u\n",
2878 rep->SCSIStatus, rep->SCSIState,
2879 le32toh(rep->TransferCount));
2881 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2882 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2883 case MPI2_IOCSTATUS_INVALID_VPID:
2884 case MPI2_IOCSTATUS_INVALID_FIELD:
2885 case MPI2_IOCSTATUS_INVALID_STATE:
2886 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2887 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2888 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2889 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2890 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2892 mprsas_log_command(cm, MPR_XINFO,
2893 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2894 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2895 rep->SCSIStatus, rep->SCSIState,
2896 le32toh(rep->TransferCount));
2897 csio->resid = cm->cm_length;
/* Encapsulated-NVMe UNMAP already has a translated status; accept it. */
2899 if (scsi_cdb[0] == UNMAP &&
2901 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2902 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2904 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2909 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
/* Any completion unfreezes a SIM queue frozen by a chain-frame shortage. */
2911 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2912 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2913 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2914 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2918 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2919 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2920 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2924 * Check to see if we're removing the device. If so, and this is the
2925 * last command on the queue, proceed with the deferred removal of the
2926 * device. Note, for removing a volume, this won't trigger because
2927 * pending_remove_tm will be NULL.
2929 if (cm->cm_targ->flags & MPRSAS_TARGET_INREMOVAL) {
2930 if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2931 cm->cm_targ->pending_remove_tm != NULL) {
2932 mpr_dprint(sc, MPR_INFO, "Last pending command complete: starting remove_device\n");
2933 mpr_map_command(sc, cm->cm_targ->pending_remove_tm);
2934 cm->cm_targ->pending_remove_tm = NULL;
2938 mpr_free_command(sc, cm);
2942 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands: validate the reply,
 * check IOC/SAS status and the SMP function result byte, then sync and
 * unload the bidirectional DMA map and free the command.
 *
 * NOTE(review): some lines (declarations of ccb/sasaddr, goto labels,
 * `xpt_done`, closing braces) are elided by this extraction — the error
 * paths visibly set a CCB status and presumably jump to the common
 * "bailout" teardown at the end; confirm against the full source.
 */
2944 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2946 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2947 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2951 ccb = cm->cm_complete_data;
2954 * Currently there should be no way we can hit this case. It only
2955 * happens when we have a failure to allocate chain frames, and SMP
2956 * commands require two S/G elements only. That should be handled
2957 * in the standard request size.
2959 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2960 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2961 "request!\n", __func__, cm->cm_flags);
2962 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2966 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2968 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2969 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the request (for logging only). */
2973 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2974 sasaddr = le32toh(req->SASAddress.Low);
2975 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2977 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2978 MPI2_IOCSTATUS_SUCCESS ||
2979 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2980 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2981 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2982 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2986 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2987 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2989 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2990 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2992 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2996 * We sync in both directions because we had DMAs in the S/G list
2997 * in both directions.
2999 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3000 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3001 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3002 mpr_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the given SAS address.
 * Resolves the CCB's request/response buffers (virtual address or single
 * S/G entry; physical addresses are rejected), fills in the MPI2 SMP
 * passthrough frame, and maps it with a two-element uio so request and
 * response travel in one bus_dma mapping.
 *
 * The two large preprocessor branches select buffer-resolution logic by
 * FreeBSD version: the newer CAM_DATA_MASK switch vs. the older
 * CAM_DATA_PHYS/CAM_SCATTER_VALID flag tests.  Both compute the same
 * `request`/`response` pointers.
 *
 * NOTE(review): elided lines include `sc = sassc->sc`, `xpt_done`,
 * `return` statements, the `bailout:` label and various closing braces
 * (gaps in the embedded numbering).
 */
3007 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
3009 struct mpr_command *cm;
3010 uint8_t *request, *response;
3011 MPI2_SMP_PASSTHROUGH_REQUEST *req;
3012 struct mpr_softc *sc;
3020 #if (__FreeBSD_version >= 1000028) || \
3021 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
3022 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
3023 case CAM_DATA_PADDR:
3024 case CAM_DATA_SG_PADDR:
3026 * XXX We don't yet support physical addresses here.
3028 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3029 "supported\n", __func__);
3030 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3035 * The chip does not support more than one buffer for the
3036 * request or response.
3038 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3039 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3040 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3041 "response buffer segments not supported for SMP\n",
3043 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3049 * The CAM_SCATTER_VALID flag was originally implemented
3050 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3051 * We have two. So, just take that flag to mean that we
3052 * might have S/G lists, and look at the S/G segment count
3053 * to figure out whether that is the case for each individual
3056 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3057 bus_dma_segment_t *req_sg;
/* Single-entry S/G: take the segment's address as the flat buffer. */
3059 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3060 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3062 request = ccb->smpio.smp_request;
3064 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3065 bus_dma_segment_t *rsp_sg;
3067 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3068 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3070 response = ccb->smpio.smp_response;
3072 case CAM_DATA_VADDR:
3073 request = ccb->smpio.smp_request;
3074 response = ccb->smpio.smp_response;
3077 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3081 #else /* __FreeBSD_version < 1000028 */
3083 * XXX We don't yet support physical addresses here.
3085 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3086 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3087 "supported\n", __func__);
3088 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3094 * If the user wants to send an S/G list, check to make sure they
3095 * have single buffers.
3097 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3099 * The chip does not support more than one buffer for the
3100 * request or response.
3102 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3103 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3104 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3105 "response buffer segments not supported for SMP\n",
3107 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3113 * The CAM_SCATTER_VALID flag was originally implemented
3114 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3115 * We have two. So, just take that flag to mean that we
3116 * might have S/G lists, and look at the S/G segment count
3117 * to figure out whether that is the case for each individual
3120 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3121 bus_dma_segment_t *req_sg;
3123 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3124 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3126 request = ccb->smpio.smp_request;
3128 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3129 bus_dma_segment_t *rsp_sg;
3131 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3132 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3134 response = ccb->smpio.smp_response;
3136 request = ccb->smpio.smp_request;
3137 response = ccb->smpio.smp_response;
3139 #endif /* __FreeBSD_version < 1000028 */
3141 cm = mpr_alloc_command(sc);
3143 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3145 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the MPI2 SMP passthrough request frame from scratch. */
3150 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3151 bzero(req, sizeof(*req));
3152 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3154 /* Allow the chip to use any route to this SAS address. */
3155 req->PhysicalPort = 0xff;
3157 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3159 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3161 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3162 "%#jx\n", __func__, (uintmax_t)sasaddr);
3164 mpr_init_sge(cm, req, &req->SGL);
3167 * Set up a uio to pass into mpr_map_command(). This allows us to
3168 * do one map command, and one busdma call in there.
3170 cm->cm_uio.uio_iov = cm->cm_iovec;
3171 cm->cm_uio.uio_iovcnt = 2;
3172 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3175 * The read/write flag isn't used by busdma, but set it just in
3176 * case. This isn't exactly accurate, either, since we're going in
3179 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = outbound SMP request, iovec[1] = inbound SMP response. */
3181 cm->cm_iovec[0].iov_base = request;
3182 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3183 cm->cm_iovec[1].iov_base = response;
3184 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3186 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3187 cm->cm_iovec[1].iov_len;
3190 * Trigger a warning message in mpr_data_cb() for the user if we
3191 * wind up exceeding two S/G segments. The chip expects one
3192 * segment for the request and another for the response.
3194 cm->cm_max_segs = 2;
3196 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3197 cm->cm_complete = mprsas_smpio_complete;
3198 cm->cm_complete_data = ccb;
3201 * Tell the mapping code that we're using a uio, and that this is
3202 * an SMP passthrough request. There is a little special-case
3203 * logic there (in mpr_data_cb()) to handle the bidirectional
3206 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3207 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3209 /* The chip data format is little endian. */
3210 req->SASAddress.High = htole32(sasaddr >> 32);
3211 req->SASAddress.Low = htole32(sasaddr);
3214 * XXX Note that we don't have a timeout/abort mechanism here.
3215 * From the manual, it looks like task management requests only
3216 * work for SCSI IO and SATA passthrough requests. We may need to
3217 * have a mechanism to retry requests in the event of a chip reset
3218 * at least. Hopefully the chip will insure that any errors short
3219 * of that are relayed back to the driver.
3221 error = mpr_map_command(sc, cm);
3222 if ((error != 0) && (error != EINPROGRESS)) {
3223 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3224 "mpr_map_command()\n", __func__, error);
/* Error teardown: release the command and fail the CCB. */
3231 mpr_free_command(sc, cm);
3232 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * XPT_SMP_IO entry point: figure out which SAS address should receive
 * the SMP request.  If the target itself embeds an SMP target (an
 * expander/enclosure), use its own address; otherwise fall back to the
 * parent device's SAS address (the expander the target hangs off),
 * validating the parent's handle/devinfo/address along the way.
 * Finally hands off to mprsas_send_smpcmd().
 *
 * NOTE(review): elided lines include `sc = sassc->sc`, `xpt_done`,
 * `return`s, a `goto bailout`-style exit after each error status, and
 * closing braces (gaps in the embedded numbering).
 */
3238 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3240 struct mpr_softc *sc;
3241 struct mprsas_target *targ;
3242 uint64_t sasaddr = 0;
3247 * Make sure the target exists.
3249 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3250 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3251 targ = &sassc->targets[ccb->ccb_h.target_id];
3252 if (targ->handle == 0x0) {
3253 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3254 __func__, ccb->ccb_h.target_id);
3255 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3261 * If this device has an embedded SMP target, we'll talk to it
3263 * figure out what the expander's address is.
3265 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3266 sasaddr = targ->sasaddr;
3269 * If we don't have a SAS address for the expander yet, try
3270 * grabbing it from the page 0x83 information cached in the
3271 * transport layer for this target. LSI expanders report the
3272 * expander SAS address as the port-associated SAS address in
3273 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3276 * XXX KDM disable this for now, but leave it commented out so that
3277 * it is obvious that this is another possible way to get the SAS
3280 * The parent handle method below is a little more reliable, and
3281 * the other benefit is that it works for devices other than SES
3282 * devices. So you can send a SMP request to a da(4) device and it
3283 * will get routed to the expander that device is attached to.
3284 * (Assuming the da(4) device doesn't contain an SMP target...)
3288 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3292 * If we still don't have a SAS address for the expander, look for
3293 * the parent device of this device, which is probably the expander.
3296 #ifdef OLD_MPR_PROBE
3297 struct mprsas_target *parent_target;
3300 if (targ->parent_handle == 0x0) {
3301 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3302 "a valid parent handle!\n", __func__, targ->handle);
3303 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3306 #ifdef OLD_MPR_PROBE
/* Legacy probe path: resolve the parent target object by handle. */
3307 parent_target = mprsas_find_target_by_handle(sassc, 0,
3308 targ->parent_handle);
3310 if (parent_target == NULL) {
3311 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3312 "a valid parent target!\n", __func__, targ->handle);
3313 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3317 if ((parent_target->devinfo &
3318 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3319 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3320 "does not have an SMP target!\n", __func__,
3321 targ->handle, parent_target->handle);
3322 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3326 sasaddr = parent_target->sasaddr;
3327 #else /* OLD_MPR_PROBE */
/* Current path: parent devinfo/address are cached on the target itself. */
3328 if ((targ->parent_devinfo &
3329 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3330 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3331 "does not have an SMP target!\n", __func__,
3332 targ->handle, targ->parent_handle);
3333 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3337 if (targ->parent_sasaddr == 0x0) {
3338 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3339 "%d does not have a valid SAS address!\n", __func__,
3340 targ->handle, targ->parent_handle);
3341 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3345 sasaddr = targ->parent_sasaddr;
3346 #endif /* OLD_MPR_PROBE */
3351 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3352 "handle %d\n", __func__, targ->handle);
3353 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3356 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3364 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler: build and issue an MPI target-reset
 * task management request for the target addressed by this CCB.  The
 * CCB is completed asynchronously by mprsas_resetdev_complete().
 */
3367 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3369 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3370 struct mpr_softc *sc;
3371 struct mpr_command *tm;
3372 struct mprsas_target *targ;
3374 MPR_FUNCTRACE(sassc->sc);
/* Caller must hold the controller mutex. */
3375 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3377 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3378 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
/* NOTE(review): 'sc' is used here but its assignment (presumably
 * sc = sassc->sc) is not visible in this excerpt -- confirm. */
3380 tm = mprsas_alloc_tm(sc);
/* TM-frame allocation failure: fail the CCB as resource-unavailable
 * (the surrounding if/return lines are elided in this excerpt). */
3382 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3383 "mprsas_action_resetdev\n");
3384 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the task management request: target reset for this handle.
 * DevHandle is byte-swapped to the IOC's little-endian wire format. */
3389 targ = &sassc->targets[ccb->ccb_h.target_id];
3390 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3391 req->DevHandle = htole16(targ->handle);
3392 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
/* Reset flavor depends on transport: link reset for SAS/SATA targets
 * (or NVMe when custom TM handling is enabled), otherwise a PCIe
 * protocol-level reset for NVMe devices. */
3394 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3395 /* SAS Hard Link Reset / SATA Link Reset */
3396 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3398 /* PCIe Protocol Level Reset*/
3400 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
/* Route the completion back to this CCB. */
3404 tm->cm_complete = mprsas_resetdev_complete;
3405 tm->cm_complete_data = ccb;
3407 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3408 __func__, targ->tid);
/* Block further I/O to the target until the TM completes, then queue
 * the command to the hardware. */
3411 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3412 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_action_resetdev().  Translates the firmware reply into a CAM
 * status on the saved CCB, announces the reset to CAM on success, and
 * frees the TM command frame.
 */
3416 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3418 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3422 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3424 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* The CCB to complete was stashed in cm_complete_data by the submitter. */
3425 ccb = tm->cm_complete_data;
3428 * Currently there should be no way we can hit this case. It only
3429 * happens when we have a failure to allocate chain frames, and
3430 * task management commands don't have S/G lists.
3432 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3433 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3435 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3437 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3438 "handle %#04x! This should not happen!\n", __func__,
3439 tm->cm_flags, req->DevHandle);
3440 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Log the firmware status; fields are byte-swapped from the IOC's
 * little-endian wire format before printing. */
3444 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3445 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* TM completed: mark the CCB successful and send a bus-device-reset
 * async notification so peripheral drivers can recover state. */
3447 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3448 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3449 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
/* Any other response code is reported as a completed-with-error CCB. */
3453 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Return the TM frame to the free pool. */
3457 mprsas_free_tm(sc, tm);
/*
 * CAM SIM poll routine, used when interrupts are unavailable (e.g.
 * during a kernel dump or panic).  Drives the interrupt handler by
 * hand so queued completions are still processed.
 */
3462 mprsas_poll(struct cam_sim *sim)
3464 struct mprsas_softc *sassc;
3466 sassc = cam_sim_softc(sim);
/* Trace-level logging is far too chatty while polling (panic path);
 * permanently clear the MPR_TRACE debug bit here. */
3468 if (sassc->sc->mpr_debug & MPR_TRACE) {
3469 /* frequent debug messages during a panic just slow
3470 * everything down too much.
3472 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3474 sassc->sc->mpr_debug &= ~MPR_TRACE;
/* Poll the hardware as if an interrupt had fired. */
3477 mpr_intr_locked(sassc->sc);
/*
 * CAM async event callback registered at attach time.  Visible cases:
 * AC_ADVINFO_CHANGED (re-read cached long read-capacity data to track
 * per-LUN EEDP formatting) and AC_FOUND_DEVICE (older FreeBSD only:
 * probe newly found devices for EEDP support via mprsas_check_eedp()).
 * NOTE(review): the switch(code) statement itself is elided from this
 * excerpt -- the case labels below imply it.
 */
3481 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3484 struct mpr_softc *sc;
3486 sc = (struct mpr_softc *)callback_arg;
3489 #if (__FreeBSD_version >= 1000006) || \
3490 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3491 case AC_ADVINFO_CHANGED: {
3492 struct mprsas_target *target;
3493 struct mprsas_softc *sassc;
3494 struct scsi_read_capacity_data_long rcap_buf;
3495 struct ccb_dev_advinfo cdai;
3496 struct mprsas_lun *lun;
/* The advinfo buffer type arrives via the opaque callback argument. */
3501 buftype = (uintptr_t)arg;
3507 * We're only interested in read capacity data changes.
3509 if (buftype != CDAI_TYPE_RCAPLONG)
3513 * See the comment in mpr_attach_sas() for a detailed
3514 * explanation. In these versions of FreeBSD we register
3515 * for all events and filter out the events that don't
3518 #if (__FreeBSD_version < 1000703) || \
3519 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3520 if (xpt_path_path_id(path) != sassc->sim->path_id)
3525 * We should have a handle for this, but check to make sure.
3527 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3528 ("Target %d out of bounds in mprsas_async\n",
3529 xpt_path_target_id(path)));
3530 target = &sassc->targets[xpt_path_target_id(path)];
/* Ignore events for targets that no longer have a firmware handle. */
3531 if (target->handle == 0)
3534 lunid = xpt_path_lun_id(path);
/* Find the existing LUN record for this path, or allocate a new one. */
3536 SLIST_FOREACH(lun, &target->luns, lun_link) {
3537 if (lun->lun_id == lunid) {
3543 if (found_lun == 0) {
3544 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3547 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3548 "LUN for EEDP support.\n");
3551 lun->lun_id = lunid;
3552 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the transport layer's cached long read-capacity data with an
 * XPT_DEV_ADVINFO CCB (synchronous xpt_action call). */
3555 bzero(&rcap_buf, sizeof(rcap_buf));
3556 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3557 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3558 cdai.ccb_h.flags = CAM_DIR_IN;
3559 cdai.buftype = CDAI_TYPE_RCAPLONG;
3560 #if (__FreeBSD_version >= 1100061) || \
3561 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3562 cdai.flags = CDAI_FLAG_NONE;
3566 cdai.bufsiz = sizeof(rcap_buf);
3567 cdai.buf = (uint8_t *)&rcap_buf;
3568 xpt_action((union ccb *)&cdai);
/* Thaw the device queue if the advinfo action froze it. */
3569 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3570 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* Record the LUN's EEDP state from the protection bits in the
 * returned capacity data (protection enabled + protection type). */
3572 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3573 && (rcap_buf.prot & SRC16_PROT_EN)) {
3574 switch (rcap_buf.prot & SRC16_P_TYPE) {
3577 lun->eedp_formatted = TRUE;
3578 lun->eedp_block_size =
3579 scsi_4btoul(rcap_buf.length);
3583 lun->eedp_formatted = FALSE;
3584 lun->eedp_block_size = 0;
3588 lun->eedp_formatted = FALSE;
3589 lun->eedp_block_size = 0;
3594 case AC_FOUND_DEVICE: {
3595 struct ccb_getdev *cgd;
3598 * See the comment in mpr_attach_sas() for a detailed
3599 * explanation. In these versions of FreeBSD we register
3600 * for all events and filter out the events that don't
3603 #if (__FreeBSD_version < 1000703) || \
3604 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3605 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3610 #if (__FreeBSD_version < 901503) || \
3611 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/* Older FreeBSD lacks AC_ADVINFO_CHANGED; probe EEDP on discovery. */
3612 mprsas_check_eedp(sc, path, cgd);
3621 #if (__FreeBSD_version < 901503) || \
3622 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Legacy (pre-AC_ADVINFO_CHANGED) EEDP probe: for a newly discovered
 * device whose INQUIRY data advertises protection support, send an
 * internally generated READ CAPACITY(16).  The completion handler
 * (mprsas_read_cap_done) records whether each LUN is EEDP-formatted.
 */
3624 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3625 struct ccb_getdev *cgd)
3627 struct mprsas_softc *sassc = sc->sassc;
3628 struct ccb_scsiio *csio;
3629 struct scsi_read_capacity_16 *scsi_cmd;
3630 struct scsi_read_capacity_eedp *rcap_buf;
3632 target_id_t targetid;
3635 struct cam_path *local_path;
3636 struct mprsas_target *target;
3637 struct mprsas_lun *lun;
/* Resolve this path to our per-target bookkeeping. */
3641 pathid = cam_sim_path(sassc->sim);
3642 targetid = xpt_path_target_id(path);
3643 lunid = xpt_path_lun_id(path);
3645 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3646 "mprsas_check_eedp\n", targetid));
3647 target = &sassc->targets[targetid];
/* Skip devices that no longer have a valid firmware handle. */
3648 if (target->handle == 0x0)
3652 * Determine if the device is EEDP capable.
3654 * If this flag is set in the inquiry data, the device supports
3655 * protection information, and must support the 16 byte read capacity
3656 * command, otherwise continue without sending read cap 16.
3658 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3662 * Issue a READ CAPACITY 16 command. This info is used to determine if
3663 * the LUN is formatted for EEDP support.
3665 ccb = xpt_alloc_ccb_nowait();
3667 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
/* Build a private path so the internal command has its own reference. */
3672 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3674 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3681 * If LUN is already in list, don't create a new one.
3684 SLIST_FOREACH(lun, &target->luns, lun_link) {
3685 if (lun->lun_id == lunid) {
3691 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3694 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3696 xpt_free_path(local_path);
3700 lun->lun_id = lunid;
3701 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3704 xpt_path_string(local_path, path_str, sizeof(path_str));
3705 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3706 path_str, target->handle);
3709 * Issue a READ CAPACITY 16 command for the LUN. The
3710 * mprsas_read_cap_done function will load the read cap info into the
/* The data buffer is heap-allocated because the command completes
 * asynchronously; mprsas_read_cap_done() frees it. */
3713 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3715 if (rcap_buf == NULL) {
3716 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3717 "buffer for EEDP support.\n");
3718 xpt_free_path(ccb->ccb_h.path);
/* Fill in the SCSI I/O CCB: driver-internal READ CAPACITY(16), DATA-IN,
 * 60s timeout, completion routed to mprsas_read_cap_done(). */
3722 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3724 csio->ccb_h.func_code = XPT_SCSI_IO;
3725 csio->ccb_h.flags = CAM_DIR_IN;
3726 csio->ccb_h.retry_count = 4;
3727 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3728 csio->ccb_h.timeout = 60000;
3729 csio->data_ptr = (uint8_t *)rcap_buf;
3730 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3731 csio->sense_len = MPR_SENSE_LEN;
3732 csio->cdb_len = sizeof(*scsi_cmd);
3733 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E = SERVICE ACTION IN(16); the allocation length is placed in CDB
 * byte 13 (low byte of the 4-byte field). */
3735 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3736 bzero(scsi_cmd, sizeof(*scsi_cmd));
3737 scsi_cmd->opcode = 0x9E;
3738 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3739 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the sassc so the completion handler can find driver state. */
3741 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion handler for the driver-internal READ CAPACITY(16) issued
 * by mprsas_check_eedp().  Stores EEDP formatting state and logical
 * block size in the matching LUN record, then frees the data buffer,
 * the path, and the CCB.
 */
3746 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3748 struct mprsas_softc *sassc;
3749 struct mprsas_target *target;
3750 struct mprsas_lun *lun;
3751 struct scsi_read_capacity_eedp *rcap_buf;
3753 if (done_ccb == NULL)
3756 /* Driver need to release devq, it Scsi command is
3757 * generated by driver internally.
3758 * Currently there is a single place where driver
3759 * calls scsi command internally. In future if driver
3760 * calls more scsi command internally, it needs to release
3761 * devq internally, since those command will not go back to
3764 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3765 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3766 xpt_release_devq(done_ccb->ccb_h.path,
3767 /*count*/ 1, /*run_queue*/TRUE);
3770 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3773 * Get the LUN ID for the path and look it up in the LUN list for the
/* The sassc was stashed in ppriv_ptr1 when the CCB was built. */
3776 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3777 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3778 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3779 target = &sassc->targets[done_ccb->ccb_h.target_id];
3780 SLIST_FOREACH(lun, &target->luns, lun_link) {
3781 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3785 * Got the LUN in the target's LUN list. Fill it in with EEDP
3786 * info. If the READ CAP 16 command had some SCSI error (common
3787 * if command is not supported), mark the lun as not supporting
3788 * EEDP and set the block size to 0.
3790 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3791 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3792 lun->eedp_formatted = FALSE;
3793 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte (PROT_EN) => LUN is EEDP-formatted. */
3797 if (rcap_buf->protect & 0x01) {
3798 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3799 "%d is formatted for EEDP support.\n",
3800 done_ccb->ccb_h.target_lun,
3801 done_ccb->ccb_h.target_id);
3802 lun->eedp_formatted = TRUE;
3803 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
/* Everything allocated by mprsas_check_eedp() is released here. */
3808 // Finished with this CCB and path.
3809 free(rcap_buf, M_MPR);
3810 xpt_free_path(done_ccb->ccb_h.path);
3811 xpt_free_ccb(done_ccb);
3813 #endif /* (__FreeBSD_version < 901503) || \
3814 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3817 * Set the INRESET flag for this target so that no I/O will be sent to
3818 * the target until the reset has completed. If an I/O request does
3819 * happen, the devq will be frozen. The CCB holds the path which is
3820 * used to release the devq. The devq is released and the CCB is freed
3821 * when the TM completes.
3824 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3825 struct mprsas_target *target, lun_id_t lun_id)
/* NOTE(review): the NULL check on the allocated CCB is elided in this
 * excerpt -- confirm xpt_alloc_ccb_nowait() failure is handled before
 * ccb->ccb_h.path is written below. */
3830 ccb = xpt_alloc_ccb_nowait();
3832 path_id = cam_sim_path(sc->sassc->sim);
3833 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3834 target->tid, lun_id) != CAM_REQ_CMP) {
/* Associate the TM with its target and gate further I/O to it; the
 * flag is cleared when the TM completes. */
3838 tm->cm_targ = target;
3839 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off topology discovery: flag that we are waiting on port enable
 * (keeps the SIM queue frozen) and send the PORT ENABLE request.
 */
3845 mprsas_startup(struct mpr_softc *sc)
3848 * Send the port enable message and set the wait_for_port_enable flag.
3849 * This flag helps to keep the simq frozen until all discovery events
3852 sc->wait_for_port_enable = 1;
3853 mprsas_send_portenable(sc);
/*
 * Build and queue an MPI2 PORT ENABLE request to the IOC.  Completion
 * is handled asynchronously by mprsas_portenable_complete().
 */
3858 mprsas_send_portenable(struct mpr_softc *sc)
3860 MPI2_PORT_ENABLE_REQUEST *request;
3861 struct mpr_command *cm;
/* Bail out if no command frame is available (error return elided in
 * this excerpt). */
3865 if ((cm = mpr_alloc_command(sc)) == NULL)
3867 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3868 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3869 request->MsgFlags = 0;
/* Default request descriptor type; completion routed to our handler. */
3871 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3872 cm->cm_complete = mprsas_portenable_complete;
3876 mpr_map_command(sc, cm);
3877 mpr_dprint(sc, MPR_XINFO,
3878 "mpr_send_portenable finished cm %p req %p complete %p\n",
3879 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT ENABLE request.  Logs failures, frees
 * the command frame, clears the port-enable wait state, wakes any
 * thread sleeping on port_enable_complete, and drops the startup
 * refcount so the SIM queue can eventually be released.
 */
3884 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3886 MPI2_PORT_ENABLE_REPLY *reply;
3887 struct mprsas_softc *sassc;
3893 * Currently there should be no way we can hit this case. It only
3894 * happens when we have a failure to allocate chain frames, and
3895 * port enable commands don't have S/G lists.
3897 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3898 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3899 "This should not happen!\n", __func__, cm->cm_flags);
3902 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3904 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/* NOTE(review): the mask is applied BEFORE the le16toh() byte swap;
 * on a big-endian host this masks the wrong byte.  It likely should
 * read (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) -- confirm
 * against the upstream driver. */
3905 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3906 MPI2_IOCSTATUS_SUCCESS)
3907 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3909 mpr_free_command(sc, cm);
3911 * Done waiting for port enable to complete. Decrement the refcount.
3912 * If refcount is 0, discovery is complete and a rescan of the bus can
3915 sc->wait_for_port_enable = 0;
3916 sc->port_enable_complete = 1;
3917 wakeup(&sc->port_enable_complete);
3918 mprsas_startup_decrement(sassc);
/*
 * Return non-zero if target 'id' appears in the comma-separated
 * exclude-IDs list stored on the controller softc (used to skip
 * administratively excluded targets).
 */
3922 mprsas_check_id(struct mprsas_softc *sassc, int id)
3924 struct mpr_softc *sc = sassc->sc;
3928 ids = &sc->exclude_ids[0];
/* NOTE(review): strsep() writes NUL bytes into sc->exclude_ids as it
 * walks the list, permanently mutating the stored string; a second
 * call will only see the first token.  Consider scanning a copy --
 * confirm whether this function is ever called more than once. */
3929 while((name = strsep(&ids, ",")) != NULL) {
/* Skip empty tokens (leading/trailing or doubled commas). */
3930 if (name[0] == '\0')
/* strtol with base 0 accepts decimal, octal (0...) and hex (0x...). */
3932 if (strtol(name, NULL, 0) == (long)id)
3940 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3942 struct mprsas_softc *sassc;
3943 struct mprsas_lun *lun, *lun_tmp;
3944 struct mprsas_target *targ;
3949 * The number of targets is based on IOC Facts, so free all of
3950 * the allocated LUNs for each target and then the target buffer
3953 for (i=0; i< maxtargets; i++) {
3954 targ = &sassc->targets[i];
3955 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3959 free(sassc->targets, M_MPR);
3961 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3962 M_MPR, M_WAITOK|M_ZERO);
3963 if (!sassc->targets) {
3964 panic("%s failed to alloc targets with error %d\n",