2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 /* Communications core for Avago Technologies (LSI) MPT3 */
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
48 #include <sys/malloc.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
57 #include <machine/bus.h>
58 #include <machine/resource.h>
61 #include <machine/stdarg.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/nvme/nvme.h>
78 #include <dev/mpr/mpi/mpi2_type.h>
79 #include <dev/mpr/mpi/mpi2.h>
80 #include <dev/mpr/mpi/mpi2_ioc.h>
81 #include <dev/mpr/mpi/mpi2_sas.h>
82 #include <dev/mpr/mpi/mpi2_pci.h>
83 #include <dev/mpr/mpi/mpi2_cnfg.h>
84 #include <dev/mpr/mpi/mpi2_init.h>
85 #include <dev/mpr/mpi/mpi2_tool.h>
86 #include <dev/mpr/mpr_ioctl.h>
87 #include <dev/mpr/mprvar.h>
88 #include <dev/mpr/mpr_table.h>
89 #include <dev/mpr/mpr_sas.h>
91 #define MPRSAS_DISCOVERY_TIMEOUT 20
92 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
95 * static array to check SCSI OpCode for EEDP protection bits
97 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
100 static uint8_t op_code_prot[256] = {
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
116 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
119 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
121 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
123 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mprsas_poll(struct cam_sim *sim);
125 static void mprsas_scsiio_timeout(void *data);
126 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
127 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
128 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
129 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
130 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
131 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
132 struct mpr_command *cm);
133 static void mprsas_async(void *callback_arg, uint32_t code,
134 struct cam_path *path, void *arg);
135 #if (__FreeBSD_version < 901503) || \
136 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
137 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
138 struct ccb_getdev *cgd);
139 static void mprsas_read_cap_done(struct cam_periph *periph,
140 union ccb *done_ccb);
142 static int mprsas_send_portenable(struct mpr_softc *sc);
143 static void mprsas_portenable_complete(struct mpr_softc *sc,
144 struct mpr_command *cm);
146 #if __FreeBSD_version >= 900026
147 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
148 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
150 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151 #endif //FreeBSD_version >= 900026
/*
 * Linear scan of sassc->targets[] beginning at index 'start', looking for
 * the target whose firmware device handle matches.
 * NOTE(review): lines elided in this excerpt -- the parameter-list tail,
 * the match return and the not-found return are not visible here.
 */
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
157 	struct mprsas_target *target;
160 	for (i = start; i < sassc->maxtargets; i++) {
161 		target = &sassc->targets[i];
162 		if (target->handle == handle)
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170  * commands before device handles have been found by discovery. Since
171  * discovery involves reading config pages and possibly sending commands,
172  * discovery actions may continue even after we receive the end of discovery
173  * event, so refcount discovery actions instead of assuming we can unfreeze
174  * the simq when we get the event.
/*
 * Increment the discovery refcount while MPRSAS_IN_STARTUP is set; the
 * 0 -> 1 transition freezes the SIM queue.  Paired with
 * mprsas_startup_decrement().
 * NOTE(review): lines elided in this excerpt (return type, braces, and
 * the pre-902502/#else variant of the freeze).
 */
177 mprsas_startup_increment(struct mprsas_softc *sassc)
179 	MPR_FUNCTRACE(sassc->sc);
181 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 		if (sassc->startup_refcount++ == 0) {
183 			/* just starting, freeze the simq */
184 			mpr_dprint(sassc->sc, MPR_INIT,
185 			    "%s freezing simq\n", __func__);
186 #if (__FreeBSD_version >= 1000039) || \
187     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
190 		xpt_freeze_simq(sassc->sim, 1);
192 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
193 		    sassc->startup_refcount);
/*
 * If the SIM queue was frozen for error recovery (MPRSAS_QUEUE_FROZEN),
 * clear the flag and release the queue so CAM can resume sending I/O.
 * NOTE(review): return type and closing brace are elided in this excerpt.
 */
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
200 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 		xpt_release_simq(sassc->sim, 1);
203 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Decrement the discovery refcount taken by mprsas_startup_increment().
 * When it reaches zero, startup/discovery is finished: clear
 * MPRSAS_IN_STARTUP, release the SIM queue, and (on newer CAM) rescan
 * for the current topology.
 * NOTE(review): lines elided in this excerpt (return type, braces, and
 * the #else branch of the version conditional).
 */
208 mprsas_startup_decrement(struct mprsas_softc *sassc)
210 	MPR_FUNCTRACE(sassc->sc);
212 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
213 		if (--sassc->startup_refcount == 0) {
214 			/* finished all discovery-related actions, release
215 			 * the simq and rescan for the latest topology.
217 			mpr_dprint(sassc->sc, MPR_INIT,
218 			    "%s releasing simq\n", __func__);
219 			sassc->flags &= ~MPRSAS_IN_STARTUP;
220 			xpt_release_simq(sassc->sim, 1);
221 #if (__FreeBSD_version >= 1000039) || \
222     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
225 			mprsas_rescan_target(sassc->sc, NULL);
228 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
229 		    sassc->startup_refcount);
234  * The firmware requires us to stop sending commands when we're doing task
237  * XXX The logic for serializing the device has been made lazy and moved to
238  * mprsas_prepare_for_tm().
/*
 * Allocate a high-priority command and pre-set its function code to
 * MPI2_FUNCTION_SCSI_TASK_MGMT; the caller fills in the rest of the TM
 * request.
 * NOTE(review): the NULL check on the allocation and the return of 'tm'
 * are elided in this excerpt -- callers appear to handle a NULL result
 * (see mprsas_prepare_remove); confirm against the full source.
 */
241 mprsas_alloc_tm(struct mpr_softc *sc)
243 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
244 	struct mpr_command *tm;
247 	tm = mpr_alloc_high_priority_command(sc);
251 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
252 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
/*
 * Release a task-management command allocated by mprsas_alloc_tm():
 * clear the target's INRESET flag, release the per-device queue that was
 * frozen for the TM, free the associated path/CCB, and return the
 * command to the high-priority free pool.
 * NOTE(review): some braces and the NULL check guarding the
 * xpt_release_devq/xpt_free_* calls are elided in this excerpt.
 */
257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
259 	int target_id = 0xFFFFFFFF;
266 	 * For TM's the devq is frozen for the device. Unfreeze it here and
267 	 * free the resources used for freezing the devq. Must clear the
268 	 * INRESET flag as well or scsi I/O will not work.
270 	if (tm->cm_targ != NULL) {
271 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
272 		target_id = tm->cm_targ->tid;
275 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
277 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
278 		xpt_free_path(tm->cm_ccb->ccb_h.path);
279 		xpt_free_ccb(tm->cm_ccb);
282 	mpr_free_high_priority_command(sc, tm);
/*
 * Schedule an asynchronous CAM rescan.  With targ == NULL the whole bus
 * is rescanned (XPT_SCAN_BUS via the wildcard target); otherwise only
 * the given target (XPT_SCAN_TGT).  The CCB is allocated non-blocking;
 * on allocation or path-creation failure the rescan is simply dropped
 * after logging.
 * NOTE(review): the NULL check on the CCB, error-path cleanup, and the
 * final xpt_rescan()/xpt_action() call are elided in this excerpt.
 */
286 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
288 	struct mprsas_softc *sassc = sc->sassc;
290 	target_id_t targetid;
294 	pathid = cam_sim_path(sassc->sim);
296 		targetid = CAM_TARGET_WILDCARD;
298 		targetid = targ - sassc->targets;
301 	 * Allocate a CCB and schedule a rescan.
303 	ccb = xpt_alloc_ccb_nowait();
305 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
309 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
310 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
311 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
316 	if (targetid == CAM_TARGET_WILDCARD)
317 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
319 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
321 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style logging helper for a command: builds a single line in an
 * sbuf containing the CAM path (or a "noperiph" sim/bus/target tuple when
 * no CCB is attached), the SCSI CDB and transfer length for XPT_SCSI_IO,
 * the SMID, and the caller's formatted message.  Returns early when the
 * requested debug level is not enabled.
 * NOTE(review): va_start/va_end, local declarations (sb, str, path_str,
 * ap), and the sbuf_finish call are elided in this excerpt.
 */
326 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
336 	/* No need to be in here if debugging isn't enabled */
337 	if ((cm->cm_sc->mpr_debug & level) == 0)
340 	sbuf_new(&sb, str, sizeof(str), 0);
344 	if (cm->cm_ccb != NULL) {
345 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
347 		sbuf_cat(&sb, path_str);
348 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
349 			scsi_command_string(&cm->cm_ccb->csio, &sb);
350 			sbuf_printf(&sb, "length %d ",
351 			    cm->cm_ccb->csio.dxfer_len);
354 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
355 		    cam_sim_name(cm->cm_sc->sassc->sim),
356 		    cam_sim_unit(cm->cm_sc->sassc->sim),
357 		    cam_sim_bus(cm->cm_sc->sassc->sim),
358 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
362 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
363 	sbuf_vprintf(&sb, fmt, ap);
365 	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  The device handle travels in
 * cm_complete_data.  A NULL reply (e.g. after a diag reset) just frees
 * the TM.  On a successful IOCStatus the target's enclosure/linkrate
 * bookkeeping is cleared -- but devname/sasaddr are deliberately kept so
 * the same target id can be re-used if the device returns.  Unlike bare
 * drives, volumes need no follow-up MPI2_SAS_OP_REMOVE_DEVICE.
 * NOTE(review): the targ lookup/assignment and several braces are elided
 * in this excerpt.
 */
371 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
373 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
374 	struct mprsas_target *targ;
379 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
380 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
384 		/* XXX retry the remove after the diag reset completes? */
385 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
386 		    "0x%04x\n", __func__, handle);
387 		mprsas_free_tm(sc, tm);
391 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
392 	    MPI2_IOCSTATUS_SUCCESS) {
393 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
394 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
397 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
398 	    le32toh(reply->TerminationCount));
399 	mpr_free_reply(sc, tm->cm_reply_data);
400 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
402 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
406 	 * Don't clear target if remove fails because things will get confusing.
407 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
408 	 * this target id if possible, and so we can assign the same target id
409 	 * to this device if it comes back in the future.
411 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
412 	    MPI2_IOCSTATUS_SUCCESS) {
415 		targ->encl_handle = 0x0;
416 		targ->encl_level_valid = 0x0;
417 		targ->encl_level = 0x0;
418 		targ->connector_name[0] = ' ';
419 		targ->connector_name[1] = ' ';
420 		targ->connector_name[2] = ' ';
421 		targ->connector_name[3] = ' ';
422 		targ->encl_slot = 0x0;
423 		targ->exp_dev_handle = 0x0;
425 		targ->linkrate = 0x0;
428 		targ->scsi_req_desc_type = 0;
431 	mprsas_free_tm(sc, tm);
436  * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
437  * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Start removal of a RAID volume: mark the target INREMOVAL, rescan it so
 * CAM notices, then send a target-reset TM whose completion routine
 * (mprsas_remove_volume) finishes the teardown.  For NVMe devices without
 * custom TM handling, a PCIe protocol-level reset is requested instead of
 * a SAS/SATA link reset.
 * NOTE(review): 'sc' assignment from sassc, NULL checks on targ/cm, and
 * the request-descriptor setup line are elided in this excerpt.
 */
440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
442 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443 	struct mpr_softc *sc;
444 	struct mpr_command *cm;
445 	struct mprsas_target *targ = NULL;
447 	MPR_FUNCTRACE(sassc->sc);
450 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
452 		/* FIXME: what is the action? */
453 		/* We don't know about this device? */
454 		mpr_dprint(sc, MPR_ERROR,
455 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
459 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
461 	cm = mprsas_alloc_tm(sc);
463 		mpr_dprint(sc, MPR_ERROR,
464 		    "%s: command alloc failure\n", __func__);
468 	mprsas_rescan_target(sc, targ);
470 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471 	req->DevHandle = targ->handle;
472 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
474 	if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
475 		/* SAS Hard Link Reset / SATA Link Reset */
476 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
478 		/* PCIe Protocol Level Reset*/
480 		    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
485 	cm->cm_complete = mprsas_remove_volume;
486 	cm->cm_complete_data = (void *)(uintptr_t)handle;
488 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
489 	    __func__, targ->tid);
490 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
492 	mpr_map_command(sc, cm);
496  * The firmware performs debounce on the link to avoid transient link errors
497  * and false removals. When it does decide that link has been lost and a
498  * device needs to go away, it expects that the host will perform a target reset
499  * and then an op remove. The reset has the side-effect of aborting any
500  * outstanding requests for the device, which is required for the op-remove to
501  * succeed. It's not clear if the host should check for the device coming back
502  * alive after the reset.
/*
 * Start removal of a bare device: mark the target INREMOVAL, rescan it,
 * and send a target-reset TM.  The completion routine
 * (mprsas_remove_device) then issues the required
 * MPI2_SAS_OP_REMOVE_DEVICE.
 * NOTE(review): 'sc' assignment, NULL checks on targ/tm, and the
 * descriptor setup are elided in this excerpt.
 */
505 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
507 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
508 	struct mpr_softc *sc;
509 	struct mpr_command *tm;
510 	struct mprsas_target *targ = NULL;
512 	MPR_FUNCTRACE(sassc->sc);
516 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
518 		/* FIXME: what is the action? */
519 		/* We don't know about this device? */
520 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
525 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
527 	tm = mprsas_alloc_tm(sc);
529 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
534 	mprsas_rescan_target(sc, targ);
536 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
537 	memset(req, 0, sizeof(*req));
538 	req->DevHandle = htole16(targ->handle);
539 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
541 	/* SAS Hard Link Reset / SATA Link Reset */
542 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
546 	tm->cm_complete = mprsas_remove_device;
547 	tm->cm_complete_data = (void *)(uintptr_t)handle;
549 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
550 	    __func__, targ->tid);
551 	mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
553 	mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  After validating the reply, the SAME command
 * is re-used to send a SAS IO-unit-control OP_REMOVE_DEVICE for the
 * handle, with mprsas_remove_complete() as the next completion stage.
 * Any commands still queued on the target are completed back to CAM with
 * CAM_DEV_NOT_THERE.
 * NOTE(review): the targ lookup, several braces/returns, and the CCB
 * declaration used in the TAILQ loop are elided in this excerpt.
 */
557 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
559 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
560 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
561 	struct mprsas_target *targ;
562 	struct mpr_command *next_cm;
567 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
568 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
572 	 * Currently there should be no way we can hit this case.  It only
573 	 * happens when we have a failure to allocate chain frames, and
574 	 * task management commands don't have S/G lists.
576 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
577 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
578 		    "handle %#04x! This should not happen!\n", __func__,
579 		    tm->cm_flags, handle);
583 		/* XXX retry the remove after the diag reset completes? */
584 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
585 		    "0x%04x\n", __func__, handle);
586 		mprsas_free_tm(sc, tm);
590 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
591 	    MPI2_IOCSTATUS_SUCCESS) {
592 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
593 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
596 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
597 	    le32toh(reply->TerminationCount));
598 	mpr_free_reply(sc, tm->cm_reply_data);
599 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
601 	/* Reuse the existing command */
602 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
603 	memset(req, 0, sizeof(*req));
604 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
605 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
606 	req->DevHandle = htole16(handle);
608 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
609 	tm->cm_complete = mprsas_remove_complete;
610 	tm->cm_complete_data = (void *)(uintptr_t)handle;
612 	mpr_map_command(sc, tm);
614 	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
616 	if (targ->encl_level_valid) {
617 		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
618 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
619 		    targ->connector_name);
621 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
624 		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
625 		ccb = tm->cm_complete_data;
626 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
627 		tm->cm_state = MPR_CM_STATE_BUSY;
628 		mprsas_scsiio_complete(sc, tm);
/*
 * Final completion stage of bare-device removal (after the
 * OP_REMOVE_DEVICE sent by mprsas_remove_device()).  On success, clears
 * the target's enclosure/linkrate bookkeeping (devname/sasaddr are kept
 * so the target id can be re-used if the device returns) and frees the
 * per-target LUN list.  Error and NULL-reply paths just free the TM.
 * NOTE(review): the targ lookup, the free(lun, ...) inside the SLIST
 * drain, and several braces/returns are elided in this excerpt.
 */
633 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
635 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
637 	struct mprsas_target *targ;
638 	struct mprsas_lun *lun;
642 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
643 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
646 	 * Currently there should be no way we can hit this case.  It only
647 	 * happens when we have a failure to allocate chain frames, and
648 	 * task management commands don't have S/G lists.
650 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
651 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
652 		    "handle %#04x! This should not happen!\n", __func__,
653 		    tm->cm_flags, handle);
654 		mprsas_free_tm(sc, tm);
659 		/* most likely a chip reset */
660 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
661 		    "0x%04x\n", __func__, handle);
662 		mprsas_free_tm(sc, tm);
666 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
667 	    __func__, handle, le16toh(reply->IOCStatus));
670 	 * Don't clear target if remove fails because things will get confusing.
671 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
672 	 * this target id if possible, and so we can assign the same target id
673 	 * to this device if it comes back in the future.
675 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
676 	    MPI2_IOCSTATUS_SUCCESS) {
679 		targ->encl_handle = 0x0;
680 		targ->encl_level_valid = 0x0;
681 		targ->encl_level = 0x0;
682 		targ->connector_name[0] = ' ';
683 		targ->connector_name[1] = ' ';
684 		targ->connector_name[2] = ' ';
685 		targ->connector_name[3] = ' ';
686 		targ->encl_slot = 0x0;
687 		targ->exp_dev_handle = 0x0;
689 		targ->linkrate = 0x0;
692 		targ->scsi_req_desc_type = 0;
694 		while (!SLIST_EMPTY(&targ->luns)) {
695 			lun = SLIST_FIRST(&targ->luns);
696 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
701 	mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask and register mprsas_evt_handler for it.
 * PCIe device/enumeration/topology events are only requested on MPI 2.6+
 * firmware with a Gen3.5 IOC.
 * NOTE(review): the return type, the 'events' declaration/zeroing, and
 * the final return are elided in this excerpt.
 */
705 mprsas_register_events(struct mpr_softc *sc)
710 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
711 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
712 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
713 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
714 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
715 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
716 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
717 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
718 	setbit(events, MPI2_EVENT_IR_VOLUME);
719 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
720 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
721 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
722 	setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
723 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
724 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
725 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
726 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
727 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
728 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
732 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
733 	    &sc->sassc->mprsas_eh);
/*
 * Attach the SAS/CAM subsystem: allocate the softc and targets[] array,
 * create the SIMQ/SIM, register the SCSI bus, enter startup mode (simq
 * frozen until discovery completes), create a wildcard path, register for
 * async events used for EEDP detection (failure there only disables EEDP,
 * it does not fail the attach), and finally register firmware events.
 * NOTE(review): this excerpt omits many lines -- error-path 'goto out'
 * cleanup, lock acquire/release, the 'event'/'status' declarations, and
 * portenable/ioctl setup are not visible; confirm against full source.
 */
739 mpr_attach_sas(struct mpr_softc *sc)
741 	struct mprsas_softc *sassc;
743 	int unit, error = 0, reqs;
746 	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
748 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
750 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
751 		    "Cannot allocate SAS subsystem memory\n");
756 	 * XXX MaxTargets could change during a reinit.  Since we don't
757 	 * resize the targets[] array during such an event, cache the value
758 	 * of MaxTargets here so that we don't get into trouble later.  This
759 	 * should move into the reinit logic.
761 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
762 	sassc->targets = malloc(sizeof(struct mprsas_target) *
763 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
764 	if (!sassc->targets) {
765 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
766 		    "Cannot allocate SAS target memory\n");
773 	reqs = sc->num_reqs - sc->num_prireqs - 1;
774 	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
775 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
780 	unit = device_get_unit(sc->mpr_dev);
781 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
782 	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
783 	if (sassc->sim == NULL) {
784 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
789 	TAILQ_INIT(&sassc->ev_queue);
791 	/* Initialize taskqueue for Event Handling */
792 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
793 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
794 	    taskqueue_thread_enqueue, &sassc->ev_tq);
795 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
796 	    device_get_nameunit(sc->mpr_dev));
801 	 * XXX There should be a bus for every port on the adapter, but since
802 	 * we're just going to fake the topology for now, we'll pretend that
803 	 * everything is just a target on a single bus.
805 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
806 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
807 		    "Error %d registering SCSI bus\n", error);
813 	 * Assume that discovery events will start right away.
815 	 * Hold off boot until discovery is complete.
817 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
818 	sc->sassc->startup_refcount = 0;
819 	mprsas_startup_increment(sassc);
821 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
824 	 * Register for async events so we can determine the EEDP
825 	 * capabilities of devices.
827 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
828 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
830 	if (status != CAM_REQ_CMP) {
831 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
832 		    "Error %#x creating sim path\n", status);
837 #if (__FreeBSD_version >= 1000006) || \
838     ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
839 		event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
841 		event = AC_FOUND_DEVICE;
845 		 * Prior to the CAM locking improvements, we can't call
846 		 * xpt_register_async() with a particular path specified.
848 		 * If a path isn't specified, xpt_register_async() will
849 		 * generate a wildcard path and acquire the XPT lock while
850 		 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
851 		 * It will then drop the XPT lock once that is done.
853 		 * If a path is specified for xpt_register_async(), it will
854 		 * not acquire and drop the XPT lock around the call to
855 		 * xpt_action().  xpt_action() asserts that the caller
856 		 * holds the SIM lock, so the SIM lock has to be held when
857 		 * calling xpt_register_async() when the path is specified.
859 		 * But xpt_register_async calls xpt_for_all_devices(),
860 		 * which calls xptbustraverse(), which will acquire each
861 		 * SIM lock.  When it traverses our particular bus, it will
862 		 * necessarily acquire the SIM lock, which will lead to a
863 		 * recursive lock acquisition.
865 		 * The CAM locking changes fix this problem by acquiring
866 		 * the XPT topology lock around bus traversal in
867 		 * xptbustraverse(), so the caller can hold the SIM lock
868 		 * and it does not cause a recursive lock acquisition.
870 		 * These __FreeBSD_version values are approximate, especially
871 		 * for stable/10, which is two months later than the actual
875 #if (__FreeBSD_version < 1000703) || \
876     ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
878 		status = xpt_register_async(event, mprsas_async, sc,
882 		status = xpt_register_async(event, mprsas_async, sc,
886 		if (status != CAM_REQ_CMP) {
887 			mpr_dprint(sc, MPR_ERROR,
888 			    "Error %#x registering async handler for "
889 			    "AC_ADVINFO_CHANGED events\n", status);
890 			xpt_free_path(sassc->path);
894 	if (status != CAM_REQ_CMP) {
896 		 * EEDP use is the exception, not the rule.
897 		 * Warn the user, but do not fail to attach.
899 		mpr_printf(sc, "EEDP capabilities disabled.\n");
904 	mprsas_register_events(sc);
909 	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
/*
 * Tear down the SAS/CAM subsystem in reverse order of attach: deregister
 * firmware events, free the event taskqueue (with the lock dropped so
 * in-flight tasks can drain), drain any leftover startup refcounts so CAM
 * is not left frozen, deregister the async handler, release the simq if
 * still in startup, deregister/free the SIM and SIMQ, free the LUN lists
 * and targets[] array.
 * NOTE(review): lock acquire/release, 'sassc' assignment, the free(lun)
 * inside the SLIST loop, and the final free of sassc are elided in this
 * excerpt.
 */
914 mpr_detach_sas(struct mpr_softc *sc)
916 	struct mprsas_softc *sassc;
917 	struct mprsas_lun *lun, *lun_tmp;
918 	struct mprsas_target *targ;
923 	if (sc->sassc == NULL)
927 	mpr_deregister_events(sc, sassc->mprsas_eh);
930 	 * Drain and free the event handling taskqueue with the lock
931 	 * unheld so that any parallel processing tasks drain properly
932 	 * without deadlocking.
934 	if (sassc->ev_tq != NULL)
935 		taskqueue_free(sassc->ev_tq);
937 	/* Make sure CAM doesn't wedge if we had to bail out early. */
940 	while (sassc->startup_refcount != 0)
941 		mprsas_startup_decrement(sassc);
943 	/* Deregister our async handler */
944 	if (sassc->path != NULL) {
945 		xpt_register_async(0, mprsas_async, sc, sassc->path);
946 		xpt_free_path(sassc->path);
950 	if (sassc->flags & MPRSAS_IN_STARTUP)
951 		xpt_release_simq(sassc->sim, 1);
953 	if (sassc->sim != NULL) {
954 		xpt_bus_deregister(cam_sim_path(sassc->sim));
955 		cam_sim_free(sassc->sim, FALSE);
960 	if (sassc->devq != NULL)
961 		cam_simq_free(sassc->devq);
963 	for (i = 0; i < sassc->maxtargets; i++) {
964 		targ = &sassc->targets[i];
965 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
969 	free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery finishes: cancel a pending discovery
 * timeout, and (when mapping-event tracking is enabled) arm the
 * device_check_callout so missing-device counts in the mapping table are
 * updated after events have had time to be processed.
 * NOTE(review): the return type, the final callout_reset argument, and
 * the closing brace are elided in this excerpt.
 */
977 mprsas_discovery_end(struct mprsas_softc *sassc)
979 	struct mpr_softc *sc = sassc->sc;
983 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
984 		callout_stop(&sassc->discovery_callout);
987 	 * After discovery has completed, check the mapping table for any
988 	 * missing devices and update their missing counts. Only do this once
989 	 * whenever the driver is initialized so that missing counts aren't
990 	 * updated unnecessarily. Note that just because discovery has
991 	 * completed doesn't mean that events have been processed yet. The
992 	 * check_devices function is a callout timer that checks if ALL devices
993 	 * are missing. If so, it will wait a little longer for events to
994 	 * complete and keep resetting itself until some device in the mapping
995 	 * table is not missing, meaning that event processing has started.
997 	if (sc->track_mapping_events) {
998 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
999 		    "completed. Check for missing devices in the mapping "
1001 		callout_reset(&sc->device_check_callout,
1002 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
/*
 * CAM action entry point for the SIM.  Dispatches on the CCB function
 * code: fills in path inquiry (XPT_PATH_INQ), reports per-target SAS
 * link-rate transfer settings (XPT_GET_TRAN_SETTINGS), computes geometry,
 * handles device reset, fakes success for abort/term-io, and routes
 * SCSI I/O and SMP I/O to their handlers.  Unknown codes get
 * CAM_FUNC_NOTAVAIL.  Called with the driver mutex held (asserted).
 * NOTE(review): many case labels, break statements, the linkrate case
 * values, and the final xpt_done() are elided in this excerpt.
 */
1008 mprsas_action(struct cam_sim *sim, union ccb *ccb)
1010 	struct mprsas_softc *sassc;
1012 	sassc = cam_sim_softc(sim);
1014 	MPR_FUNCTRACE(sassc->sc);
1015 	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1016 	    ccb->ccb_h.func_code);
1017 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
1019 	switch (ccb->ccb_h.func_code) {
1022 		struct ccb_pathinq *cpi = &ccb->cpi;
1023 		struct mpr_softc *sc = sassc->sc;
1025 		cpi->version_num = 1;
1026 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1027 		cpi->target_sprt = 0;
1028 #if (__FreeBSD_version >= 1000039) || \
1029     ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1030 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1032 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1034 		cpi->hba_eng_cnt = 0;
1035 		cpi->max_target = sassc->maxtargets - 1;
1039 		 * initiator_id is set here to an ID outside the set of valid
1040 		 * target IDs (including volumes).
1042 		cpi->initiator_id = sassc->maxtargets;
1043 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1044 		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1045 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1046 		cpi->unit_number = cam_sim_unit(sim);
1047 		cpi->bus_id = cam_sim_bus(sim);
1049 		 * XXXSLM-I think this needs to change based on config page or
1050 		 * something instead of hardcoded to 150000.
1052 		cpi->base_transfer_speed = 150000;
1053 		cpi->transport = XPORT_SAS;
1054 		cpi->transport_version = 0;
1055 		cpi->protocol = PROTO_SCSI;
1056 		cpi->protocol_version = SCSI_REV_SPC;
1057 		cpi->maxio = sc->maxio;
1058 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1061 	case XPT_GET_TRAN_SETTINGS:
1063 		struct ccb_trans_settings	*cts;
1064 		struct ccb_trans_settings_sas	*sas;
1065 		struct ccb_trans_settings_scsi	*scsi;
1066 		struct mprsas_target *targ;
1069 		sas = &cts->xport_specific.sas;
1070 		scsi = &cts->proto_specific.scsi;
1072 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1073 		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1074 		    cts->ccb_h.target_id));
1075 		targ = &sassc->targets[cts->ccb_h.target_id];
1076 		if (targ->handle == 0x0) {
1077 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1081 		cts->protocol_version = SCSI_REV_SPC2;
1082 		cts->transport = XPORT_SAS;
1083 		cts->transport_version = 0;
1085 		sas->valid = CTS_SAS_VALID_SPEED;
1086 		switch (targ->linkrate) {
1088 			sas->bitrate = 150000;
1091 			sas->bitrate = 300000;
1094 			sas->bitrate = 600000;
1097 			sas->bitrate = 1200000;
1103 		cts->protocol = PROTO_SCSI;
1104 		scsi->valid = CTS_SCSI_VALID_TQ;
1105 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1107 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1110 	case XPT_CALC_GEOMETRY:
1111 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1112 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1115 		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1117 		mprsas_action_resetdev(sassc, ccb);
1122 		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1123 		    "for abort or reset\n");
1124 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1127 		mprsas_action_scsiio(sassc, ccb);
1129 #if __FreeBSD_version >= 900026
1131 		mprsas_action_smpio(sassc, ccb);
1135 		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Broadcast a CAM async event (e.g. AC_BUS_RESET or AC_SENT_BDR) for the
 * given target/lun on our SIM's path, then free the temporary path.
 * Logs and returns without announcing if path creation fails.
 * NOTE(review): the return type and some braces are elided in this
 * excerpt.
 */
1143 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1144     target_id_t target_id, lun_id_t lun_id)
1146 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1147 	struct cam_path *path;
1149 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1150 	    ac_code, target_id, (uintmax_t)lun_id);
1152 	if (xpt_create_path(&path, NULL,
1153 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1154 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1159 	xpt_async(ac_code, path, NULL);
1160 	xpt_free_path(path);
/*
 * Diag-reset helper: walk every non-free command and complete it with a
 * NULL reply -- via its completion routine when set, or by waking a
 * sleeping waiter (MPR_CM_FLAGS_WAKEUP).  SATA-ID buffers allocated for
 * timed-out identify commands are freed here, and polled commands are
 * marked complete so their pollers exit.  Any command left neither
 * completed nor free is logged as an anomaly.  Resets io_cmds_active.
 * NOTE(review): 'completed' handling (declaration, set-to-1 lines, the
 * wakeup() call) and several braces are elided in this excerpt.
 */
1164 mprsas_complete_all_commands(struct mpr_softc *sc)
1166 	struct mpr_command *cm;
1171 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1173 	/* complete all commands with a NULL reply */
1174 	for (i = 1; i < sc->num_reqs; i++) {
1175 		cm = &sc->commands[i];
1176 		if (cm->cm_state == MPR_CM_STATE_FREE)
1179 		cm->cm_state = MPR_CM_STATE_BUSY;
1180 		cm->cm_reply = NULL;
1183 		if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
1185 			free(cm->cm_data, M_MPR);
1189 		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1190 			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1192 		if (cm->cm_complete != NULL) {
1193 			mprsas_log_command(cm, MPR_RECOVERY,
1194 			    "completing cm %p state %x ccb %p for diag reset\n",
1195 			    cm, cm->cm_state, cm->cm_ccb);
1196 			cm->cm_complete(sc, cm);
1198 		} else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1199 			mprsas_log_command(cm, MPR_RECOVERY,
1200 			    "waking up cm %p state %x ccb %p for diag reset\n",
1201 			    cm, cm->cm_state, cm->cm_ccb);
1206 		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1207 			/* this should never happen, but if it does, log */
1208 			mprsas_log_command(cm, MPR_RECOVERY,
1209 			    "cm %p state %x flags 0x%x ccb %p during diag "
1210 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
1215 	sc->io_cmds_active = 0;
/*
 * Recover driver state after a controller diag reset: re-enter startup
 * mode, complete all outstanding commands, and invalidate every
 * target's device handles so rediscovery assigns fresh ones.
 */
1219 mprsas_handle_reinit(struct mpr_softc *sc)
1223 /* Go back into startup mode and freeze the simq, so that CAM
1224 * doesn't send any commands until after we've rediscovered all
1225 * targets and found the proper device handles for them.
1227 * After the reset, portenable will trigger discovery, and after all
1228 * discovery-related activities have finished, the simq will be
1231 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1232 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1233 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1234 mprsas_startup_increment(sc->sassc);
1236 /* notify CAM of a bus reset */
1237 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1240 /* complete and cleanup after all outstanding commands */
1241 mprsas_complete_all_commands(sc);
1243 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1244 __func__, sc->sassc->startup_refcount);
1246 /* zero all the target handles, since they may change after the
1247 * reset, and we have to rediscover all the targets and use the new
1250 for (i = 0; i < sc->sassc->maxtargets; i++) {
1251 if (sc->sassc->targets[i].outstanding != 0)
1252 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1253 i, sc->sassc->targets[i].outstanding);
1254 sc->sassc->targets[i].handle = 0x0;
1255 sc->sassc->targets[i].exp_dev_handle = 0x0;
1256 sc->sassc->targets[i].outstanding = 0;
1257 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management command itself times
 * out.  The TM is pulled out of the in-queue state; visible code ends
 * here in this listing — presumably the driver escalates to a diag
 * reset afterwards (TODO confirm against full source).
 */
1261 mprsas_tm_timeout(void *data)
1263 struct mpr_command *tm = data;
1264 struct mpr_softc *sc = tm->cm_sc;
1266 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1268 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/* The TM must still be queued to the hardware at this point. */
1271 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1272 ("command not inqueue\n"));
1274 tm->cm_state = MPR_CM_STATE_BUSY;
/*
 * Completion handler for a LUN-reset task-management command.  If no
 * commands remain outstanding for the LUN, recovery for it is done and
 * any other timed-out command on the target is aborted next; otherwise
 * the reset effectively failed and we escalate to a target reset.
 */
1279 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1281 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1282 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1283 unsigned int cm_count = 0;
1284 struct mpr_command *cm;
1285 struct mprsas_target *targ;
/* The TM completed, so its timeout callout is no longer needed. */
1287 callout_stop(&tm->cm_callout);
1289 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1290 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1294 * Currently there should be no way we can hit this case. It only
1295 * happens when we have a failure to allocate chain frames, and
1296 * task management commands don't have S/G lists.
1298 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1299 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1300 "%s: cm_flags = %#x for LUN reset! "
1301 "This should not happen!\n", __func__, tm->cm_flags);
1302 mprsas_free_tm(sc, tm);
/* A NULL reply is only legitimate during a diag reset. */
1306 if (reply == NULL) {
1307 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1309 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1310 /* this completion was due to a reset, just cleanup */
1311 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1312 "reset, ignoring NULL LUN reset reply\n");
1314 mprsas_free_tm(sc, tm);
1317 /* we should have gotten a reply. */
1318 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1319 "LUN reset attempt, resetting controller\n");
1325 mpr_dprint(sc, MPR_RECOVERY,
1326 "logical unit reset status 0x%x code 0x%x count %u\n",
1327 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1328 le32toh(reply->TerminationCount));
1331 * See if there are any outstanding commands for this LUN.
1332 * This could be made more efficient by using a per-LU data
1333 * structure of some sort.
1335 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1336 if (cm->cm_lun == tm->cm_lun)
1340 if (cm_count == 0) {
1341 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1342 "Finished recovery after LUN reset for target %u\n",
1345 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1349 * We've finished recovery for this logical unit. check and
1350 * see if some other logical unit has a timedout command
1351 * that needs to be processed.
1353 cm = TAILQ_FIRST(&targ->timedout_commands);
1355 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1356 "More commands to abort for target %u\n", targ->tid);
/* Reuse this TM frame to abort the next timed-out command. */
1357 mprsas_send_abort(sc, tm, cm);
1360 mprsas_free_tm(sc, tm);
1363 /* if we still have commands for this LUN, the reset
1364 * effectively failed, regardless of the status reported.
1365 * Escalate to a target reset.
1367 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1368 "logical unit reset complete for target %u, but still "
1369 "have %u command(s), sending target reset\n", targ->tid,
/* NVMe devices only support target reset when custom TM handling is on. */
1371 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1372 mprsas_send_reset(sc, tm,
1373 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a target-reset task-management command.  If
 * the target has no outstanding commands, recovery for it (and all its
 * LUNs) is finished; otherwise the reset failed and we escalate to a
 * full controller reset.
 */
1380 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1382 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1383 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1384 struct mprsas_target *targ;
/* The TM completed, so its timeout callout is no longer needed. */
1386 callout_stop(&tm->cm_callout);
1388 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1389 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1393 * Currently there should be no way we can hit this case. It only
1394 * happens when we have a failure to allocate chain frames, and
1395 * task management commands don't have S/G lists.
1397 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1398 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1399 "reset! This should not happen!\n", __func__, tm->cm_flags);
1400 mprsas_free_tm(sc, tm);
/* A NULL reply is only legitimate during a diag reset. */
1404 if (reply == NULL) {
1405 mpr_dprint(sc, MPR_RECOVERY,
1406 "NULL target reset reply for tm %p TaskMID %u\n",
1407 tm, le16toh(req->TaskMID));
1408 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1409 /* this completion was due to a reset, just cleanup */
1410 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1411 "reset, ignoring NULL target reset reply\n");
1413 mprsas_free_tm(sc, tm);
1416 /* we should have gotten a reply. */
1417 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1418 "target reset attempt, resetting controller\n");
1424 mpr_dprint(sc, MPR_RECOVERY,
1425 "target reset status 0x%x code 0x%x count %u\n",
1426 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1427 le32toh(reply->TerminationCount));
1429 if (targ->outstanding == 0) {
1431 * We've finished recovery for this target and all
1432 * of its logical units.
1434 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1435 "Finished reset recovery for target %u\n", targ->tid);
1437 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1441 mprsas_free_tm(sc, tm);
1444 * After a target reset, if this target still has
1445 * outstanding commands, the reset effectively failed,
1446 * regardless of the status reported. escalate.
1448 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1449 "Target reset complete for target %u, but still have %u "
1450 "command(s), resetting controller\n", targ->tid,
/* Default TM reset timeout, in seconds. */
1456 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a reset task-management request of the given 'type'
 * (logical-unit or target reset) for tm's target.  Sets the completion
 * handler, arms the TM timeout callout, and maps the command to HW.
 */
1459 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1461 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1462 struct mprsas_target *target;
1465 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1466 if (target->handle == 0) {
1467 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1468 "%d\n", __func__, target->tid);
1472 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1473 req->DevHandle = htole16(target->handle);
1474 req->TaskType = type;
1476 if (!target->is_nvme || sc->custom_nvme_tm_handling) {
1477 timeout = MPR_RESET_TIMEOUT;
1479 * Target reset method =
1480 * SAS Hard Link Reset / SATA Link Reset
1482 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* NVMe path: honor a per-target timeout override if configured. */
1484 timeout = (target->controller_reset_timeout) ? (
1485 target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
1486 /* PCIe Protocol Level Reset*/
1488 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
1491 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1492 /* XXX Need to handle invalid LUNs */
1493 MPR_SET_LUN(req->LUN, tm->cm_lun);
1494 tm->cm_targ->logical_unit_resets++;
1495 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1496 "Sending logical unit reset to target %u lun %d\n",
1497 target->tid, tm->cm_lun);
1498 tm->cm_complete = mprsas_logical_unit_reset_complete;
1499 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1500 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1501 tm->cm_targ->target_resets++;
1502 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1503 "Sending target reset to target %u\n", target->tid);
1504 tm->cm_complete = mprsas_target_reset_complete;
/* Target reset affects every LUN, hence the wildcard. */
1505 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1508 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1512 if (target->encl_level_valid) {
1513 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1514 "At enclosure level %d, slot %d, connector name (%4s)\n",
1515 target->encl_level, target->encl_slot,
1516 target->connector_name);
1520 tm->cm_complete_data = (void *)tm;
/* Arm the TM timeout before handing the command to the hardware. */
1522 callout_reset(&tm->cm_callout, timeout * hz,
1523 mprsas_tm_timeout, tm);
1525 err = mpr_map_command(sc, tm);
1527 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1528 "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT_TASK task-management command.  On
 * success, either finishes recovery (no more timed-out commands) or
 * continues aborting the next one; if the aborted command is still at
 * the head of the timed-out list, escalates to a LUN reset.
 */
1535 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1537 struct mpr_command *cm;
1538 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1539 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1540 struct mprsas_target *targ;
/* The TM completed, so its timeout callout is no longer needed. */
1542 callout_stop(&tm->cm_callout);
1544 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1545 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1549 * Currently there should be no way we can hit this case. It only
1550 * happens when we have a failure to allocate chain frames, and
1551 * task management commands don't have S/G lists.
1553 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1554 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1555 "cm_flags = %#x for abort %p TaskMID %u!\n",
1556 tm->cm_flags, tm, le16toh(req->TaskMID));
1557 mprsas_free_tm(sc, tm);
/* A NULL reply is only legitimate during a diag reset. */
1561 if (reply == NULL) {
1562 mpr_dprint(sc, MPR_RECOVERY,
1563 "NULL abort reply for tm %p TaskMID %u\n",
1564 tm, le16toh(req->TaskMID));
1565 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1566 /* this completion was due to a reset, just cleanup */
1567 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1568 "reset, ignoring NULL abort reply\n");
1570 mprsas_free_tm(sc, tm);
1572 /* we should have gotten a reply. */
1573 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1574 "abort attempt, resetting controller\n");
1580 mpr_dprint(sc, MPR_RECOVERY,
1581 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1582 le16toh(req->TaskMID),
1583 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1584 le32toh(reply->TerminationCount));
1586 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1589 * if there are no more timedout commands, we're done with
1590 * error recovery for this target.
1592 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1593 "Finished abort recovery for target %u\n", targ->tid);
1595 mprsas_free_tm(sc, tm);
1596 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1597 /* abort success, but we have more timedout commands to abort */
1598 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1599 "Continuing abort recovery for target %u\n", targ->tid);
/* Reuse this TM frame to abort the next timed-out command. */
1600 mprsas_send_abort(sc, tm, cm);
1603 * we didn't get a command completion, so the abort
1604 * failed as far as we're concerned. escalate.
1606 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1607 "Abort failed for target %u, sending logical unit reset\n",
1610 mprsas_send_reset(sc, tm,
1611 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Default ABORT_TASK timeout, in seconds. */
1615 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT_TASK task-management request for the
 * timed-out command 'cm', using TM frame 'tm'.  Sets the completion
 * handler, arms the TM timeout callout, and maps the command to HW.
 */
1618 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1619 struct mpr_command *cm)
1621 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1622 struct mprsas_target *targ;
/* A zero devhandle means the target is gone; nothing to abort. */
1626 if (targ->handle == 0) {
1627 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1628 "%s null devhandle for target_id %d\n",
1629 __func__, cm->cm_ccb->ccb_h.target_id);
1633 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1634 "Aborting command %p\n", cm);
1636 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1637 req->DevHandle = htole16(targ->handle);
1638 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1640 /* XXX Need to handle invalid LUNs */
1641 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* Identify the victim command to the firmware by its SMID. */
1643 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1646 tm->cm_complete = mprsas_abort_complete;
1647 tm->cm_complete_data = (void *)tm;
1648 tm->cm_targ = cm->cm_targ;
1649 tm->cm_lun = cm->cm_lun;
/* NVMe devices may use a dedicated abort timeout. */
1651 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1652 timeout = MPR_ABORT_TIMEOUT;
1654 timeout = sc->nvme_abort_timeout;
1656 callout_reset(&tm->cm_callout, timeout * hz,
1657 mprsas_tm_timeout, tm);
1661 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1663 err = mpr_map_command(sc, tm);
1665 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1666 "error %d sending abort for cm %p SMID %u\n",
1667 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O.  Marks the command as
 * timed out, queues it on the target's timedout list, and starts
 * recovery: if the target has no TM in flight, allocates one and sends
 * an abort for this command; otherwise the existing TM picks it up.
 */
1672 mprsas_scsiio_timeout(void *data)
1674 sbintime_t elapsed, now;
1676 struct mpr_softc *sc;
1677 struct mpr_command *cm;
1678 struct mprsas_target *targ;
1680 cm = (struct mpr_command *)data;
1686 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1688 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1691 * Run the interrupt handler to make sure it's not pending. This
1692 * isn't perfect because the command could have already completed
1693 * and been re-used, though this is unlikely.
1695 mpr_intr_locked(sc);
1696 if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
1697 mprsas_log_command(cm, MPR_XINFO,
1698 "SCSI command %p almost timed out\n", cm);
1702 if (cm->cm_ccb == NULL) {
1703 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
/* Log how long the command has actually been outstanding. */
1710 elapsed = now - ccb->ccb_h.qos.sim_data;
1711 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1712 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1713 targ->tid, targ->handle, ccb->ccb_h.timeout,
1714 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1715 if (targ->encl_level_valid) {
1716 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1717 "At enclosure level %d, slot %d, connector name (%4s)\n",
1718 targ->encl_level, targ->encl_slot, targ->connector_name);
1721 /* XXX first, check the firmware state, to see if it's still
1722 * operational. if not, do a diag reset.
1724 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1725 cm->cm_flags |= MPR_CM_FLAGS_ON_RECOVERY | MPR_CM_FLAGS_TIMEDOUT;
1726 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1728 if (targ->tm != NULL) {
1729 /* target already in recovery, just queue up another
1730 * timedout command to be processed later.
1732 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1733 "processing by tm %p\n", cm, targ->tm);
1735 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1737 /* start recovery by aborting the first timedout command */
1738 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1739 "Sending abort to target %u for SMID %d\n", targ->tid,
1740 cm->cm_desc.Default.SMID);
1741 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1743 mprsas_send_abort(sc, targ->tm, cm);
1746 /* XXX queue this target up for recovery once a TM becomes
1747 * available. The firmware only has a limited number of
1748 * HighPriority credits for the high priority requests used
1749 * for task management, and we ran out.
1751 * Isilon: don't worry about this for now, since we have
1752 * more credits than disks in an enclosure, and limit
1753 * ourselves to one TM per target for recovery.
1755 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1756 "timedout cm %p failed to allocate a tm\n", cm);
1761 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1763 * Return 0 - for success,
1764 * 1 - to immediately return back the command with success status to CAM
1765 * negative value - to fallback to firmware path i.e. issue scsi unmap
1766 * to FW without any translation.
1769 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1770 union ccb *ccb, struct mprsas_target *targ)
1772 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1773 struct ccb_scsiio *csio;
1774 struct unmap_parm_list *plist;
1775 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1776 struct nvme_command *c;
1778 uint16_t ndesc, list_len, data_length;
1779 struct mpr_prp_page *prp_page_info;
1780 uint64_t nvme_dsm_ranges_dma_handle;
/* Parameter list length is CDB bytes 7-8 (big-endian) of UNMAP. */
1783 #if __FreeBSD_version >= 1100103
1784 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1786 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1787 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1788 ccb->csio.cdb_io.cdb_ptr[8]);
1790 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1791 ccb->csio.cdb_io.cdb_bytes[8]);
1795 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1799 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1801 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1802 "save UNMAP data\n");
1806 /* Copy SCSI unmap data to a local buffer */
1807 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1809 /* return back the unmap command to CAM with success status,
1810 * if number of descripts is zero.
1812 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1814 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1815 "UNMAP cmd is Zero\n");
/* Refuse transfers larger than the device's reported MDTS. */
1820 data_length = ndesc * sizeof(struct nvme_dsm_range);
1821 if (data_length > targ->MDTS) {
1822 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1823 "Device's MDTS: %d\n", data_length, targ->MDTS);
1828 prp_page_info = mpr_alloc_prp_page(sc);
1829 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1830 "UNMAP command.\n", __func__));
1833 * Insert the allocated PRP page into the command's PRP page list. This
1834 * will be freed when the command is freed.
1836 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1838 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1839 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1841 bzero(nvme_dsm_ranges, data_length);
1843 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1844 * for each descriptors contained in SCSI UNMAP data.
1846 for (i = 0; i < ndesc; i++) {
1847 nvme_dsm_ranges[i].length =
1848 htole32(be32toh(plist->desc[i].nlb));
1849 nvme_dsm_ranges[i].starting_lba =
1850 htole64(be64toh(plist->desc[i].slba));
1851 nvme_dsm_ranges[i].attributes = 0;
1854 /* Build MPI2.6's NVMe Encapsulated Request Message */
1855 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1856 bzero(req, sizeof(*req));
1857 req->DevHandle = htole16(targ->handle);
1858 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1859 req->Flags = MPI26_NVME_FLAGS_WRITE;
/* NVMe error responses land in the command's sense buffer. */
1860 req->ErrorResponseBaseAddress.High =
1861 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1862 req->ErrorResponseBaseAddress.Low =
1863 htole32(cm->cm_sense_busaddr);
1864 req->ErrorResponseAllocationLength =
1865 htole16(sizeof(struct nvme_completion));
1866 req->EncapsulatedCommandLength =
1867 htole16(sizeof(struct nvme_command));
1868 req->DataLength = htole32(data_length);
1870 /* Build NVMe DSM command */
1871 c = (struct nvme_command *) req->NVMe_Command;
1872 c->opc = NVME_OPC_DATASET_MANAGEMENT;
/* NVMe namespaces are 1-based; CAM LUNs are 0-based. */
1873 c->nsid = htole32(csio->ccb_h.target_lun + 1);
/* cdw10 holds the number of ranges minus one, per the NVMe spec. */
1874 c->cdw10 = htole32(ndesc - 1);
1875 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1877 cm->cm_length = data_length;
1880 cm->cm_complete = mprsas_scsiio_complete;
1881 cm->cm_complete_data = ccb;
1883 cm->cm_lun = csio->ccb_h.target_lun;
1886 cm->cm_desc.Default.RequestFlags =
1887 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
/* Record submit time and arm the I/O timeout callout. */
1889 csio->ccb_h.qos.sim_data = sbinuptime();
1890 #if __FreeBSD_version >= 1000029
1891 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1892 mprsas_scsiio_timeout, cm, 0);
1893 #else //__FreeBSD_version < 1000029
1894 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1895 mprsas_scsiio_timeout, cm);
1896 #endif //__FreeBSD_version >= 1000029
1899 targ->outstanding++;
1900 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1901 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1903 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1904 __func__, cm, ccb, targ->outstanding);
1906 mpr_build_nvme_prp(sc, cm, req,
1907 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1908 mpr_map_command(sc, cm);
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, translate UNMAP to native NVMe DSM where possible,
 * build the MPI SCSI_IO request (direction, tagging, LUN, CDB, EEDP),
 * arm the timeout, and submit the command to the hardware.
 */
1916 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1918 MPI2_SCSI_IO_REQUEST *req;
1919 struct ccb_scsiio *csio;
1920 struct mpr_softc *sc;
1921 struct mprsas_target *targ;
1922 struct mprsas_lun *lun;
1923 struct mpr_command *cm;
1924 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1925 uint16_t eedp_flags;
1926 uint32_t mpi_control;
1931 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1934 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1935 ("Target %d out of bounds in XPT_SCSI_IO\n",
1936 csio->ccb_h.target_id));
1937 targ = &sassc->targets[csio->ccb_h.target_id];
1938 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* A zero handle means no device is present at this target ID. */
1939 if (targ->handle == 0x0) {
1940 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1941 __func__, csio->ccb_h.target_id);
1942 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component disks are not directly addressable via SCSI I/O. */
1946 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1947 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1948 "supported %u\n", __func__, csio->ccb_h.target_id);
1949 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1954 * Sometimes, it is possible to get a command that is not "In
1955 * Progress" and was actually aborted by the upper layer. Check for
1956 * this here and complete the command without error.
1958 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1959 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1960 "target %u\n", __func__, csio->ccb_h.target_id);
1965 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1966 * that the volume has timed out. We want volumes to be enumerated
1967 * until they are deleted/removed, not just failed.
1969 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1970 if (targ->devinfo == 0)
1971 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1973 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
/* Refuse new I/O once the controller is shutting down. */
1978 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1979 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1980 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1986 * If target has a reset in progress, freeze the devq and return. The
1987 * devq will be released when the TM reset is finished.
1989 if (targ->flags & MPRSAS_TARGET_INRESET) {
1990 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1991 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1992 __func__, targ->tid);
1993 xpt_freeze_devq(ccb->ccb_h.path, 1);
1998 cm = mpr_alloc_command(sc);
/* Out of commands (or mid diag reset): freeze the simq and requeue. */
1999 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
2001 mpr_free_command(sc, cm);
2003 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2004 xpt_freeze_simq(sassc->sim, 1);
2005 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2007 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2008 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2013 /* For NVME device's issue UNMAP command directly to NVME drives by
2014 * constructing equivalent native NVMe DataSetManagement command.
2016 #if __FreeBSD_version >= 1100103
2017 scsi_opcode = scsiio_cdb_ptr(csio)[0];
2019 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2020 scsi_opcode = csio->cdb_io.cdb_ptr[0];
2022 scsi_opcode = csio->cdb_io.cdb_bytes[0];
2024 if (scsi_opcode == UNMAP &&
2026 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2027 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2028 if (rc == 1) { /* return command to CAM with success status */
2029 mpr_free_command(sc, cm);
2030 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2033 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
/* Fall through: build a regular MPI SCSI_IO request. */
2037 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2038 bzero(req, sizeof(*req));
2039 req->DevHandle = htole16(targ->handle);
2040 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2042 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2043 req->SenseBufferLength = MPR_SENSE_LEN;
2045 req->ChainOffset = 0;
2046 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2051 req->DataLength = htole32(csio->dxfer_len);
2052 req->BidirectionalDataLength = 0;
2053 req->IoFlags = htole16(csio->cdb_len);
2056 /* Note: BiDirectional transfers are not supported */
2057 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2059 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2060 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2063 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2064 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2068 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
2072 if (csio->cdb_len == 32)
2073 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2075 * It looks like the hardware doesn't require an explicit tag
2076 * number for each transaction. SAM Task Management not supported
2079 switch (csio->tag_action) {
2080 case MSG_HEAD_OF_Q_TAG:
2081 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2083 case MSG_ORDERED_Q_TAG:
2084 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2087 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2089 case CAM_TAG_ACTION_NONE:
2090 case MSG_SIMPLE_Q_TAG:
2092 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Merge per-target Transport Layer Retry settings. */
2095 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2096 req->Control = htole32(mpi_control);
2098 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2099 mpr_free_command(sc, cm);
2100 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from wherever CAM placed it (pointer vs. inline). */
2105 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2106 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2108 KASSERT(csio->cdb_len <= IOCDBLEN,
2109 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2110 "is not set", csio->cdb_len));
2111 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2113 req->IoFlags = htole16(csio->cdb_len);
2116 * Check if EEDP is supported and enabled. If it is then check if the
2117 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2118 * is formatted for EEDP support. If all of this is true, set CDB up
2119 * for EEDP transfer.
2121 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2122 if (sc->eedp_enabled && eedp_flags) {
2123 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2124 if (lun->lun_id == csio->ccb_h.target_lun) {
2129 if ((lun != NULL) && (lun->eedp_formatted)) {
2130 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2131 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2132 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2133 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2134 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2136 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2138 req->EEDPFlags = htole16(eedp_flags);
2141 * If CDB less than 32, fill in Primary Ref Tag with
2142 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2143 * already there. Also, set protection bit. FreeBSD
2144 * currently does not support CDBs bigger than 16, but
2145 * the code doesn't hurt, and will be here for the
2148 if (csio->cdb_len != 32) {
2149 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2150 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2151 PrimaryReferenceTag;
2152 for (i = 0; i < 4; i++) {
2154 req->CDB.CDB32[lba_byte + i];
2157 req->CDB.EEDP32.PrimaryReferenceTag =
2159 CDB.EEDP32.PrimaryReferenceTag);
2160 req->CDB.EEDP32.PrimaryApplicationTagMask =
2163 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2166 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2167 req->EEDPFlags = htole16(eedp_flags);
2168 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2174 cm->cm_length = csio->dxfer_len;
2175 if (cm->cm_length != 0) {
2177 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2181 cm->cm_sge = &req->SGL;
2182 cm->cm_sglsize = (32 - 24) * 4;
2183 cm->cm_complete = mprsas_scsiio_complete;
2184 cm->cm_complete_data = ccb;
2186 cm->cm_lun = csio->ccb_h.target_lun;
2189 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2190 * and set descriptor type.
2192 if (targ->scsi_req_desc_type ==
2193 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2194 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2195 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2196 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2197 if (!sc->atomic_desc_capable) {
2198 cm->cm_desc.FastPathSCSIIO.DevHandle =
2199 htole16(targ->handle);
2202 cm->cm_desc.SCSIIO.RequestFlags =
2203 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2204 if (!sc->atomic_desc_capable)
2205 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Record submit time and arm the I/O timeout callout. */
2208 csio->ccb_h.qos.sim_data = sbinuptime();
2209 #if __FreeBSD_version >= 1000029
2210 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2211 mprsas_scsiio_timeout, cm, 0);
2212 #else //__FreeBSD_version < 1000029
2213 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2214 mprsas_scsiio_timeout, cm);
2215 #endif //__FreeBSD_version >= 1000029
2218 targ->outstanding++;
2219 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2220 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2222 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2223 __func__, cm, ccb, targ->outstanding);
2225 mpr_map_command(sc, cm);
2230 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Decode and log the IOC status, SCSI status/state, sense data and
 * response info of a failed SCSI_IO reply for diagnostics.  Purely
 * informational; does not alter command state.
 */
2233 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2234 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2238 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2239 MPI2_IOCSTATUS_MASK;
2240 u8 scsi_state = mpi_reply->SCSIState;
2241 u8 scsi_status = mpi_reply->SCSIStatus;
2242 char *desc_ioc_state = NULL;
2243 char *desc_scsi_status = NULL;
2244 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is filtered out; meaning not visible here — see full source. */
2246 if (log_info == 0x31170000)
2249 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2251 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2254 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2255 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2256 if (targ->encl_level_valid) {
2257 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2258 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2259 targ->connector_name);
2263 * We can add more detail about underflow data here
2266 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2267 "scsi_state %b\n", desc_scsi_status, scsi_status,
2268 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2269 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
/* Dump the autosense data when debugging is enabled and sense is valid. */
2271 if (sc->mpr_debug & MPR_XINFO &&
2272 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2273 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2274 scsi_sense_print(csio);
2275 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2278 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2279 response_info = le32toh(mpi_reply->ResponseInfo);
2280 response_bytes = (u8 *)&response_info;
2281 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2283 mpr_describe_table(mpr_scsi_taskmgmt_string,
2284 response_bytes[0]));
2288 /** mprsas_nvme_trans_status_code
2290  * Convert Native NVMe command error status to
2291  * equivalent SCSI error status.
2293  * Returns appropriate scsi_status
2296 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2297     struct mpr_command *cm)
2299 u8 status = MPI2_SCSI_STATUS_GOOD;
2300 int skey, asc, ascq;
2301 union ccb *ccb = cm->cm_complete_data;
2302 int returned_sense_len;
/* Decode the status-code-type (SCT) and status-code (SC) fields of the
 * NVMe completion status; the switch below (not fully visible in this
 * chunk) maps each combination to a SCSI status plus a fixed
 * sense-key/ASC/ASCQ triple. */
2305 sct = NVME_STATUS_GET_SCT(nvme_status);
2306 sc = NVME_STATUS_GET_SC(nvme_status);
/* Default translation: CHECK CONDITION / ILLEGAL REQUEST with no
 * additional sense information, used when no specific mapping below
 * overrides it. */
2308 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2309 skey = SSD_KEY_ILLEGAL_REQUEST;
2310 asc = SCSI_ASC_NO_SENSE;
2311 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Generic command status codes (NVMe SCT 0). */
2314 case NVME_SCT_GENERIC:
2316 case NVME_SC_SUCCESS:
2317 status = MPI2_SCSI_STATUS_GOOD;
2318 skey = SSD_KEY_NO_SENSE;
2319 asc = SCSI_ASC_NO_SENSE;
2320 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2322 case NVME_SC_INVALID_OPCODE:
2323 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2324 skey = SSD_KEY_ILLEGAL_REQUEST;
2325 asc = SCSI_ASC_ILLEGAL_COMMAND;
2326 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2328 case NVME_SC_INVALID_FIELD:
2329 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2330 skey = SSD_KEY_ILLEGAL_REQUEST;
2331 asc = SCSI_ASC_INVALID_CDB;
2332 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2334 case NVME_SC_DATA_TRANSFER_ERROR:
2335 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2336 skey = SSD_KEY_MEDIUM_ERROR;
2337 asc = SCSI_ASC_NO_SENSE;
2338 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2340 case NVME_SC_ABORTED_POWER_LOSS:
2341 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2342 skey = SSD_KEY_ABORTED_COMMAND;
2343 asc = SCSI_ASC_WARNING;
2344 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2346 case NVME_SC_INTERNAL_DEVICE_ERROR:
2347 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2348 skey = SSD_KEY_HARDWARE_ERROR;
2349 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2350 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* All abort flavors collapse onto TASK ABORTED / ABORTED COMMAND. */
2352 case NVME_SC_ABORTED_BY_REQUEST:
2353 case NVME_SC_ABORTED_SQ_DELETION:
2354 case NVME_SC_ABORTED_FAILED_FUSED:
2355 case NVME_SC_ABORTED_MISSING_FUSED:
2356 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2357 skey = SSD_KEY_ABORTED_COMMAND;
2358 asc = SCSI_ASC_NO_SENSE;
2359 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2361 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2362 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2363 skey = SSD_KEY_ILLEGAL_REQUEST;
2364 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2365 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2367 case NVME_SC_LBA_OUT_OF_RANGE:
2368 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2369 skey = SSD_KEY_ILLEGAL_REQUEST;
2370 asc = SCSI_ASC_ILLEGAL_BLOCK;
2371 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2373 case NVME_SC_CAPACITY_EXCEEDED:
2374 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2375 skey = SSD_KEY_MEDIUM_ERROR;
2376 asc = SCSI_ASC_NO_SENSE;
2377 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2379 case NVME_SC_NAMESPACE_NOT_READY:
2380 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2381 skey = SSD_KEY_NOT_READY;
2382 asc = SCSI_ASC_LUN_NOT_READY;
2383 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Command-specific status codes (NVMe SCT 1). */
2387 case NVME_SCT_COMMAND_SPECIFIC:
2389 case NVME_SC_INVALID_FORMAT:
2390 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2391 skey = SSD_KEY_ILLEGAL_REQUEST;
2392 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2393 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2395 case NVME_SC_CONFLICTING_ATTRIBUTES:
2396 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2397 skey = SSD_KEY_ILLEGAL_REQUEST;
2398 asc = SCSI_ASC_INVALID_CDB;
2399 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Media and data-integrity status codes (NVMe SCT 2); most map to
 * MEDIUM ERROR with protection-information-specific ASC/ASCQ. */
2403 case NVME_SCT_MEDIA_ERROR:
2405 case NVME_SC_WRITE_FAULTS:
2406 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2407 skey = SSD_KEY_MEDIUM_ERROR;
2408 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2409 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2411 case NVME_SC_UNRECOVERED_READ_ERROR:
2412 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2413 skey = SSD_KEY_MEDIUM_ERROR;
2414 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2415 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2417 case NVME_SC_GUARD_CHECK_ERROR:
2418 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2419 skey = SSD_KEY_MEDIUM_ERROR;
2420 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2421 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2423 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2424 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2425 skey = SSD_KEY_MEDIUM_ERROR;
2426 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2427 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2429 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2430 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2431 skey = SSD_KEY_MEDIUM_ERROR;
2432 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2433 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2435 case NVME_SC_COMPARE_FAILURE:
2436 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2437 skey = SSD_KEY_MISCOMPARE;
2438 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2439 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2441 case NVME_SC_ACCESS_DENIED:
2442 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2443 skey = SSD_KEY_ILLEGAL_REQUEST;
2444 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2445 ascq = SCSI_ASCQ_INVALID_LUN_ID;
/* Build fixed-format autosense data in the CCB from the sense-key/
 * ASC/ASCQ chosen above, account for any sense residual, and flag the
 * CCB as carrying valid sense data. */
2451 returned_sense_len = sizeof(struct scsi_sense_data);
2452 if (returned_sense_len < ccb->csio.sense_len)
2453 ccb->csio.sense_resid = ccb->csio.sense_len -
2456 ccb->csio.sense_resid = 0;
2458 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2459 1, skey, asc, ascq, SSD_ELEM_NONE);
2460 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2465 /** mprsas_complete_nvme_unmap
2467  * Complete native NVMe command issued using NVMe Encapsulated
2471 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2473 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2474 struct nvme_completion *nvme_completion = NULL;
2475 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2477 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
/* A non-zero ErrorResponseCount indicates the NVMe error response frame
 * was delivered into the command's sense buffer; translate its NVMe
 * completion status into an equivalent SCSI status (which also fills in
 * autosense on the CCB).  The return of scsi_status is not visible in
 * this chunk -- presumably it follows. */
2478 if (le16toh(mpi_reply->ErrorResponseCount)){
2479 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2480 scsi_status = mprsas_nvme_trans_status_code(
2481 nvme_completion->status, cm);
/*
 * Completion handler for SCSI I/O commands.  Translates the firmware
 * reply (IOCStatus / SCSIStatus / SCSIState) into CAM CCB status,
 * copies autosense data, manages SIM/devq freezing, and frees the
 * command.  NOTE(review): interior lines of this chunk are not visible;
 * the comments below describe only the code that is shown.
 */
2487 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2489 MPI2_SCSI_IO_REPLY *rep;
2491 struct ccb_scsiio *csio;
2492 struct mprsas_softc *sassc;
2493 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2494 u8 *TLR_bits, TLR_on, *scsi_cdb;
2497 struct mprsas_target *target;
2498 target_id_t target_id;
2501 mpr_dprint(sc, MPR_TRACE,
2502 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2503 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2504 cm->cm_targ->outstanding);
/* Stop the per-command timeout; completion must hold the softc lock. */
2506 callout_stop(&cm->cm_callout);
2507 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2510 ccb = cm->cm_complete_data;
2512 target_id = csio->ccb_h.target_id;
2513 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2515 * XXX KDM if the chain allocation fails, does it matter if we do
2516 * the sync and unload here? It is simpler to do it in every case,
2517 * assuming it doesn't cause problems.
/* Sync and unload the data DMA map according to transfer direction. */
2519 if (cm->cm_data != NULL) {
2520 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2521 dir = BUS_DMASYNC_POSTREAD;
2522 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2523 dir = BUS_DMASYNC_POSTWRITE;
2524 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2525 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Book-keeping: the command is no longer outstanding on the target. */
2528 cm->cm_targ->completed++;
2529 cm->cm_targ->outstanding--;
2530 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2531 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
/* Log completions that race with timeout recovery, an in-flight task
 * management command, or a diagnostic reset. */
2533 if (cm->cm_flags & MPR_CM_FLAGS_ON_RECOVERY) {
2534 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2535 KASSERT(cm->cm_state == MPR_CM_STATE_BUSY,
2536 ("Not busy for CM_FLAGS_TIMEDOUT: %d\n", cm->cm_state));
2537 cm->cm_flags &= ~MPR_CM_FLAGS_ON_RECOVERY;
2538 if (cm->cm_reply != NULL)
2539 mprsas_log_command(cm, MPR_RECOVERY,
2540 "completed timedout cm %p ccb %p during recovery "
2541 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2542 le16toh(rep->IOCStatus), rep->SCSIStatus,
2543 rep->SCSIState, le32toh(rep->TransferCount));
2545 mprsas_log_command(cm, MPR_RECOVERY,
2546 "completed timedout cm %p ccb %p during recovery\n",
2548 } else if (cm->cm_targ->tm != NULL) {
2549 if (cm->cm_reply != NULL)
2550 mprsas_log_command(cm, MPR_RECOVERY,
2551 "completed cm %p ccb %p during recovery "
2552 "ioc %x scsi %x state %x xfer %u\n",
2553 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2554 rep->SCSIStatus, rep->SCSIState,
2555 le32toh(rep->TransferCount));
2557 mprsas_log_command(cm, MPR_RECOVERY,
2558 "completed cm %p ccb %p during recovery\n",
2560 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2561 mprsas_log_command(cm, MPR_RECOVERY,
2562 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2565 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2567 * We ran into an error after we tried to map the command,
2568 * so we're getting a callback without queueing the command
2569 * to the hardware. So we set the status here, and it will
2570 * be retained below. We'll go through the "fast path",
2571 * because there can be no reply when we haven't actually
2572 * gone out to the hardware.
2574 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2577 * Currently the only error included in the mask is
2578 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2579 * chain frames. We need to freeze the queue until we get
2580 * a command that completed without this error, which will
2581 * hopefully have some chain frames attached that we can
2582 * use. If we wanted to get smarter about it, we would
2583 * only unfreeze the queue in this condition when we're
2584 * sure that we're getting some chain frames back. That's
2585 * probably unnecessary.
2587 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2588 xpt_freeze_simq(sassc->sim, 1);
2589 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2590 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2591 "freezing SIM queue\n");
2596 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2597 * flag, and use it in a few places in the rest of this function for
2598 * convenience. Use the macro if available.
2600 #if __FreeBSD_version >= 1100103
2601 scsi_cdb = scsiio_cdb_ptr(csio);
2603 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2604 scsi_cdb = csio->cdb_io.cdb_ptr;
2606 scsi_cdb = csio->cdb_io.cdb_bytes;
2610 * If this is a Start Stop Unit command and it was issued by the driver
2611 * during shutdown, decrement the refcount to account for all of the
2612 * commands that were sent. All SSU commands should be completed before
2613 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2616 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2617 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2621 /* Take the fast path to completion */
2622 if (cm->cm_reply == NULL) {
/* No reply frame means success unless a status was set above (mapping
 * error or diagnostic reset). */
2623 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2624 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2625 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2627 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2628 csio->scsi_status = SCSI_STATUS_OK;
2630 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2631 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2632 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2633 mpr_dprint(sc, MPR_XINFO,
2634 "Unfreezing SIM queue\n");
2639 * There are two scenarios where the status won't be
2640 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2641 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2643 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2645 * Freeze the dev queue so that commands are
2646 * executed in the correct order after error
2649 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2650 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2652 mpr_free_command(sc, cm);
/* Slow path: a reply frame exists and must be decoded. */
2657 target = &sassc->targets[target_id];
/* UNMAP completions with virtual data are routed through the NVMe
 * unmap helper, which synthesizes the SCSI status from the
 * encapsulated NVMe reply. */
2658 if (scsi_cdb[0] == UNMAP &&
2660 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2661 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2662 csio->scsi_status = rep->SCSIStatus;
2665 mprsas_log_command(cm, MPR_XINFO,
2666 "ioc %x scsi %x state %x xfer %u\n",
2667 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2668 le32toh(rep->TransferCount));
/* Map the firmware IOCStatus onto a CAM status. */
2670 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2671 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2672 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2674 case MPI2_IOCSTATUS_SUCCESS:
2675 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2676 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2677 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2678 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2680 /* Completion failed at the transport level. */
2681 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2682 MPI2_SCSI_STATE_TERMINATED)) {
2683 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2687 /* In a modern packetized environment, an autosense failure
2688 * implies that there's not much else that can be done to
2689 * recover the command.
2691 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2692 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2697 * CAM doesn't care about SAS Response Info data, but if this is
2698 * the state check if TLR should be done. If not, clear the
2699 * TLR_bits for the target.
2701 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2702 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2703 == MPR_SCSI_RI_INVALID_FRAME)) {
2704 sc->mapping_table[target_id].TLR_bits =
2705 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2709 * Intentionally override the normal SCSI status reporting
2710 * for these two cases. These are likely to happen in a
2711 * multi-initiator environment, and we want to make sure that
2712 * CAM retries these commands rather than fail them.
2714 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2715 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2716 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2720 /* Handle normal status and sense */
2721 csio->scsi_status = rep->SCSIStatus;
2722 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2723 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
2725 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
/* Copy autosense data from the command's sense buffer into the CCB,
 * clamped to both the returned count and the caller's sense_len. */
2727 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2728 int sense_len, returned_sense_len;
2730 returned_sense_len = min(le32toh(rep->SenseCount),
2731 sizeof(struct scsi_sense_data));
2732 if (returned_sense_len < csio->sense_len)
2733 csio->sense_resid = csio->sense_len -
2736 csio->sense_resid = 0;
2738 sense_len = min(returned_sense_len,
2739 csio->sense_len - csio->sense_resid);
2740 bzero(&csio->sense_data, sizeof(csio->sense_data));
2741 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2742 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2746 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2747 * and it's page code 0 (Supported Page List), and there is
2748 * inquiry data, and this is for a sequential access device, and
2749 * the device is an SSP target, and TLR is supported by the
2750 * controller, turn the TLR_bits value ON if page 0x90 is
2753 if ((scsi_cdb[0] == INQUIRY) &&
2754 (scsi_cdb[1] & SI_EVPD) &&
2755 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2756 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2757 (csio->data_ptr != NULL) &&
2758 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2759 (sc->control_TLR) &&
2760 (sc->mapping_table[target_id].device_info &
2761 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2762 vpd_list = (struct scsi_vpd_supported_page_list *)
2764 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2765 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2766 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
/* Allocation length comes from CDB bytes 3-4, less any residual. */
2767 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2768 alloc_len -= csio->resid;
2769 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2770 if (vpd_list->list[i] == 0x90) {
2778 * If this is a SATA direct-access end device, mark it so that
2779 * a SCSI StartStopUnit command will be sent to it when the
2780 * driver is being shutdown.
2782 if ((scsi_cdb[0] == INQUIRY) &&
2783 (csio->data_ptr != NULL) &&
2784 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2785 (sc->mapping_table[target_id].device_info &
2786 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2787 ((sc->mapping_table[target_id].device_info &
2788 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2789 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2790 target = &sassc->targets[target_id];
2791 target->supports_SSU = TRUE;
2792 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2796 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2797 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2799 * If devinfo is 0 this will be a volume. In that case don't
2800 * tell CAM that the volume is not there. We want volumes to
2801 * be enumerated until they are deleted/removed, not just
2804 if (cm->cm_targ->devinfo == 0)
2805 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2807 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2809 case MPI2_IOCSTATUS_INVALID_SGL:
2810 mpr_print_scsiio_cmd(sc, cm);
2811 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2813 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2815 * This is one of the responses that comes back when an I/O
2816 * has been aborted. If it is because of a timeout that we
2817 * initiated, just set the status to CAM_CMD_TIMEOUT.
2818 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2819 * command is the same (it gets retried, subject to the
2820 * retry counter), the only difference is what gets printed
2823 if (cm->cm_flags & MPR_CM_FLAGS_TIMEDOUT)
2824 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2826 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2828 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2829 /* resid is ignored for this condition */
2831 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2833 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2834 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2836 * These can sometimes be transient transport-related
2837 * errors, and sometimes persistent drive-related errors.
2838 * We used to retry these without decrementing the retry
2839 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2840 * we hit a persistent drive problem that returns one of
2841 * these error codes, we would retry indefinitely. So,
2842 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2843 * count and avoid infinite retries. We're taking the
2844 * potential risk of flagging false failures in the event
2845 * of a topology-related error (e.g. a SAS expander problem
2846 * causes a command addressed to a drive to fail), but
2847 * avoiding getting into an infinite retry loop.
2849 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2850 mpr_dprint(sc, MPR_INFO,
2851 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2852 mpr_describe_table(mpr_iocstatus_string,
2853 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2854 target_id, cm->cm_desc.Default.SMID,
2855 le32toh(rep->IOCLogInfo));
2856 mpr_dprint(sc, MPR_XINFO,
2857 "SCSIStatus %x SCSIState %x xfercount %u\n",
2858 rep->SCSIStatus, rep->SCSIState,
2859 le32toh(rep->TransferCount));
2861 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2862 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2863 case MPI2_IOCSTATUS_INVALID_VPID:
2864 case MPI2_IOCSTATUS_INVALID_FIELD:
2865 case MPI2_IOCSTATUS_INVALID_STATE:
2866 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2867 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2868 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2869 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2870 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2872 mprsas_log_command(cm, MPR_XINFO,
2873 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2874 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2875 rep->SCSIStatus, rep->SCSIState,
2876 le32toh(rep->TransferCount));
2877 csio->resid = cm->cm_length;
/* Presumably an NVMe-specific carve-out: an UNMAP sent to such a
 * device may still be reported complete -- TODO confirm against the
 * missing condition on line 2880. */
2879 if (scsi_cdb[0] == UNMAP &&
2881 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2882 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2884 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2889 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
/* A clean completion means chain frames are available again; release
 * the SIM queue if it was frozen by an earlier chain failure. */
2891 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2892 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2893 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2894 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
/* Freeze the device queue on any unsuccessful completion so retries
 * are issued in order. */
2898 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2899 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2900 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2903 mpr_free_command(sc, cm);
2907 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests: validate the reply,
 * derive CAM status from IOCStatus/SASStatus and the SMP response
 * frame's function result, then sync/unload the DMA map and free the
 * command.
 */
2909 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2911 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2912 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2916 ccb = cm->cm_complete_data;
2919 * Currently there should be no way we can hit this case. It only
2920 * happens when we have a failure to allocate chain frames, and SMP
2921 * commands require two S/G elements only. That should be handled
2922 * in the standard request size.
2924 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2925 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2926 "request!\n", __func__, cm->cm_flags);
2927 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* No reply frame at all is a hard error for SMP. */
2931 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2933 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2934 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the request for logging. */
2938 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2939 sasaddr = le32toh(req->SASAddress.Low);
2940 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2942 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2943 MPI2_IOCSTATUS_SUCCESS ||
2944 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2945 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2946 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2947 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2951 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2952 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2954 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2955 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2957 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2961 * We sync in both directions because we had DMAs in the S/G list
2962 * in both directions.
2964 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2965 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2966 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2967 mpr_free_command(sc, cm);
/*
 * Build and send an SMP passthrough request to 'sasaddr' on behalf of
 * an XPT_SMP_IO CCB.  The request and response buffers are handed to
 * the chip as a two-element uio (one S/G segment each); completion is
 * handled by mprsas_smpio_complete().  Physical-address CCBs are
 * rejected on all supported FreeBSD versions.
 */
2972 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2974 struct mpr_command *cm;
2975 uint8_t *request, *response;
2976 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2977 struct mpr_softc *sc;
/* Newer CAM API: data-location style is encoded in CAM_DATA_MASK. */
2985 #if (__FreeBSD_version >= 1000028) || \
2986 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2987 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2988 case CAM_DATA_PADDR:
2989 case CAM_DATA_SG_PADDR:
2991 * XXX We don't yet support physical addresses here.
2993 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2994 "supported\n", __func__);
2995 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3000 * The chip does not support more than one buffer for the
3001 * request or response.
3003 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3004 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3005 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3006 "response buffer segments not supported for SMP\n",
3008 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3014 * The CAM_SCATTER_VALID flag was originally implemented
3015 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3016 * We have two. So, just take that flag to mean that we
3017 * might have S/G lists, and look at the S/G segment count
3018 * to figure out whether that is the case for each individual
3021 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3022 bus_dma_segment_t *req_sg;
3024 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3025 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3027 request = ccb->smpio.smp_request;
3029 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3030 bus_dma_segment_t *rsp_sg;
3032 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3033 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3035 response = ccb->smpio.smp_response;
3037 case CAM_DATA_VADDR:
3038 request = ccb->smpio.smp_request;
3039 response = ccb->smpio.smp_response;
3042 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3046 #else /* __FreeBSD_version < 1000028 */
3048 * XXX We don't yet support physical addresses here.
3050 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3051 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3052 "supported\n", __func__);
3053 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3059 * If the user wants to send an S/G list, check to make sure they
3060 * have single buffers.
3062 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3064 * The chip does not support more than one buffer for the
3065 * request or response.
3067 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3068 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3069 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3070 "response buffer segments not supported for SMP\n",
3072 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3078 * The CAM_SCATTER_VALID flag was originally implemented
3079 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3080 * We have two. So, just take that flag to mean that we
3081 * might have S/G lists, and look at the S/G segment count
3082 * to figure out whether that is the case for each individual
3085 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3086 bus_dma_segment_t *req_sg;
3088 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3089 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3091 request = ccb->smpio.smp_request;
3093 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3094 bus_dma_segment_t *rsp_sg;
3096 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3097 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3099 response = ccb->smpio.smp_response;
3101 request = ccb->smpio.smp_request;
3102 response = ccb->smpio.smp_response;
3104 #endif /* __FreeBSD_version < 1000028 */
3106 cm = mpr_alloc_command(sc);
3108 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3110 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the MPI SMP passthrough request frame. */
3115 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3116 bzero(req, sizeof(*req));
3117 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3119 /* Allow the chip to use any route to this SAS address. */
3120 req->PhysicalPort = 0xff;
3122 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3124 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3126 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3127 "%#jx\n", __func__, (uintmax_t)sasaddr);
3129 mpr_init_sge(cm, req, &req->SGL);
3132 * Set up a uio to pass into mpr_map_command(). This allows us to
3133 * do one map command, and one busdma call in there.
3135 cm->cm_uio.uio_iov = cm->cm_iovec;
3136 cm->cm_uio.uio_iovcnt = 2;
3137 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3140 * The read/write flag isn't used by busdma, but set it just in
3141 * case. This isn't exactly accurate, either, since we're going in
3144 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = request buffer, iovec[1] = response buffer. */
3146 cm->cm_iovec[0].iov_base = request;
3147 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3148 cm->cm_iovec[1].iov_base = response;
3149 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3151 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3152 cm->cm_iovec[1].iov_len;
3155 * Trigger a warning message in mpr_data_cb() for the user if we
3156 * wind up exceeding two S/G segments. The chip expects one
3157 * segment for the request and another for the response.
3159 cm->cm_max_segs = 2;
3161 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3162 cm->cm_complete = mprsas_smpio_complete;
3163 cm->cm_complete_data = ccb;
3166 * Tell the mapping code that we're using a uio, and that this is
3167 * an SMP passthrough request. There is a little special-case
3168 * logic there (in mpr_data_cb()) to handle the bidirectional
3171 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3172 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3174 /* The chip data format is little endian. */
3175 req->SASAddress.High = htole32(sasaddr >> 32);
3176 req->SASAddress.Low = htole32(sasaddr);
3179 * XXX Note that we don't have a timeout/abort mechanism here.
3180 * From the manual, it looks like task management requests only
3181 * work for SCSI IO and SATA passthrough requests. We may need to
3182 * have a mechanism to retry requests in the event of a chip reset
3183 * at least. Hopefully the chip will insure that any errors short
3184 * of that are relayed back to the driver.
3186 error = mpr_map_command(sc, cm);
3187 if ((error != 0) && (error != EINPROGRESS)) {
3188 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3189 "mpr_map_command()\n", __func__, error);
/* Error path: release the command and fail the CCB. */
3196 mpr_free_command(sc, cm);
3197 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: resolve the SAS address of the SMP target
 * (the device itself if it is an SMP target, otherwise its parent
 * expander) and dispatch the request via mprsas_send_smpcmd().
 */
3203 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3205 struct mpr_softc *sc;
3206 struct mprsas_target *targ;
3207 uint64_t sasaddr = 0;
3212 * Make sure the target exists.
3214 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3215 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3216 targ = &sassc->targets[ccb->ccb_h.target_id];
3217 if (targ->handle == 0x0) {
3218 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3219 __func__, ccb->ccb_h.target_id);
3220 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3226 * If this device has an embedded SMP target, we'll talk to it
3228 * figure out what the expander's address is.
3230 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3231 sasaddr = targ->sasaddr;
3234 * If we don't have a SAS address for the expander yet, try
3235 * grabbing it from the page 0x83 information cached in the
3236 * transport layer for this target. LSI expanders report the
3237 * expander SAS address as the port-associated SAS address in
3238 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3241 * XXX KDM disable this for now, but leave it commented out so that
3242 * it is obvious that this is another possible way to get the SAS
3245 * The parent handle method below is a little more reliable, and
3246 * the other benefit is that it works for devices other than SES
3247 * devices. So you can send a SMP request to a da(4) device and it
3248 * will get routed to the expander that device is attached to.
3249 * (Assuming the da(4) device doesn't contain an SMP target...)
3253 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3257 * If we still don't have a SAS address for the expander, look for
3258 * the parent device of this device, which is probably the expander.
3261 #ifdef OLD_MPR_PROBE
3262 struct mprsas_target *parent_target;
/* The target must have a valid parent handle to resolve further. */
3265 if (targ->parent_handle == 0x0) {
3266 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3267 "a valid parent handle!\n", __func__, targ->handle);
3268 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3271 #ifdef OLD_MPR_PROBE
/* Old probe path: look the parent up by handle and require it to be
 * an SMP target before using its SAS address. */
3272 parent_target = mprsas_find_target_by_handle(sassc, 0,
3273 targ->parent_handle);
3275 if (parent_target == NULL) {
3276 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3277 "a valid parent target!\n", __func__, targ->handle);
3278 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3282 if ((parent_target->devinfo &
3283 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3284 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3285 "does not have an SMP target!\n", __func__,
3286 targ->handle, parent_target->handle);
3287 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3291 sasaddr = parent_target->sasaddr;
3292 #else /* OLD_MPR_PROBE */
/* Current probe path: parent devinfo/sasaddr are cached on the target
 * itself. */
3293 if ((targ->parent_devinfo &
3294 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3295 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3296 "does not have an SMP target!\n", __func__,
3297 targ->handle, targ->parent_handle);
3298 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3302 if (targ->parent_sasaddr == 0x0) {
3303 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3304 "%d does not have a valid SAS address!\n", __func__,
3305 targ->handle, targ->parent_handle);
3306 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3310 sasaddr = targ->parent_sasaddr;
3311 #endif /* OLD_MPR_PROBE */
/* With no resolvable SAS address the request cannot be routed. */
3316 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3317 "handle %d\n", __func__, targ->handle);
3318 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3321 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3329 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command and
 * issue a target reset to the addressed device.  NVMe devices without
 * custom TM handling get a PCIe protocol-level reset instead of a
 * SAS/SATA link reset.  Completion is handled by
 * mprsas_resetdev_complete().
 */
3332 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3334 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3335 struct mpr_softc *sc;
3336 struct mpr_command *tm;
3337 struct mprsas_target *targ;
3339 MPR_FUNCTRACE(sassc->sc);
3340 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3342 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3343 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3345 tm = mprsas_alloc_tm(sc);
3347 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3348 "mprsas_action_resetdev\n");
3349 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the target-reset task management request. */
3354 targ = &sassc->targets[ccb->ccb_h.target_id];
3355 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3356 req->DevHandle = htole16(targ->handle);
3357 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3359 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3360 /* SAS Hard Link Reset / SATA Link Reset */
3361 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3363 /* PCIe Protocol Level Reset*/
3365 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3369 tm->cm_complete = mprsas_resetdev_complete;
3370 tm->cm_complete_data = ccb;
3372 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3373 __func__, targ->tid);
/* Quiesce outstanding I/O for the whole target, then send the TM. */
3376 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3377 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_action_resetdev().  Translates the TM reply's ResponseCode into
 * a CAM status on the saved CCB, announces the reset to CAM on success,
 * and frees the TM command.
 *
 * NOTE(review): chunk is elided; the declaration of 'ccb', some braces,
 * and the xpt_done()/cleanup path are not visible here.
 */
3381 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3383 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3387 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3389 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* The CCB that requested the reset was stashed at submit time. */
3390 ccb = tm->cm_complete_data;
3393 * Currently there should be no way we can hit this case. It only
3394 * happens when we have a failure to allocate chain frames, and
3395 * task management commands don't have S/G lists.
3397 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3398 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3400 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3402 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3403 "handle %#04x! This should not happen!\n", __func__,
3404 tm->cm_flags, req->DevHandle);
3405 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reply fields are little-endian; convert before logging/comparing. */
3409 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3410 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* TM_COMPLETE means the reset succeeded: complete the CCB and tell CAM
 * a bus-device-reset was sent so peripherals can recover state. */
3412 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3413 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3414 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3418 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3422 mprsas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point, used when interrupts are unavailable
 * (e.g. during kernel dumps/panics): drain the controller's reply
 * queue synchronously via mpr_intr_locked().
 */
3427 mprsas_poll(struct cam_sim *sim)
3429 struct mprsas_softc *sassc;
3431 sassc = cam_sim_softc(sim);
/* Polling happens very frequently; trace-level logging would flood the
 * console, so clear MPR_TRACE the first time we poll with it set. */
3433 if (sassc->sc->mpr_debug & MPR_TRACE) {
3434 /* frequent debug messages during a panic just slow
3435 * everything down too much.
3437 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3439 sassc->sc->mpr_debug &= ~MPR_TRACE;
3442 mpr_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  Two events are handled (version-gated by
 * __FreeBSD_version):
 *  - AC_ADVINFO_CHANGED: re-read READ CAPACITY(16) data via XPT_DEV_ADVINFO
 *    and update the per-LUN EEDP (protection information) state.
 *  - AC_FOUND_DEVICE: on older releases, probe the new device for EEDP
 *    support via mprsas_check_eedp().
 *
 * NOTE(review): chunk is elided; the switch statement header, several
 * local declarations (buftype, lunid, found_lun), braces and break/return
 * lines are not visible here -- comments below describe only what the
 * visible lines establish.
 */
3446 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3449 struct mpr_softc *sc;
3451 sc = (struct mpr_softc *)callback_arg;
3454 #if (__FreeBSD_version >= 1000006) || \
3455 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3456 case AC_ADVINFO_CHANGED: {
3457 struct mprsas_target *target;
3458 struct mprsas_softc *sassc;
3459 struct scsi_read_capacity_data_long rcap_buf;
3460 struct ccb_dev_advinfo cdai;
3461 struct mprsas_lun *lun;
/* The advinfo buffer type arrives as the opaque callback argument. */
3466 buftype = (uintptr_t)arg;
3472 * We're only interested in read capacity data changes.
3474 if (buftype != CDAI_TYPE_RCAPLONG)
3478 * See the comment in mpr_attach_sas() for a detailed
3479 * explanation. In these versions of FreeBSD we register
3480 * for all events and filter out the events that don't
3483 #if (__FreeBSD_version < 1000703) || \
3484 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
/* Ignore events for other SIMs on these FreeBSD versions. */
3485 if (xpt_path_path_id(path) != sassc->sim->path_id)
3490 * We should have a handle for this, but check to make sure.
3492 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3493 ("Target %d out of bounds in mprsas_async\n",
3494 xpt_path_target_id(path)));
3495 target = &sassc->targets[xpt_path_target_id(path)];
/* handle == 0 means the target slot is unused/gone; nothing to update. */
3496 if (target->handle == 0)
3499 lunid = xpt_path_lun_id(path);
/* Find the LUN record for this path, creating one if it is new. */
3501 SLIST_FOREACH(lun, &target->luns, lun_link) {
3502 if (lun->lun_id == lunid) {
3508 if (found_lun == 0) {
3509 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3512 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3513 "LUN for EEDP support.\n");
3516 lun->lun_id = lunid;
3517 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data through XPT_DEV_ADVINFO. */
3520 bzero(&rcap_buf, sizeof(rcap_buf));
3521 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3522 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3523 cdai.ccb_h.flags = CAM_DIR_IN;
3524 cdai.buftype = CDAI_TYPE_RCAPLONG;
3525 #if (__FreeBSD_version >= 1100061) || \
3526 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3527 cdai.flags = CDAI_FLAG_NONE;
3531 cdai.bufsiz = sizeof(rcap_buf);
3532 cdai.buf = (uint8_t *)&rcap_buf;
3533 xpt_action((union ccb *)&cdai);
/* Release the device queue if the advinfo action froze it. */
3534 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3535 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* On success with protection enabled, record the protection type;
 * otherwise mark the LUN as not EEDP-formatted. */
3537 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3538 && (rcap_buf.prot & SRC16_PROT_EN)) {
3539 switch (rcap_buf.prot & SRC16_P_TYPE) {
3542 lun->eedp_formatted = TRUE;
3543 lun->eedp_block_size =
3544 scsi_4btoul(rcap_buf.length);
3548 lun->eedp_formatted = FALSE;
3549 lun->eedp_block_size = 0;
3553 lun->eedp_formatted = FALSE;
3554 lun->eedp_block_size = 0;
3559 case AC_FOUND_DEVICE: {
3560 struct ccb_getdev *cgd;
3563 * See the comment in mpr_attach_sas() for a detailed
3564 * explanation. In these versions of FreeBSD we register
3565 * for all events and filter out the events that don't
3568 #if (__FreeBSD_version < 1000703) || \
3569 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3570 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3575 #if (__FreeBSD_version < 901503) || \
3576 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/* Older releases lack AC_ADVINFO_CHANGED; probe EEDP directly.
 * NOTE(review): 'cgd' is declared above but its assignment from the
 * callback argument is in an elided line -- verify. */
3577 mprsas_check_eedp(sc, path, cgd);
3586 #if (__FreeBSD_version < 901503) || \
3587 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * (Older FreeBSD only, see surrounding #if.)  Determine whether a newly
 * found device is EEDP (end-to-end data protection) capable, and if so
 * issue an internally generated READ CAPACITY(16) to learn whether the
 * LUN is actually formatted with protection information.  The result is
 * processed in mprsas_read_cap_done().
 *
 * NOTE(review): chunk is elided; declarations of 'ccb'/'pathid'/'lunid'/
 * 'path_str', several if-headers, braces and return/cleanup lines are not
 * visible here.
 */
3589 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3590 struct ccb_getdev *cgd)
3592 struct mprsas_softc *sassc = sc->sassc;
3593 struct ccb_scsiio *csio;
3594 struct scsi_read_capacity_16 *scsi_cmd;
3595 struct scsi_read_capacity_eedp *rcap_buf;
3597 target_id_t targetid;
3600 struct cam_path *local_path;
3601 struct mprsas_target *target;
3602 struct mprsas_lun *lun;
3606 pathid = cam_sim_path(sassc->sim);
3607 targetid = xpt_path_target_id(path);
3608 lunid = xpt_path_lun_id(path);
3610 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3611 "mprsas_check_eedp\n", targetid));
3612 target = &sassc->targets[targetid];
/* handle == 0 means the target slot is unused/gone; nothing to do. */
3613 if (target->handle == 0x0)
3617 * Determine if the device is EEDP capable.
3619 * If this flag is set in the inquiry data, the device supports
3620 * protection information, and must support the 16 byte read capacity
3621 * command, otherwise continue without sending read cap 16.
3623 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3627 * Issue a READ CAPACITY 16 command. This info is used to determine if
3628 * the LUN is formatted for EEDP support.
3630 ccb = xpt_alloc_ccb_nowait();
3632 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3637 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3639 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3646 * If LUN is already in list, don't create a new one.
3649 SLIST_FOREACH(lun, &target->luns, lun_link) {
3650 if (lun->lun_id == lunid) {
3656 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3659 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
/* On LUN allocation failure, free the path created above. */
3661 xpt_free_path(local_path);
3665 lun->lun_id = lunid;
3666 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3669 xpt_path_string(local_path, path_str, sizeof(path_str));
3670 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3671 path_str, target->handle);
3674 * Issue a READ CAPACITY 16 command for the LUN. The
3675 * mprsas_read_cap_done function will load the read cap info into the
/* rcap_buf is heap-allocated because the command completes
 * asynchronously; mprsas_read_cap_done() frees it. */
3678 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3680 if (rcap_buf == NULL) {
3681 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3682 "buffer for EEDP support.\n");
3683 xpt_free_path(ccb->ccb_h.path);
3687 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
/* Build the SCSI I/O: data-in, 4 retries, 60 s timeout, simple tag. */
3689 csio->ccb_h.func_code = XPT_SCSI_IO;
3690 csio->ccb_h.flags = CAM_DIR_IN;
3691 csio->ccb_h.retry_count = 4;
3692 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3693 csio->ccb_h.timeout = 60000;
3694 csio->data_ptr = (uint8_t *)rcap_buf;
3695 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3696 csio->sense_len = MPR_SENSE_LEN;
3697 csio->cdb_len = sizeof(*scsi_cmd);
3698 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* CDB: SERVICE ACTION IN(16) (0x9E) / READ CAPACITY(16); byte 13 is the
 * allocation length (only the short EEDP-relevant prefix is requested). */
3700 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3701 bzero(scsi_cmd, sizeof(*scsi_cmd));
3702 scsi_cmd->opcode = 0x9E;
3703 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3704 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the SAS softc so the completion handler can find its state. */
3706 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion handler for the internally generated READ CAPACITY(16)
 * issued by mprsas_check_eedp().  Records per-LUN EEDP state (formatted
 * flag and block size) and releases the buffer, path, and CCB.
 *
 * NOTE(review): chunk is elided; braces, 'break'/'continue' lines inside
 * the SLIST walk, and the abort path are not visible here.
 */
3711 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3713 struct mprsas_softc *sassc;
3714 struct mprsas_target *target;
3715 struct mprsas_lun *lun;
3716 struct scsi_read_capacity_eedp *rcap_buf;
3718 if (done_ccb == NULL)
3721 /* Driver need to release devq, it Scsi command is
3722 * generated by driver internally.
3723 * Currently there is a single place where driver
3724 * calls scsi command internally. In future if driver
3725 * calls more scsi command internally, it needs to release
3726 * devq internally, since those command will not go back to
3729 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3730 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3731 xpt_release_devq(done_ccb->ccb_h.path,
3732 /*count*/ 1, /*run_queue*/TRUE);
3735 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3738 * Get the LUN ID for the path and look it up in the LUN list for the
/* The submitting code stored the SAS softc in ppriv_ptr1. */
3741 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3742 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3743 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3744 target = &sassc->targets[done_ccb->ccb_h.target_id];
/* NOTE(review): if no list entry matches, 'lun' would be NULL after the
 * walk; the guard (if any) is in an elided line -- verify. */
3745 SLIST_FOREACH(lun, &target->luns, lun_link) {
3746 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3750 * Got the LUN in the target's LUN list. Fill it in with EEDP
3751 * info. If the READ CAP 16 command had some SCSI error (common
3752 * if command is not supported), mark the lun as not supporting
3753 * EEDP and set the block size to 0.
3755 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3756 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3757 lun->eedp_formatted = FALSE;
3758 lun->eedp_block_size = 0;
/* PROT_EN bit (byte 12, bit 0) set => LUN formatted with protection. */
3762 if (rcap_buf->protect & 0x01) {
3763 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3764 "%d is formatted for EEDP support.\n",
3765 done_ccb->ccb_h.target_lun,
3766 done_ccb->ccb_h.target_id);
3767 lun->eedp_formatted = TRUE;
3768 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3773 // Finished with this CCB and path.
3774 free(rcap_buf, M_MPR);
3775 xpt_free_path(done_ccb->ccb_h.path);
3776 xpt_free_ccb(done_ccb);
3778 #endif /* (__FreeBSD_version < 901503) || \
3779 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3782 * Set the INRESET flag for this target so that no I/O will be sent to
3783 * the target until the reset has completed. If an I/O request does
3784 * happen, the devq will be frozen. The CCB holds the path which is
3785 * used to release the devq. The devq is released and the CCB is freed
3786 * when the TM completes.
/*
 * Prepare a target for an outgoing task-management command: allocate a
 * CCB whose path will later release the devq, record the target on the
 * TM command, and set MPRSAS_TARGET_INRESET so no new I/O is started
 * until the TM completes (see the block comment above this function).
 *
 * NOTE(review): chunk is elided; the NULL check after
 * xpt_alloc_ccb_nowait() and the ccb storage/free lines are not visible
 * here -- verify against the full file.
 */
3789 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3790 struct mprsas_target *target, lun_id_t lun_id)
3795 ccb = xpt_alloc_ccb_nowait();
3797 path_id = cam_sim_path(sc->sassc->sim);
3798 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3799 target->tid, lun_id) != CAM_REQ_CMP) {
3803 tm->cm_targ = target;
/* From here on, new I/O to this target will freeze the devq. */
3804 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off SAS discovery: set wait_for_port_enable (keeps the simq
 * frozen until discovery finishes) and send the IOC a PORT_ENABLE
 * request.
 */
3810 mprsas_startup(struct mpr_softc *sc)
3813 * Send the port enable message and set the wait_for_port_enable flag.
3814 * This flag helps to keep the simq frozen until all discovery events
3817 sc->wait_for_port_enable = 1;
3818 mprsas_send_portenable(sc);
/*
 * Build and queue an MPI2 PORT_ENABLE request.  Completion is handled
 * by mprsas_portenable_complete().
 *
 * NOTE(review): chunk is elided; the error return after
 * mpr_alloc_command() failure and the function's return statements are
 * not visible here.
 */
3823 mprsas_send_portenable(struct mpr_softc *sc)
3825 MPI2_PORT_ENABLE_REQUEST *request;
3826 struct mpr_command *cm;
/* No free command frame: caller presumably sees an error return. */
3830 if ((cm = mpr_alloc_command(sc)) == NULL)
3832 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3833 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3834 request->MsgFlags = 0;
/* PORT_ENABLE uses the default request descriptor type. */
3836 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3837 cm->cm_complete = mprsas_portenable_complete;
3841 mpr_map_command(sc, cm);
3842 mpr_dprint(sc, MPR_XINFO,
3843 "mpr_send_portenable finished cm %p req %p complete %p\n",
3844 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT_ENABLE request sent by
 * mprsas_send_portenable().  Logs any failure, frees the command, clears
 * wait_for_port_enable, wakes any thread sleeping on
 * port_enable_complete, and drops the startup refcount so the simq can
 * be released once discovery is done.
 *
 * NOTE(review): chunk is elided; the assignment of 'sassc' and the
 * 'if (reply == NULL)' header are not visible here.
 */
3849 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3851 MPI2_PORT_ENABLE_REPLY *reply;
3852 struct mprsas_softc *sassc;
3858 * Currently there should be no way we can hit this case. It only
3859 * happens when we have a failure to allocate chain frames, and
3860 * port enable commands don't have S/G lists.
3862 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3863 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3864 "This should not happen!\n", __func__, cm->cm_flags);
3867 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3869 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/* NOTE(review): the mask is applied BEFORE the byte-swap here; on a
 * big-endian host this masks the wrong byte.  The correct order is
 * (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) -- candidate fix,
 * not applied because adjacent lines of this function are elided. */
3870 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3871 MPI2_IOCSTATUS_SUCCESS)
3872 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3874 mpr_free_command(sc, cm);
3876 * Done waiting for port enable to complete. Decrement the refcount.
3877 * If refcount is 0, discovery is complete and a rescan of the bus can
/* Wake anyone sleeping in attach waiting for port enable. */
3880 sc->wait_for_port_enable = 0;
3881 sc->port_enable_complete = 1;
3882 wakeup(&sc->port_enable_complete);
3883 mprsas_startup_decrement(sassc);
/*
 * Check whether a target ID appears in the driver's comma-separated
 * exclude list (sc->exclude_ids).  Empty list elements are skipped.
 *
 * NOTE(review): chunk is elided; the declarations of 'ids'/'name', the
 * 'continue' for empty tokens, and the return values are not visible
 * here.  strsep() mutates the string it walks -- presumably 'ids' points
 * at a writable copy; verify against the full file.
 */
3887 mprsas_check_id(struct mprsas_softc *sassc, int id)
3889 struct mpr_softc *sc = sassc->sc;
3893 ids = &sc->exclude_ids[0];
3894 while((name = strsep(&ids, ",")) != NULL) {
3895 if (name[0] == '\0')
/* Tokens are parsed with base 0, so "0x.." hex entries also match. */
3897 if (strtol(name, NULL, 0) == (long)id)
3905 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3907 struct mprsas_softc *sassc;
3908 struct mprsas_lun *lun, *lun_tmp;
3909 struct mprsas_target *targ;
3914 * The number of targets is based on IOC Facts, so free all of
3915 * the allocated LUNs for each target and then the target buffer
3918 for (i=0; i< maxtargets; i++) {
3919 targ = &sassc->targets[i];
3920 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3924 free(sassc->targets, M_MPR);
3926 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3927 M_MPR, M_WAITOK|M_ZERO);
3928 if (!sassc->targets) {
3929 panic("%s failed to alloc targets with error %d\n",