2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for Avago Technologies (LSI) MPT3 */
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
75 #include <dev/nvme/nvme.h>
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
90 #define MPRSAS_DISCOVERY_TIMEOUT 20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
94 * static array to check SCSI OpCode for EEDP protection bits
96 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 static uint8_t op_code_prot[256] = {
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131 struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133 struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137 struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139 union ccb *done_ccb);
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143 struct mpr_command *cm);
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
156 struct mprsas_target *target;
159 for (i = start; i < sassc->maxtargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery. Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
176 mprsas_startup_increment(struct mprsas_softc *sassc)
178 MPR_FUNCTRACE(sassc->sc);
180 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 if (sassc->startup_refcount++ == 0) {
182 /* just starting, freeze the simq */
183 mpr_dprint(sassc->sc, MPR_INIT,
184 "%s freezing simq\n", __func__);
185 #if (__FreeBSD_version >= 1000039) || \
186 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
189 xpt_freeze_simq(sassc->sim, 1);
191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 sassc->startup_refcount);
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 xpt_release_simq(sassc->sim, 1);
202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 MPR_FUNCTRACE(sassc->sc);
211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 if (--sassc->startup_refcount == 0) {
213 /* finished all discovery-related actions, release
214 * the simq and rescan for the latest topology.
216 mpr_dprint(sassc->sc, MPR_INIT,
217 "%s releasing simq\n", __func__);
218 sassc->flags &= ~MPRSAS_IN_STARTUP;
219 xpt_release_simq(sassc->sim, 1);
220 #if (__FreeBSD_version >= 1000039) || \
221 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
224 mprsas_rescan_target(sassc->sc, NULL);
227 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
228 sassc->startup_refcount);
233 * The firmware requires us to stop sending commands when we're doing task
236 * XXX The logic for serializing the device has been made lazy and moved to
237 * mprsas_prepare_for_tm().
240 mprsas_alloc_tm(struct mpr_softc *sc)
242 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
243 struct mpr_command *tm;
246 tm = mpr_alloc_high_priority_command(sc);
250 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
251 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
256 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
258 int target_id = 0xFFFFFFFF;
265 * For TM's the devq is frozen for the device. Unfreeze it here and
266 * free the resources used for freezing the devq. Must clear the
267 * INRESET flag as well or scsi I/O will not work.
269 if (tm->cm_targ != NULL) {
270 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
271 target_id = tm->cm_targ->tid;
274 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
276 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
277 xpt_free_path(tm->cm_ccb->ccb_h.path);
278 xpt_free_ccb(tm->cm_ccb);
281 mpr_free_high_priority_command(sc, tm);
285 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
287 struct mprsas_softc *sassc = sc->sassc;
289 target_id_t targetid;
293 pathid = cam_sim_path(sassc->sim);
295 targetid = CAM_TARGET_WILDCARD;
297 targetid = targ - sassc->targets;
300 * Allocate a CCB and schedule a rescan.
302 ccb = xpt_alloc_ccb_nowait();
304 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
308 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
309 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
310 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
315 if (targetid == CAM_TARGET_WILDCARD)
316 ccb->ccb_h.func_code = XPT_SCAN_BUS;
318 ccb->ccb_h.func_code = XPT_SCAN_TGT;
320 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
325 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
335 /* No need to be in here if debugging isn't enabled */
336 if ((cm->cm_sc->mpr_debug & level) == 0)
339 sbuf_new(&sb, str, sizeof(str), 0);
343 if (cm->cm_ccb != NULL) {
344 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
346 sbuf_cat(&sb, path_str);
347 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
348 scsi_command_string(&cm->cm_ccb->csio, &sb);
349 sbuf_printf(&sb, "length %d ",
350 cm->cm_ccb->csio.dxfer_len);
353 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
354 cam_sim_name(cm->cm_sc->sassc->sim),
355 cam_sim_unit(cm->cm_sc->sassc->sim),
356 cam_sim_bus(cm->cm_sc->sassc->sim),
357 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
361 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
362 sbuf_vprintf(&sb, fmt, ap);
364 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
370 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
372 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
373 struct mprsas_target *targ;
378 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
379 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
383 /* XXX retry the remove after the diag reset completes? */
384 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
385 "0x%04x\n", __func__, handle);
386 mprsas_free_tm(sc, tm);
390 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
391 MPI2_IOCSTATUS_SUCCESS) {
392 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
393 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
396 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
397 le32toh(reply->TerminationCount));
398 mpr_free_reply(sc, tm->cm_reply_data);
399 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
401 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
405 * Don't clear target if remove fails because things will get confusing.
406 * Leave the devname and sasaddr intact so that we know to avoid reusing
407 * this target id if possible, and so we can assign the same target id
408 * to this device if it comes back in the future.
410 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
411 MPI2_IOCSTATUS_SUCCESS) {
414 targ->encl_handle = 0x0;
415 targ->encl_level_valid = 0x0;
416 targ->encl_level = 0x0;
417 targ->connector_name[0] = ' ';
418 targ->connector_name[1] = ' ';
419 targ->connector_name[2] = ' ';
420 targ->connector_name[3] = ' ';
421 targ->encl_slot = 0x0;
422 targ->exp_dev_handle = 0x0;
424 targ->linkrate = 0x0;
427 targ->scsi_req_desc_type = 0;
430 mprsas_free_tm(sc, tm);
435 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
436 * Otherwise Volume Delete is same as Bare Drive Removal.
439 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
441 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
442 struct mpr_softc *sc;
443 struct mpr_command *cm;
444 struct mprsas_target *targ = NULL;
446 MPR_FUNCTRACE(sassc->sc);
449 targ = mprsas_find_target_by_handle(sassc, 0, handle);
451 /* FIXME: what is the action? */
452 /* We don't know about this device? */
453 mpr_dprint(sc, MPR_ERROR,
454 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
458 targ->flags |= MPRSAS_TARGET_INREMOVAL;
460 cm = mprsas_alloc_tm(sc);
462 mpr_dprint(sc, MPR_ERROR,
463 "%s: command alloc failure\n", __func__);
467 mprsas_rescan_target(sc, targ);
469 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
470 req->DevHandle = targ->handle;
471 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
473 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
474 /* SAS Hard Link Reset / SATA Link Reset */
475 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
477 /* PCIe Protocol Level Reset*/
479 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
484 cm->cm_complete = mprsas_remove_volume;
485 cm->cm_complete_data = (void *)(uintptr_t)handle;
487 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
488 __func__, targ->tid);
489 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
491 mpr_map_command(sc, cm);
495 * The firmware performs debounce on the link to avoid transient link errors
496 * and false removals. When it does decide that link has been lost and a
497 * device needs to go away, it expects that the host will perform a target reset
498 * and then an op remove. The reset has the side-effect of aborting any
499 * outstanding requests for the device, which is required for the op-remove to
500 * succeed. It's not clear if the host should check for the device coming back
501 * alive after the reset.
504 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
506 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
507 struct mpr_softc *sc;
508 struct mpr_command *tm;
509 struct mprsas_target *targ = NULL;
511 MPR_FUNCTRACE(sassc->sc);
515 targ = mprsas_find_target_by_handle(sassc, 0, handle);
517 /* FIXME: what is the action? */
518 /* We don't know about this device? */
519 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
524 targ->flags |= MPRSAS_TARGET_INREMOVAL;
526 tm = mprsas_alloc_tm(sc);
528 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
533 mprsas_rescan_target(sc, targ);
535 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
536 memset(req, 0, sizeof(*req));
537 req->DevHandle = htole16(targ->handle);
538 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
540 /* SAS Hard Link Reset / SATA Link Reset */
541 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
545 tm->cm_complete = mprsas_remove_device;
546 tm->cm_complete_data = (void *)(uintptr_t)handle;
548 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
549 __func__, targ->tid);
550 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
552 mpr_map_command(sc, tm);
556 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
558 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
559 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
560 struct mprsas_target *targ;
561 struct mpr_command *next_cm;
566 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
567 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
571 * Currently there should be no way we can hit this case. It only
572 * happens when we have a failure to allocate chain frames, and
573 * task management commands don't have S/G lists.
575 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
576 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
577 "handle %#04x! This should not happen!\n", __func__,
578 tm->cm_flags, handle);
582 /* XXX retry the remove after the diag reset completes? */
583 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
584 "0x%04x\n", __func__, handle);
585 mprsas_free_tm(sc, tm);
589 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
590 MPI2_IOCSTATUS_SUCCESS) {
591 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
592 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
595 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
596 le32toh(reply->TerminationCount));
597 mpr_free_reply(sc, tm->cm_reply_data);
598 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
600 /* Reuse the existing command */
601 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
602 memset(req, 0, sizeof(*req));
603 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
604 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
605 req->DevHandle = htole16(handle);
607 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
608 tm->cm_complete = mprsas_remove_complete;
609 tm->cm_complete_data = (void *)(uintptr_t)handle;
611 mpr_map_command(sc, tm);
613 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
615 if (targ->encl_level_valid) {
616 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
617 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
618 targ->connector_name);
620 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
623 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
624 ccb = tm->cm_complete_data;
625 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
626 mprsas_scsiio_complete(sc, tm);
631 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
633 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
635 struct mprsas_target *targ;
636 struct mprsas_lun *lun;
640 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
641 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
644 * Currently there should be no way we can hit this case. It only
645 * happens when we have a failure to allocate chain frames, and
646 * task management commands don't have S/G lists.
648 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
649 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
650 "handle %#04x! This should not happen!\n", __func__,
651 tm->cm_flags, handle);
652 mprsas_free_tm(sc, tm);
657 /* most likely a chip reset */
658 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
659 "0x%04x\n", __func__, handle);
660 mprsas_free_tm(sc, tm);
664 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
665 __func__, handle, le16toh(reply->IOCStatus));
668 * Don't clear target if remove fails because things will get confusing.
669 * Leave the devname and sasaddr intact so that we know to avoid reusing
670 * this target id if possible, and so we can assign the same target id
671 * to this device if it comes back in the future.
673 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
674 MPI2_IOCSTATUS_SUCCESS) {
677 targ->encl_handle = 0x0;
678 targ->encl_level_valid = 0x0;
679 targ->encl_level = 0x0;
680 targ->connector_name[0] = ' ';
681 targ->connector_name[1] = ' ';
682 targ->connector_name[2] = ' ';
683 targ->connector_name[3] = ' ';
684 targ->encl_slot = 0x0;
685 targ->exp_dev_handle = 0x0;
687 targ->linkrate = 0x0;
690 targ->scsi_req_desc_type = 0;
692 while (!SLIST_EMPTY(&targ->luns)) {
693 lun = SLIST_FIRST(&targ->luns);
694 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
699 mprsas_free_tm(sc, tm);
703 mprsas_register_events(struct mpr_softc *sc)
708 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
709 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
710 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
711 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
712 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
713 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
714 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
715 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
716 setbit(events, MPI2_EVENT_IR_VOLUME);
717 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
718 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
719 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
720 setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
721 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
722 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
723 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
724 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
725 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
726 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
730 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
731 &sc->sassc->mprsas_eh);
737 mpr_attach_sas(struct mpr_softc *sc)
739 struct mprsas_softc *sassc;
741 int unit, error = 0, reqs;
744 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
746 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
748 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
749 "Cannot allocate SAS subsystem memory\n");
754 * XXX MaxTargets could change during a reinit. Since we don't
755 * resize the targets[] array during such an event, cache the value
756 * of MaxTargets here so that we don't get into trouble later. This
757 * should move into the reinit logic.
759 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
760 sassc->targets = malloc(sizeof(struct mprsas_target) *
761 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
762 if (!sassc->targets) {
763 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
764 "Cannot allocate SAS target memory\n");
771 reqs = sc->num_reqs - sc->num_prireqs - 1;
772 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
773 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
778 unit = device_get_unit(sc->mpr_dev);
779 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
780 unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
781 if (sassc->sim == NULL) {
782 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
787 TAILQ_INIT(&sassc->ev_queue);
789 /* Initialize taskqueue for Event Handling */
790 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
791 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
792 taskqueue_thread_enqueue, &sassc->ev_tq);
793 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
794 device_get_nameunit(sc->mpr_dev));
799 * XXX There should be a bus for every port on the adapter, but since
800 * we're just going to fake the topology for now, we'll pretend that
801 * everything is just a target on a single bus.
803 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
804 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
805 "Error %d registering SCSI bus\n", error);
811 * Assume that discovery events will start right away.
813 * Hold off boot until discovery is complete.
815 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
816 sc->sassc->startup_refcount = 0;
817 mprsas_startup_increment(sassc);
819 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
822 * Register for async events so we can determine the EEDP
823 * capabilities of devices.
825 status = xpt_create_path(&sassc->path, /*periph*/NULL,
826 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
828 if (status != CAM_REQ_CMP) {
829 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
830 "Error %#x creating sim path\n", status);
835 #if (__FreeBSD_version >= 1000006) || \
836 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
837 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
839 event = AC_FOUND_DEVICE;
843 * Prior to the CAM locking improvements, we can't call
844 * xpt_register_async() with a particular path specified.
846 * If a path isn't specified, xpt_register_async() will
847 * generate a wildcard path and acquire the XPT lock while
848 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
849 * It will then drop the XPT lock once that is done.
851 * If a path is specified for xpt_register_async(), it will
852 * not acquire and drop the XPT lock around the call to
853 * xpt_action(). xpt_action() asserts that the caller
854 * holds the SIM lock, so the SIM lock has to be held when
855 * calling xpt_register_async() when the path is specified.
857 * But xpt_register_async calls xpt_for_all_devices(),
858 * which calls xptbustraverse(), which will acquire each
859 * SIM lock. When it traverses our particular bus, it will
860 * necessarily acquire the SIM lock, which will lead to a
861 * recursive lock acquisition.
863 * The CAM locking changes fix this problem by acquiring
864 * the XPT topology lock around bus traversal in
865 * xptbustraverse(), so the caller can hold the SIM lock
866 * and it does not cause a recursive lock acquisition.
868 * These __FreeBSD_version values are approximate, especially
869 * for stable/10, which is two months later than the actual
873 #if (__FreeBSD_version < 1000703) || \
874 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
876 status = xpt_register_async(event, mprsas_async, sc,
880 status = xpt_register_async(event, mprsas_async, sc,
884 if (status != CAM_REQ_CMP) {
885 mpr_dprint(sc, MPR_ERROR,
886 "Error %#x registering async handler for "
887 "AC_ADVINFO_CHANGED events\n", status);
888 xpt_free_path(sassc->path);
892 if (status != CAM_REQ_CMP) {
894 * EEDP use is the exception, not the rule.
895 * Warn the user, but do not fail to attach.
897 mpr_printf(sc, "EEDP capabilities disabled.\n");
902 mprsas_register_events(sc);
907 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
912 mpr_detach_sas(struct mpr_softc *sc)
914 struct mprsas_softc *sassc;
915 struct mprsas_lun *lun, *lun_tmp;
916 struct mprsas_target *targ;
921 if (sc->sassc == NULL)
925 mpr_deregister_events(sc, sassc->mprsas_eh);
928 * Drain and free the event handling taskqueue with the lock
929 * unheld so that any parallel processing tasks drain properly
930 * without deadlocking.
932 if (sassc->ev_tq != NULL)
933 taskqueue_free(sassc->ev_tq);
935 /* Make sure CAM doesn't wedge if we had to bail out early. */
938 while (sassc->startup_refcount != 0)
939 mprsas_startup_decrement(sassc);
941 /* Deregister our async handler */
942 if (sassc->path != NULL) {
943 xpt_register_async(0, mprsas_async, sc, sassc->path);
944 xpt_free_path(sassc->path);
948 if (sassc->flags & MPRSAS_IN_STARTUP)
949 xpt_release_simq(sassc->sim, 1);
951 if (sassc->sim != NULL) {
952 xpt_bus_deregister(cam_sim_path(sassc->sim));
953 cam_sim_free(sassc->sim, FALSE);
958 if (sassc->devq != NULL)
959 cam_simq_free(sassc->devq);
961 for (i = 0; i < sassc->maxtargets; i++) {
962 targ = &sassc->targets[i];
963 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
967 free(sassc->targets, M_MPR);
975 mprsas_discovery_end(struct mprsas_softc *sassc)
977 struct mpr_softc *sc = sassc->sc;
981 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
982 callout_stop(&sassc->discovery_callout);
985 * After discovery has completed, check the mapping table for any
986 * missing devices and update their missing counts. Only do this once
987 * whenever the driver is initialized so that missing counts aren't
988 * updated unnecessarily. Note that just because discovery has
989 * completed doesn't mean that events have been processed yet. The
990 * check_devices function is a callout timer that checks if ALL devices
991 * are missing. If so, it will wait a little longer for events to
992 * complete and keep resetting itself until some device in the mapping
993 * table is not missing, meaning that event processing has started.
995 if (sc->track_mapping_events) {
996 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
997 "completed. Check for missing devices in the mapping "
999 callout_reset(&sc->device_check_callout,
1000 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
1006 mprsas_action(struct cam_sim *sim, union ccb *ccb)
1008 struct mprsas_softc *sassc;
1010 sassc = cam_sim_softc(sim);
1012 MPR_FUNCTRACE(sassc->sc);
1013 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1014 ccb->ccb_h.func_code);
1015 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
1017 switch (ccb->ccb_h.func_code) {
1020 struct ccb_pathinq *cpi = &ccb->cpi;
1021 struct mpr_softc *sc = sassc->sc;
1023 cpi->version_num = 1;
1024 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1025 cpi->target_sprt = 0;
1026 #if (__FreeBSD_version >= 1000039) || \
1027 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1028 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1030 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1032 cpi->hba_eng_cnt = 0;
1033 cpi->max_target = sassc->maxtargets - 1;
1037 * initiator_id is set here to an ID outside the set of valid
1038 * target IDs (including volumes).
1040 cpi->initiator_id = sassc->maxtargets;
1041 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1042 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1043 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1044 cpi->unit_number = cam_sim_unit(sim);
1045 cpi->bus_id = cam_sim_bus(sim);
1047 * XXXSLM-I think this needs to change based on config page or
1048 * something instead of hardcoded to 150000.
1050 cpi->base_transfer_speed = 150000;
1051 cpi->transport = XPORT_SAS;
1052 cpi->transport_version = 0;
1053 cpi->protocol = PROTO_SCSI;
1054 cpi->protocol_version = SCSI_REV_SPC;
1055 cpi->maxio = sc->maxio;
1056 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1059 case XPT_GET_TRAN_SETTINGS:
1061 struct ccb_trans_settings *cts;
1062 struct ccb_trans_settings_sas *sas;
1063 struct ccb_trans_settings_scsi *scsi;
1064 struct mprsas_target *targ;
1067 sas = &cts->xport_specific.sas;
1068 scsi = &cts->proto_specific.scsi;
1070 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1071 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1072 cts->ccb_h.target_id));
1073 targ = &sassc->targets[cts->ccb_h.target_id];
1074 if (targ->handle == 0x0) {
1075 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1079 cts->protocol_version = SCSI_REV_SPC2;
1080 cts->transport = XPORT_SAS;
1081 cts->transport_version = 0;
1083 sas->valid = CTS_SAS_VALID_SPEED;
1084 switch (targ->linkrate) {
1086 sas->bitrate = 150000;
1089 sas->bitrate = 300000;
1092 sas->bitrate = 600000;
1095 sas->bitrate = 1200000;
1101 cts->protocol = PROTO_SCSI;
1102 scsi->valid = CTS_SCSI_VALID_TQ;
1103 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1105 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1108 case XPT_CALC_GEOMETRY:
1109 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1110 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1113 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1115 mprsas_action_resetdev(sassc, ccb);
1120 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1121 "for abort or reset\n");
1122 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1125 mprsas_action_scsiio(sassc, ccb);
1127 #if __FreeBSD_version >= 900026
1129 mprsas_action_smpio(sassc, ccb);
1133 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1141 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1142 target_id_t target_id, lun_id_t lun_id)
1144 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1145 struct cam_path *path;
1147 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1148 ac_code, target_id, (uintmax_t)lun_id);
1150 if (xpt_create_path(&path, NULL,
1151 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1152 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1157 xpt_async(ac_code, path, NULL);
1158 xpt_free_path(path);
/*
 * Complete every non-free command with a NULL reply.  Used during diag
 * reset cleanup so nothing is left waiting on hardware that has been
 * re-initialized.  Caller must hold the softc mutex.
 */
1162 mprsas_complete_all_commands(struct mpr_softc *sc)
1164 struct mpr_command *cm;
1169 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1171 /* complete all commands with a NULL reply */
1172 for (i = 1; i < sc->num_reqs; i++) {
1173 cm = &sc->commands[i];
/* Already-free commands need no completion. */
1174 if (cm->cm_state == MPR_CM_STATE_FREE)
1177 cm->cm_state = MPR_CM_STATE_BUSY;
1178 cm->cm_reply = NULL;
/* SATA ID commands that timed out own a private data buffer; free it. */
1181 if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
1183 free(cm->cm_data, M_MPR);
/* Polled commands are completed by setting the COMPLETE flag. */
1187 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1188 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
/* Commands with a completion callback are completed through it. */
1190 if (cm->cm_complete != NULL) {
1191 mprsas_log_command(cm, MPR_RECOVERY,
1192 "completing cm %p state %x ccb %p for diag reset\n",
1193 cm, cm->cm_state, cm->cm_ccb);
1194 cm->cm_complete(sc, cm);
1196 } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1197 mprsas_log_command(cm, MPR_RECOVERY,
1198 "waking up cm %p state %x ccb %p for diag reset\n",
1199 cm, cm->cm_state, cm->cm_ccb);
/* A command that was neither completed nor freed should not exist. */
1204 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1205 /* this should never happen, but if it does, log */
1206 mprsas_log_command(cm, MPR_RECOVERY,
1207 "cm %p state %x flags 0x%x ccb %p during diag "
1208 "reset\n", cm, cm->cm_state, cm->cm_flags,
/* All I/O has been drained; reset the active-command counter. */
1213 sc->io_cmds_active = 0;
/*
 * Restore driver state after a controller re-init (diag reset): re-enter
 * startup/discovery mode, announce a bus reset to CAM, complete all
 * outstanding commands, and clear every target's handles/state so that
 * targets are rediscovered with fresh device handles.
 */
1217 mprsas_handle_reinit(struct mpr_softc *sc)
1221 /* Go back into startup mode and freeze the simq, so that CAM
1222 * doesn't send any commands until after we've rediscovered all
1223 * targets and found the proper device handles for them.
1225 * After the reset, portenable will trigger discovery, and after all
1226 * discovery-related activities have finished, the simq will be
1229 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1230 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1231 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1232 mprsas_startup_increment(sc->sassc);
1234 /* notify CAM of a bus reset */
1235 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1238 /* complete and cleanup after all outstanding commands */
1239 mprsas_complete_all_commands(sc);
1241 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1242 __func__, sc->sassc->startup_refcount);
1244 /* zero all the target handles, since they may change after the
1245 * reset, and we have to rediscover all the targets and use the new
1248 for (i = 0; i < sc->sassc->maxtargets; i++) {
/* Log any target that still shows outstanding I/O (should be none). */
1249 if (sc->sassc->targets[i].outstanding != 0)
1250 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1251 i, sc->sassc->targets[i].outstanding);
1252 sc->sassc->targets[i].handle = 0x0;
1253 sc->sassc->targets[i].exp_dev_handle = 0x0;
1254 sc->sassc->targets[i].outstanding = 0;
1255 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler invoked when a task-management command does not
 * complete in time.  Logs the timeout, asserts the TM was still queued
 * to hardware, and marks it busy.  Escalation (lines elided in this
 * view) happens after this point.
 */
1259 mprsas_tm_timeout(void *data)
1261 struct mpr_command *tm = data;
1262 struct mpr_softc *sc = tm->cm_sc;
1264 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1266 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/* A timed-out TM must still be in the hardware queue. */
1269 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1270 ("command not inqueue\n"));
1272 tm->cm_state = MPR_CM_STATE_BUSY;
/*
 * Completion handler for a LUN-reset task-management command.  Stops
 * the TM timeout callout, validates the reply, then checks whether any
 * commands are still outstanding on the LUN: if none, recovery for the
 * LUN is finished (and the next timed-out command, if any, is aborted);
 * if some remain, the LUN reset effectively failed and we escalate to a
 * target reset.
 */
1277 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1279 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1280 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1281 unsigned int cm_count = 0;
1282 struct mpr_command *cm;
1283 struct mprsas_target *targ;
1285 callout_stop(&tm->cm_callout);
1287 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1288 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1292 * Currently there should be no way we can hit this case. It only
1293 * happens when we have a failure to allocate chain frames, and
1294 * task management commands don't have S/G lists.
1296 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1297 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1298 "%s: cm_flags = %#x for LUN reset! "
1299 "This should not happen!\n", __func__, tm->cm_flags);
1300 mprsas_free_tm(sc, tm);
/* A NULL reply is benign during a diag reset, fatal otherwise. */
1304 if (reply == NULL) {
1305 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1307 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1308 /* this completion was due to a reset, just cleanup */
1309 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1310 "reset, ignoring NULL LUN reset reply\n");
1312 mprsas_free_tm(sc, tm);
1315 /* we should have gotten a reply. */
1316 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1317 "LUN reset attempt, resetting controller\n");
1323 mpr_dprint(sc, MPR_RECOVERY,
1324 "logical unit reset status 0x%x code 0x%x count %u\n",
1325 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1326 le32toh(reply->TerminationCount));
1329 * See if there are any outstanding commands for this LUN.
1330 * This could be made more efficient by using a per-LU data
1331 * structure of some sort.
1333 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1334 if (cm->cm_lun == tm->cm_lun)
/* No commands left on this LUN: the reset worked. */
1338 if (cm_count == 0) {
1339 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1340 "Finished recovery after LUN reset for target %u\n",
1343 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1347 * We've finished recovery for this logical unit. check and
1348 * see if some other logical unit has a timedout command
1349 * that needs to be processed.
1351 cm = TAILQ_FIRST(&targ->timedout_commands);
1353 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1354 "More commands to abort for target %u\n", targ->tid);
1355 mprsas_send_abort(sc, tm, cm);
1358 mprsas_free_tm(sc, tm);
1361 /* if we still have commands for this LUN, the reset
1362 * effectively failed, regardless of the status reported.
1363 * Escalate to a target reset.
1365 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1366 "logical unit reset complete for target %u, but still "
1367 "have %u command(s), sending target reset\n", targ->tid,
/* NVMe devices escalate only with custom TM handling enabled. */
1369 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1370 mprsas_send_reset(sc, tm,
1371 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a target-reset task-management command.
 * Stops the TM timeout callout, validates the reply, and then: if the
 * target has no outstanding commands, recovery is complete and a BDR is
 * announced to CAM; otherwise the reset effectively failed and we
 * escalate (to a controller reset, per the log message below).
 */
1378 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1380 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1381 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1382 struct mprsas_target *targ;
1384 callout_stop(&tm->cm_callout);
1386 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1387 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1391 * Currently there should be no way we can hit this case. It only
1392 * happens when we have a failure to allocate chain frames, and
1393 * task management commands don't have S/G lists.
1395 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1396 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1397 "reset! This should not happen!\n", __func__, tm->cm_flags);
1398 mprsas_free_tm(sc, tm);
/* A NULL reply is benign during a diag reset, fatal otherwise. */
1402 if (reply == NULL) {
1403 mpr_dprint(sc, MPR_RECOVERY,
1404 "NULL target reset reply for tm %p TaskMID %u\n",
1405 tm, le16toh(req->TaskMID));
1406 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1407 /* this completion was due to a reset, just cleanup */
1408 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1409 "reset, ignoring NULL target reset reply\n");
1411 mprsas_free_tm(sc, tm);
1414 /* we should have gotten a reply. */
1415 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1416 "target reset attempt, resetting controller\n");
1422 mpr_dprint(sc, MPR_RECOVERY,
1423 "target reset status 0x%x code 0x%x count %u\n",
1424 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1425 le32toh(reply->TerminationCount));
/* Zero outstanding commands means recovery on this target is done. */
1427 if (targ->outstanding == 0) {
1429 * We've finished recovery for this target and all
1430 * of its logical units.
1432 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1433 "Finished reset recovery for target %u\n", targ->tid);
1435 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1439 mprsas_free_tm(sc, tm);
1442 * After a target reset, if this target still has
1443 * outstanding commands, the reset effectively failed,
1444 * regardless of the status reported. escalate.
1446 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1447 "Target reset complete for target %u, but still have %u "
1448 "command(s), resetting controller\n", targ->tid,
/* Default TM reset timeout in seconds (overridable per-target for NVMe). */
1454 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a reset task-management request of the given type
 * (LUN reset or target reset) for tm's target.  Sets the appropriate
 * MsgFlags reset method (SAS link reset vs. PCIe protocol-level reset
 * for NVMe), picks the timeout, installs the matching completion
 * handler, arms the TM timeout callout, and maps the command to
 * hardware.
 */
1457 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1459 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1460 struct mprsas_target *target;
1463 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1464 if (target->handle == 0) {
1465 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1466 "%d\n", __func__, target->tid);
1470 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1471 req->DevHandle = htole16(target->handle);
1472 req->TaskType = type;
/* Non-NVMe (or custom-TM NVMe): SAS/SATA link reset with fixed timeout. */
1474 if (!target->is_nvme || sc->custom_nvme_tm_handling) {
1475 timeout = MPR_RESET_TIMEOUT;
1477 * Target reset method =
1478 * SAS Hard Link Reset / SATA Link Reset
1480 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* NVMe: per-target timeout if configured, else the default. */
1482 timeout = (target->controller_reset_timeout) ? (
1483 target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
1484 /* PCIe Protocol Level Reset*/
1486 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
1489 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1490 /* XXX Need to handle invalid LUNs */
1491 MPR_SET_LUN(req->LUN, tm->cm_lun);
1492 tm->cm_targ->logical_unit_resets++;
1493 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1494 "Sending logical unit reset to target %u lun %d\n",
1495 target->tid, tm->cm_lun);
1496 tm->cm_complete = mprsas_logical_unit_reset_complete;
1497 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1498 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1499 tm->cm_targ->target_resets++;
1500 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1501 "Sending target reset to target %u\n", target->tid);
1502 tm->cm_complete = mprsas_target_reset_complete;
1503 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1506 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
/* Extra locating info when the enclosure level is known. */
1510 if (target->encl_level_valid) {
1511 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1512 "At enclosure level %d, slot %d, connector name (%4s)\n",
1513 target->encl_level, target->encl_slot,
1514 target->connector_name);
1518 tm->cm_complete_data = (void *)tm;
/* Arm the TM watchdog before handing the command to hardware. */
1520 callout_reset(&tm->cm_callout, timeout * hz,
1521 mprsas_tm_timeout, tm);
1523 err = mpr_map_command(sc, tm);
1525 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1526 "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT_TASK task-management command.  Stops
 * the TM timeout callout, validates the reply, then inspects the
 * target's timed-out command list: empty means recovery is done; a
 * different SMID at the head means the abort succeeded and the next
 * timed-out command is aborted; the same SMID means the abort failed
 * and we escalate to a logical unit reset.
 */
1533 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1535 struct mpr_command *cm;
1536 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1537 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1538 struct mprsas_target *targ;
1540 callout_stop(&tm->cm_callout);
1542 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1543 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1547 * Currently there should be no way we can hit this case. It only
1548 * happens when we have a failure to allocate chain frames, and
1549 * task management commands don't have S/G lists.
1551 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1552 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1553 "cm_flags = %#x for abort %p TaskMID %u!\n",
1554 tm->cm_flags, tm, le16toh(req->TaskMID));
1555 mprsas_free_tm(sc, tm);
/* A NULL reply is benign during a diag reset, fatal otherwise. */
1559 if (reply == NULL) {
1560 mpr_dprint(sc, MPR_RECOVERY,
1561 "NULL abort reply for tm %p TaskMID %u\n",
1562 tm, le16toh(req->TaskMID));
1563 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1564 /* this completion was due to a reset, just cleanup */
1565 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1566 "reset, ignoring NULL abort reply\n");
1568 mprsas_free_tm(sc, tm);
1570 /* we should have gotten a reply. */
1571 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1572 "abort attempt, resetting controller\n");
1578 mpr_dprint(sc, MPR_RECOVERY,
1579 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1580 le16toh(req->TaskMID),
1581 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1582 le32toh(reply->TerminationCount));
1584 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1587 * if there are no more timedout commands, we're done with
1588 * error recovery for this target.
1590 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1591 "Finished abort recovery for target %u\n", targ->tid);
1593 mprsas_free_tm(sc, tm);
1594 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1595 /* abort success, but we have more timedout commands to abort */
1596 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1597 "Continuing abort recovery for target %u\n", targ->tid);
1598 mprsas_send_abort(sc, tm, cm);
1601 * we didn't get a command completion, so the abort
1602 * failed as far as we're concerned. escalate.
1604 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1605 "Abort failed for target %u, sending logical unit reset\n",
1608 mprsas_send_reset(sc, tm,
1609 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Default ABORT_TASK timeout in seconds (NVMe uses nvme_abort_timeout). */
1613 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT_TASK task-management request for the
 * timed-out command cm, using tm as the TM carrier.  Copies the
 * target/LUN/SMID from cm, installs mprsas_abort_complete as the
 * completion handler, arms the TM timeout callout, and maps the
 * command to hardware.
 */
1616 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1617 struct mpr_command *cm)
1619 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1620 struct mprsas_target *targ;
/* A zero devhandle means the target is gone; nothing to abort. */
1624 if (targ->handle == 0) {
1625 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1626 "%s null devhandle for target_id %d\n",
1627 __func__, cm->cm_ccb->ccb_h.target_id);
1631 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1632 "Aborting command %p\n", cm);
1634 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1635 req->DevHandle = htole16(targ->handle);
1636 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1638 /* XXX Need to handle invalid LUNs */
1639 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the specific request to abort. */
1641 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1644 tm->cm_complete = mprsas_abort_complete;
1645 tm->cm_complete_data = (void *)tm;
1646 tm->cm_targ = cm->cm_targ;
1647 tm->cm_lun = cm->cm_lun;
/* NVMe gets its own abort timeout unless custom TM handling is on. */
1649 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1650 timeout = MPR_ABORT_TIMEOUT;
1652 timeout = sc->nvme_abort_timeout;
1654 callout_reset(&tm->cm_callout, timeout * hz,
1655 mprsas_tm_timeout, tm);
1659 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1661 err = mpr_map_command(sc, tm);
1663 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1664 "error %d sending abort for cm %p SMID %u\n",
1665 err, cm, req->TaskMID);
/*
 * Callout handler for a SCSI I/O command that did not complete within
 * its CAM timeout.  Re-runs the interrupt handler to catch a race with
 * completion, marks the command TIMEDOUT, queues it on the target's
 * timed-out list, and starts recovery by sending an abort — unless the
 * target already has a TM in flight, in which case the command just
 * waits its turn.
 */
1670 mprsas_scsiio_timeout(void *data)
1672 sbintime_t elapsed, now;
1674 struct mpr_softc *sc;
1675 struct mpr_command *cm;
1676 struct mprsas_target *targ;
1678 cm = (struct mpr_command *)data;
1684 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1686 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1689 * Run the interrupt handler to make sure it's not pending. This
1690 * isn't perfect because the command could have already completed
1691 * and been re-used, though this is unlikely.
1693 mpr_intr_locked(sc);
/* If the command left the hardware queue, it completed — no timeout. */
1694 if (cm->cm_state != MPR_CM_STATE_INQUEUE) {
1695 mprsas_log_command(cm, MPR_XINFO,
1696 "SCSI command %p almost timed out\n", cm);
1700 if (cm->cm_ccb == NULL) {
1701 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
/* Report how long the command has actually been outstanding. */
1708 elapsed = now - ccb->ccb_h.qos.sim_data;
1709 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1710 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1711 targ->tid, targ->handle, ccb->ccb_h.timeout,
1712 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1713 if (targ->encl_level_valid) {
1714 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1715 "At enclosure level %d, slot %d, connector name (%4s)\n",
1716 targ->encl_level, targ->encl_slot, targ->connector_name);
1719 /* XXX first, check the firmware state, to see if it's still
1720 * operational. if not, do a diag reset.
1722 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1723 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1724 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
/* One TM per target: if recovery is active, just queue behind it. */
1726 if (targ->tm != NULL) {
1727 /* target already in recovery, just queue up another
1728 * timedout command to be processed later.
1730 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1731 "processing by tm %p\n", cm, targ->tm);
1733 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1735 /* start recovery by aborting the first timedout command */
1736 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1737 "Sending abort to target %u for SMID %d\n", targ->tid,
1738 cm->cm_desc.Default.SMID);
1739 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1741 mprsas_send_abort(sc, targ->tm, cm);
1744 /* XXX queue this target up for recovery once a TM becomes
1745 * available. The firmware only has a limited number of
1746 * HighPriority credits for the high priority requests used
1747 * for task management, and we ran out.
1749 * Isilon: don't worry about this for now, since we have
1750 * more credits than disks in an enclosure, and limit
1751 * ourselves to one TM per target for recovery.
1753 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1754 "timedout cm %p failed to allocate a tm\n", cm);
1759 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1761 * Return 0 - for success,
1762 * 1 - to immediately return back the command with success status to CAM
1763 * negative value - to fallback to firmware path i.e. issue scsi unmap
1764 * to FW without any translation.
/*
 * Translates a SCSI UNMAP CDB into an MPI2.6 NVMe Encapsulated request
 * carrying a native NVMe Dataset Management (deallocate) command.  The
 * UNMAP block descriptors are converted into NVMe DSM ranges placed in
 * a PRP page, then the encapsulated request is built, the timeout
 * callout armed, and the command mapped to hardware.
 */
1767 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1768 union ccb *ccb, struct mprsas_target *targ)
1770 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1771 struct ccb_scsiio *csio;
1772 struct unmap_parm_list *plist;
1773 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1774 struct nvme_command *c;
1776 uint16_t ndesc, list_len, data_length;
1777 struct mpr_prp_page *prp_page_info;
1778 uint64_t nvme_dsm_ranges_dma_handle;
/* Parameter list length lives in CDB bytes 7-8 (big-endian). */
1781 #if __FreeBSD_version >= 1100103
1782 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1784 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1785 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1786 ccb->csio.cdb_io.cdb_ptr[8]);
1788 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1789 ccb->csio.cdb_io.cdb_bytes[8]);
1793 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1797 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1799 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1800 "save UNMAP data\n");
1804 /* Copy SCSI unmap data to a local buffer */
1805 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1807 /* return back the unmap command to CAM with success status,
1808 * if number of descripts is zero.
1810 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1812 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1813 "UNMAP cmd is Zero\n");
/* Reject if the DSM payload would exceed the device's MDTS. */
1818 data_length = ndesc * sizeof(struct nvme_dsm_range);
1819 if (data_length > targ->MDTS) {
1820 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1821 "Device's MDTS: %d\n", data_length, targ->MDTS);
1826 prp_page_info = mpr_alloc_prp_page(sc);
1827 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1828 "UNMAP command.\n", __func__));
1831 * Insert the allocated PRP page into the command's PRP page list. This
1832 * will be freed when the command is freed.
1834 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1836 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1837 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1839 bzero(nvme_dsm_ranges, data_length);
1841 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1842 * for each descriptors contained in SCSI UNMAP data.
1844 for (i = 0; i < ndesc; i++) {
1845 nvme_dsm_ranges[i].length =
1846 htole32(be32toh(plist->desc[i].nlb));
1847 nvme_dsm_ranges[i].starting_lba =
1848 htole64(be64toh(plist->desc[i].slba));
1849 nvme_dsm_ranges[i].attributes = 0;
1852 /* Build MPI2.6's NVMe Encapsulated Request Message */
1853 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1854 bzero(req, sizeof(*req));
1855 req->DevHandle = htole16(targ->handle);
1856 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1857 req->Flags = MPI26_NVME_FLAGS_WRITE;
/* NVMe error response lands in the command's sense buffer. */
1858 req->ErrorResponseBaseAddress.High =
1859 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1860 req->ErrorResponseBaseAddress.Low =
1861 htole32(cm->cm_sense_busaddr);
1862 req->ErrorResponseAllocationLength =
1863 htole16(sizeof(struct nvme_completion));
1864 req->EncapsulatedCommandLength =
1865 htole16(sizeof(struct nvme_command));
1866 req->DataLength = htole32(data_length);
1868 /* Build NVMe DSM command */
1869 c = (struct nvme_command *) req->NVMe_Command;
1870 c->opc = NVME_OPC_DATASET_MANAGEMENT;
/* LUNs map to NVMe namespaces; NSIDs are 1-based. */
1871 c->nsid = htole32(csio->ccb_h.target_lun + 1);
/* cdw10 holds (number of ranges - 1), per the NVMe DSM definition. */
1872 c->cdw10 = htole32(ndesc - 1);
1873 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1875 cm->cm_length = data_length;
1878 cm->cm_complete = mprsas_scsiio_complete;
1879 cm->cm_complete_data = ccb;
1881 cm->cm_lun = csio->ccb_h.target_lun;
1884 cm->cm_desc.Default.RequestFlags =
1885 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
/* Record submit time and arm the I/O timeout callout. */
1887 csio->ccb_h.qos.sim_data = sbinuptime();
1888 #if __FreeBSD_version >= 1000029
1889 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1890 mprsas_scsiio_timeout, cm, 0);
1891 #else //__FreeBSD_version < 1000029
1892 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1893 mprsas_scsiio_timeout, cm);
1894 #endif //__FreeBSD_version >= 1000029
1897 targ->outstanding++;
1898 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1899 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1901 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1902 __func__, cm, ccb, targ->outstanding);
1904 mpr_build_nvme_prp(sc, cm, req,
1905 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1906 mpr_map_command(sc, cm);
/*
 * Handle an XPT_SCSI_IO CCB: validate the target, translate the CAM
 * request into an MPI2 SCSI IO request (direction, tagging, LUN, CDB,
 * optional EEDP protection fields), arm the I/O timeout, account the
 * command on the target, and map it to hardware.  NVMe UNMAPs are
 * diverted to mprsas_build_nvme_unmap().
 */
1914 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1916 MPI2_SCSI_IO_REQUEST *req;
1917 struct ccb_scsiio *csio;
1918 struct mpr_softc *sc;
1919 struct mprsas_target *targ;
1920 struct mprsas_lun *lun;
1921 struct mpr_command *cm;
1922 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1923 uint16_t eedp_flags;
1924 uint32_t mpi_control;
1929 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1932 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1933 ("Target %d out of bounds in XPT_SCSI_IO\n",
1934 csio->ccb_h.target_id));
1935 targ = &sassc->targets[csio->ccb_h.target_id];
1936 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* No device handle: the target does not exist (any more). */
1937 if (targ->handle == 0x0) {
1938 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1939 __func__, csio->ccb_h.target_id);
1940 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component members are not addressable via SCSI IO. */
1944 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1945 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1946 "supported %u\n", __func__, csio->ccb_h.target_id);
1947 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1952 * Sometimes, it is possible to get a command that is not "In
1953 * Progress" and was actually aborted by the upper layer. Check for
1954 * this here and complete the command without error.
1956 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1957 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1958 "target %u\n", __func__, csio->ccb_h.target_id);
1963 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1964 * that the volume has timed out. We want volumes to be enumerated
1965 * until they are deleted/removed, not just failed.
1967 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1968 if (targ->devinfo == 0)
1969 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1971 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1976 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1977 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1978 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1984 * If target has a reset in progress, freeze the devq and return. The
1985 * devq will be released when the TM reset is finished.
1987 if (targ->flags & MPRSAS_TARGET_INRESET) {
1988 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1989 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1990 __func__, targ->tid);
1991 xpt_freeze_devq(ccb->ccb_h.path, 1);
/* Out of commands (or mid diag-reset): freeze the simq and requeue. */
1996 cm = mpr_alloc_command(sc);
1997 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1999 mpr_free_command(sc, cm);
2001 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2002 xpt_freeze_simq(sassc->sim, 1);
2003 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2005 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2006 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2011 /* For NVME device's issue UNMAP command directly to NVME drives by
2012 * constructing equivalent native NVMe DataSetManagement command.
2014 #if __FreeBSD_version >= 1100103
2015 scsi_opcode = scsiio_cdb_ptr(csio)[0];
2017 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2018 scsi_opcode = csio->cdb_io.cdb_ptr[0];
2020 scsi_opcode = csio->cdb_io.cdb_bytes[0];
2022 if (scsi_opcode == UNMAP &&
2024 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2025 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2026 if (rc == 1) { /* return command to CAM with success status */
2027 mpr_free_command(sc, cm);
2028 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2031 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
/* Build the MPI2 SCSI IO request from the CCB. */
2035 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2036 bzero(req, sizeof(*req));
2037 req->DevHandle = htole16(targ->handle);
2038 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2040 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2041 req->SenseBufferLength = MPR_SENSE_LEN;
2043 req->ChainOffset = 0;
2044 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2049 req->DataLength = htole32(csio->dxfer_len);
2050 req->BidirectionalDataLength = 0;
2051 req->IoFlags = htole16(csio->cdb_len);
2054 /* Note: BiDirectional transfers are not supported */
2055 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2057 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2058 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2061 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2062 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2066 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
2070 if (csio->cdb_len == 32)
2071 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2073 * It looks like the hardware doesn't require an explicit tag
2074 * number for each transaction. SAM Task Management not supported
2077 switch (csio->tag_action) {
2078 case MSG_HEAD_OF_Q_TAG:
2079 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2081 case MSG_ORDERED_Q_TAG:
2082 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2085 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2087 case CAM_TAG_ACTION_NONE:
2088 case MSG_SIMPLE_Q_TAG:
2090 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2093 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2094 req->Control = htole32(mpi_control);
/* Reject LUNs the MPI LUN field cannot encode. */
2096 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2097 mpr_free_command(sc, cm);
2098 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2103 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2104 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2106 KASSERT(csio->cdb_len <= IOCDBLEN,
2107 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2108 "is not set", csio->cdb_len));
2109 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2111 req->IoFlags = htole16(csio->cdb_len);
2114 * Check if EEDP is supported and enabled. If it is then check if the
2115 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2116 * is formatted for EEDP support. If all of this is true, set CDB up
2117 * for EEDP transfer.
2119 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2120 if (sc->eedp_enabled && eedp_flags) {
2121 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2122 if (lun->lun_id == csio->ccb_h.target_lun) {
2127 if ((lun != NULL) && (lun->eedp_formatted)) {
2128 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2129 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2130 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2131 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2132 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2134 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2136 req->EEDPFlags = htole16(eedp_flags);
2139 * If CDB less than 32, fill in Primary Ref Tag with
2140 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2141 * already there. Also, set protection bit. FreeBSD
2142 * currently does not support CDBs bigger than 16, but
2143 * the code doesn't hurt, and will be here for the
2146 if (csio->cdb_len != 32) {
2147 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2148 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2149 PrimaryReferenceTag;
2150 for (i = 0; i < 4; i++) {
2152 req->CDB.CDB32[lba_byte + i];
2155 req->CDB.EEDP32.PrimaryReferenceTag =
2157 CDB.EEDP32.PrimaryReferenceTag);
2158 req->CDB.EEDP32.PrimaryApplicationTagMask =
2161 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2164 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2165 req->EEDPFlags = htole16(eedp_flags);
2166 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Data-bearing commands carry the CCB for S/G list construction. */
2172 cm->cm_length = csio->dxfer_len;
2173 if (cm->cm_length != 0) {
2175 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2179 cm->cm_sge = &req->SGL;
2180 cm->cm_sglsize = (32 - 24) * 4;
2181 cm->cm_complete = mprsas_scsiio_complete;
2182 cm->cm_complete_data = ccb;
2184 cm->cm_lun = csio->ccb_h.target_lun;
2187 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2188 * and set descriptor type.
2190 if (targ->scsi_req_desc_type ==
2191 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2192 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2193 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2194 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2195 if (!sc->atomic_desc_capable) {
2196 cm->cm_desc.FastPathSCSIIO.DevHandle =
2197 htole16(targ->handle);
2200 cm->cm_desc.SCSIIO.RequestFlags =
2201 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2202 if (!sc->atomic_desc_capable)
2203 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Record submit time and arm the I/O timeout callout. */
2206 csio->ccb_h.qos.sim_data = sbinuptime();
2207 #if __FreeBSD_version >= 1000029
2208 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2209 mprsas_scsiio_timeout, cm, 0);
2210 #else //__FreeBSD_version < 1000029
2211 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2212 mprsas_scsiio_timeout, cm);
2213 #endif //__FreeBSD_version >= 1000029
2216 targ->outstanding++;
2217 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2218 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2220 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2221 __func__, cm, ccb, targ->outstanding);
2223 mpr_map_command(sc, cm);
2228 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Log diagnostic detail for a failed SCSI IO: IOC status, SCSI
 * status/state (decoded via the driver's describe tables), enclosure
 * location when known, autosense data when valid, and the response
 * code when response info is present.  Purely informational.
 */
2231 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2232 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2236 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2237 MPI2_IOCSTATUS_MASK;
2238 u8 scsi_state = mpi_reply->SCSIState;
2239 u8 scsi_status = mpi_reply->SCSIStatus;
2240 char *desc_ioc_state = NULL;
2241 char *desc_scsi_status = NULL;
2242 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is special-cased; effect elided in this view of the file. */
2244 if (log_info == 0x31170000)
2247 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2249 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2252 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2253 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2254 if (targ->encl_level_valid) {
2255 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2256 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2257 targ->connector_name);
2261 * We can add more detail about underflow data here
2264 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2265 "scsi_state %b\n", desc_scsi_status, scsi_status,
2266 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2267 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
/* Dump sense data only when debugging is on and autosense is valid. */
2269 if (sc->mpr_debug & MPR_XINFO &&
2270 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2271 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2272 scsi_sense_print(csio);
2273 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2276 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2277 response_info = le32toh(mpi_reply->ResponseInfo);
2278 response_bytes = (u8 *)&response_info;
2279 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2281 mpr_describe_table(mpr_scsi_taskmgmt_string,
2282 response_bytes[0]));
2286 /** mprsas_nvme_trans_status_code
2288 * Convert Native NVMe command error status to
2289 * equivalent SCSI error status.
2291 * Returns appropriate scsi_status
/*
 * NOTE(review): this chunk appears to have lines elided (braces, per-case
 * "break" statements, default cases). Comments below describe only the
 * statements that are visible; confirm against the full source.
 */
2294 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2295 struct mpr_command *cm)
2297 u8 status = MPI2_SCSI_STATUS_GOOD;
2298 int skey, asc, ascq;
2299 union ccb *ccb = cm->cm_complete_data;
2300 int returned_sense_len;
/* Split the 16-bit NVMe status into Status Code Type and Status Code. */
2303 sct = NVME_STATUS_GET_SCT(nvme_status);
2304 sc = NVME_STATUS_GET_SC(nvme_status);
/*
 * Default translation: anything not matched below is reported as a
 * CHECK CONDITION with ILLEGAL REQUEST / no-sense ASC/ASCQ.
 */
2306 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2307 skey = SSD_KEY_ILLEGAL_REQUEST;
2308 asc = SCSI_ASC_NO_SENSE;
2309 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Generic (SCT 0) NVMe status codes. */
2312 case NVME_SCT_GENERIC:
2314 case NVME_SC_SUCCESS:
2315 status = MPI2_SCSI_STATUS_GOOD;
2316 skey = SSD_KEY_NO_SENSE;
2317 asc = SCSI_ASC_NO_SENSE;
2318 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2320 case NVME_SC_INVALID_OPCODE:
2321 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2322 skey = SSD_KEY_ILLEGAL_REQUEST;
2323 asc = SCSI_ASC_ILLEGAL_COMMAND;
2324 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2326 case NVME_SC_INVALID_FIELD:
2327 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2328 skey = SSD_KEY_ILLEGAL_REQUEST;
2329 asc = SCSI_ASC_INVALID_CDB;
2330 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2332 case NVME_SC_DATA_TRANSFER_ERROR:
2333 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2334 skey = SSD_KEY_MEDIUM_ERROR;
2335 asc = SCSI_ASC_NO_SENSE;
2336 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2338 case NVME_SC_ABORTED_POWER_LOSS:
2339 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2340 skey = SSD_KEY_ABORTED_COMMAND;
2341 asc = SCSI_ASC_WARNING;
2342 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2344 case NVME_SC_INTERNAL_DEVICE_ERROR:
2345 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2346 skey = SSD_KEY_HARDWARE_ERROR;
2347 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2348 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* All abort flavors collapse to TASK ABORTED / ABORTED COMMAND. */
2350 case NVME_SC_ABORTED_BY_REQUEST:
2351 case NVME_SC_ABORTED_SQ_DELETION:
2352 case NVME_SC_ABORTED_FAILED_FUSED:
2353 case NVME_SC_ABORTED_MISSING_FUSED:
2354 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2355 skey = SSD_KEY_ABORTED_COMMAND;
2356 asc = SCSI_ASC_NO_SENSE;
2357 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2359 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2360 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2361 skey = SSD_KEY_ILLEGAL_REQUEST;
2362 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2363 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2365 case NVME_SC_LBA_OUT_OF_RANGE:
2366 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2367 skey = SSD_KEY_ILLEGAL_REQUEST;
2368 asc = SCSI_ASC_ILLEGAL_BLOCK;
2369 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2371 case NVME_SC_CAPACITY_EXCEEDED:
2372 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2373 skey = SSD_KEY_MEDIUM_ERROR;
2374 asc = SCSI_ASC_NO_SENSE;
2375 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2377 case NVME_SC_NAMESPACE_NOT_READY:
2378 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2379 skey = SSD_KEY_NOT_READY;
2380 asc = SCSI_ASC_LUN_NOT_READY;
2381 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Command-specific (SCT 1) NVMe status codes. */
2385 case NVME_SCT_COMMAND_SPECIFIC:
2387 case NVME_SC_INVALID_FORMAT:
2388 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2389 skey = SSD_KEY_ILLEGAL_REQUEST;
2390 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2391 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2393 case NVME_SC_CONFLICTING_ATTRIBUTES:
2394 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2395 skey = SSD_KEY_ILLEGAL_REQUEST;
2396 asc = SCSI_ASC_INVALID_CDB;
2397 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Media/data-integrity (SCT 2) NVMe status codes. */
2401 case NVME_SCT_MEDIA_ERROR:
2403 case NVME_SC_WRITE_FAULTS:
2404 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2405 skey = SSD_KEY_MEDIUM_ERROR;
2406 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2407 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2409 case NVME_SC_UNRECOVERED_READ_ERROR:
2410 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2411 skey = SSD_KEY_MEDIUM_ERROR;
2412 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2413 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* T10 protection-information check failures map to medium errors. */
2415 case NVME_SC_GUARD_CHECK_ERROR:
2416 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2417 skey = SSD_KEY_MEDIUM_ERROR;
2418 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2419 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2421 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2422 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2423 skey = SSD_KEY_MEDIUM_ERROR;
2424 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2425 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2427 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2428 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2429 skey = SSD_KEY_MEDIUM_ERROR;
2430 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2431 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2433 case NVME_SC_COMPARE_FAILURE:
2434 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2435 skey = SSD_KEY_MISCOMPARE;
2436 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2437 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2439 case NVME_SC_ACCESS_DENIED:
2440 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2441 skey = SSD_KEY_ILLEGAL_REQUEST;
2442 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2443 ascq = SCSI_ASCQ_INVALID_LUN_ID;
/*
 * Synthesize fixed-format sense data into the CCB and compute the
 * sense residual so CAM sees an equivalent SCSI autosense result.
 */
2449 returned_sense_len = sizeof(struct scsi_sense_data);
2450 if (returned_sense_len < ccb->csio.sense_len)
2451 ccb->csio.sense_resid = ccb->csio.sense_len -
2454 ccb->csio.sense_resid = 0;
2456 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2457 1, skey, asc, ascq, SSD_ELEM_NONE);
2458 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2463 /** mprsas_complete_nvme_unmap
2465 * Complete native NVMe command issued using NVMe Encapsulated
/*
 * NOTE(review): trailing lines of this function (presumably the return
 * of scsi_status) are not visible in this chunk — confirm against the
 * full source. If the IOC reports a non-zero ErrorResponseCount, the
 * NVMe completion stashed in cm_sense is translated to a SCSI status.
 */
2469 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2471 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2472 struct nvme_completion *nvme_completion = NULL;
2473 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2475 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2476 if (le16toh(mpi_reply->ErrorResponseCount)){
2477 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2478 scsi_status = mprsas_nvme_trans_status_code(
2479 nvme_completion->status, cm);
/*
 * Completion handler for XPT_SCSI_IO commands: translates the MPI reply
 * (IOCStatus/SCSIStatus/SCSIState) into a CAM CCB status, copies sense
 * data, manages SIM-queue freeze/unfreeze, and frees the command.
 * NOTE(review): this chunk appears to have lines elided (braces, else
 * branches, break statements); comments describe visible statements only.
 */
2485 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2487 MPI2_SCSI_IO_REPLY *rep;
2489 struct ccb_scsiio *csio;
2490 struct mprsas_softc *sassc;
2491 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2492 u8 *TLR_bits, TLR_on, *scsi_cdb;
2495 struct mprsas_target *target;
2496 target_id_t target_id;
2499 mpr_dprint(sc, MPR_TRACE,
2500 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2501 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2502 cm->cm_targ->outstanding);
/* The command completed; its timeout callout must not fire anymore. */
2504 callout_stop(&cm->cm_callout);
2505 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2508 ccb = cm->cm_complete_data;
2510 target_id = csio->ccb_h.target_id;
2511 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2513 * XXX KDM if the chain allocation fails, does it matter if we do
2514 * the sync and unload here? It is simpler to do it in every case,
2515 * assuming it doesn't cause problems.
/* Sync and unload the data DMA map in the direction the I/O used. */
2517 if (cm->cm_data != NULL) {
2518 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2519 dir = BUS_DMASYNC_POSTREAD;
2520 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2521 dir = BUS_DMASYNC_POSTWRITE;
2522 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2523 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Per-target accounting: this command is no longer outstanding. */
2526 cm->cm_targ->completed++;
2527 cm->cm_targ->outstanding--;
2528 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2529 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
/* Recovery-path logging: timed-out command completing late. */
2531 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2532 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2533 cm->cm_state = MPR_CM_STATE_BUSY;
2534 if (cm->cm_reply != NULL)
2535 mprsas_log_command(cm, MPR_RECOVERY,
2536 "completed timedout cm %p ccb %p during recovery "
2537 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2538 le16toh(rep->IOCStatus), rep->SCSIStatus,
2539 rep->SCSIState, le32toh(rep->TransferCount));
2541 mprsas_log_command(cm, MPR_RECOVERY,
2542 "completed timedout cm %p ccb %p during recovery\n",
2544 } else if (cm->cm_targ->tm != NULL) {
2545 if (cm->cm_reply != NULL)
2546 mprsas_log_command(cm, MPR_RECOVERY,
2547 "completed cm %p ccb %p during recovery "
2548 "ioc %x scsi %x state %x xfer %u\n",
2549 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2550 rep->SCSIStatus, rep->SCSIState,
2551 le32toh(rep->TransferCount));
2553 mprsas_log_command(cm, MPR_RECOVERY,
2554 "completed cm %p ccb %p during recovery\n",
2556 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2557 mprsas_log_command(cm, MPR_RECOVERY,
2558 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2561 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2563 * We ran into an error after we tried to map the command,
2564 * so we're getting a callback without queueing the command
2565 * to the hardware. So we set the status here, and it will
2566 * be retained below. We'll go through the "fast path",
2567 * because there can be no reply when we haven't actually
2568 * gone out to the hardware.
2570 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2573 * Currently the only error included in the mask is
2574 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2575 * chain frames. We need to freeze the queue until we get
2576 * a command that completed without this error, which will
2577 * hopefully have some chain frames attached that we can
2578 * use. If we wanted to get smarter about it, we would
2579 * only unfreeze the queue in this condition when we're
2580 * sure that we're getting some chain frames back. That's
2581 * probably unnecessary.
2583 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2584 xpt_freeze_simq(sassc->sim, 1);
2585 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2586 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2587 "freezing SIM queue\n");
2592 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2593 * flag, and use it in a few places in the rest of this function for
2594 * convenience. Use the macro if available.
2596 #if __FreeBSD_version >= 1100103
2597 scsi_cdb = scsiio_cdb_ptr(csio);
2599 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2600 scsi_cdb = csio->cdb_io.cdb_ptr;
2602 scsi_cdb = csio->cdb_io.cdb_bytes;
2606 * If this is a Start Stop Unit command and it was issued by the driver
2607 * during shutdown, decrement the refcount to account for all of the
2608 * commands that were sent. All SSU commands should be completed before
2609 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2612 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2613 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2617 /* Take the fast path to completion */
2618 if (cm->cm_reply == NULL) {
/* No reply frame means implicit success unless a reset intervened. */
2619 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2620 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2621 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2623 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2624 csio->scsi_status = SCSI_STATUS_OK;
2626 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2627 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2628 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2629 mpr_dprint(sc, MPR_XINFO,
2630 "Unfreezing SIM queue\n");
2635 * There are two scenarios where the status won't be
2636 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2637 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2639 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2641 * Freeze the dev queue so that commands are
2642 * executed in the correct order after error
2645 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2646 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2648 mpr_free_command(sc, cm);
2653 target = &sassc->targets[target_id];
/* UNMAP on an NVMe drive was translated to NVMe DSM; fix up status. */
2654 if (scsi_cdb[0] == UNMAP &&
2656 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2657 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2658 csio->scsi_status = rep->SCSIStatus;
2661 mprsas_log_command(cm, MPR_XINFO,
2662 "ioc %x scsi %x state %x xfer %u\n",
2663 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2664 le32toh(rep->TransferCount));
/* Main translation from MPI IOCStatus to CAM CCB status. */
2666 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2667 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2668 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2670 case MPI2_IOCSTATUS_SUCCESS:
2671 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2672 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2673 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2674 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2676 /* Completion failed at the transport level. */
2677 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2678 MPI2_SCSI_STATE_TERMINATED)) {
2679 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2683 /* In a modern packetized environment, an autosense failure
2684 * implies that there's not much else that can be done to
2685 * recover the command.
2687 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2688 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2693 * CAM doesn't care about SAS Response Info data, but if this is
2694 * the state check if TLR should be done. If not, clear the
2695 * TLR_bits for the target.
2697 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2698 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2699 == MPR_SCSI_RI_INVALID_FRAME)) {
2700 sc->mapping_table[target_id].TLR_bits =
2701 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2705 * Intentionally override the normal SCSI status reporting
2706 * for these two cases. These are likely to happen in a
2707 * multi-initiator environment, and we want to make sure that
2708 * CAM retries these commands rather than fail them.
2710 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2711 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2712 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2716 /* Handle normal status and sense */
2717 csio->scsi_status = rep->SCSIStatus;
2718 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2719 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2721 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
/* Copy autosense data (bounded) into the CCB and set the residual. */
2723 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2724 int sense_len, returned_sense_len;
2726 returned_sense_len = min(le32toh(rep->SenseCount),
2727 sizeof(struct scsi_sense_data));
2728 if (returned_sense_len < csio->sense_len)
2729 csio->sense_resid = csio->sense_len -
2732 csio->sense_resid = 0;
2734 sense_len = min(returned_sense_len,
2735 csio->sense_len - csio->sense_resid);
2736 bzero(&csio->sense_data, sizeof(csio->sense_data));
2737 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2738 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2742 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2743 * and it's page code 0 (Supported Page List), and there is
2744 * inquiry data, and this is for a sequential access device, and
2745 * the device is an SSP target, and TLR is supported by the
2746 * controller, turn the TLR_bits value ON if page 0x90 is
2749 if ((scsi_cdb[0] == INQUIRY) &&
2750 (scsi_cdb[1] & SI_EVPD) &&
2751 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2752 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2753 (csio->data_ptr != NULL) &&
2754 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2755 (sc->control_TLR) &&
2756 (sc->mapping_table[target_id].device_info &
2757 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2758 vpd_list = (struct scsi_vpd_supported_page_list *)
2760 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2761 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2762 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2763 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2764 alloc_len -= csio->resid;
2765 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2766 if (vpd_list->list[i] == 0x90) {
2774 * If this is a SATA direct-access end device, mark it so that
2775 * a SCSI StartStopUnit command will be sent to it when the
2776 * driver is being shutdown.
2778 if ((scsi_cdb[0] == INQUIRY) &&
2779 (csio->data_ptr != NULL) &&
2780 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2781 (sc->mapping_table[target_id].device_info &
2782 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2783 ((sc->mapping_table[target_id].device_info &
2784 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2785 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2786 target = &sassc->targets[target_id];
2787 target->supports_SSU = TRUE;
2788 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2792 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2793 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2795 * If devinfo is 0 this will be a volume. In that case don't
2796 * tell CAM that the volume is not there. We want volumes to
2797 * be enumerated until they are deleted/removed, not just
2800 if (cm->cm_targ->devinfo == 0)
2801 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2803 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2805 case MPI2_IOCSTATUS_INVALID_SGL:
2806 mpr_print_scsiio_cmd(sc, cm);
2807 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2809 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2811 * This is one of the responses that comes back when an I/O
2812 * has been aborted. If it is because of a timeout that we
2813 * initiated, just set the status to CAM_CMD_TIMEOUT.
2814 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2815 * command is the same (it gets retried, subject to the
2816 * retry counter), the only difference is what gets printed
2819 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2820 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2822 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2824 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2825 /* resid is ignored for this condition */
2827 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2829 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2830 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2832 * These can sometimes be transient transport-related
2833 * errors, and sometimes persistent drive-related errors.
2834 * We used to retry these without decrementing the retry
2835 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2836 * we hit a persistent drive problem that returns one of
2837 * these error codes, we would retry indefinitely. So,
2838 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2839 * count and avoid infinite retries. We're taking the
2840 * potential risk of flagging false failures in the event
2841 * of a topology-related error (e.g. a SAS expander problem
2842 * causes a command addressed to a drive to fail), but
2843 * avoiding getting into an infinite retry loop.
2845 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2846 mpr_dprint(sc, MPR_INFO,
2847 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2848 mpr_describe_table(mpr_iocstatus_string,
2849 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2850 target_id, cm->cm_desc.Default.SMID,
2851 le32toh(rep->IOCLogInfo));
2852 mpr_dprint(sc, MPR_XINFO,
2853 "SCSIStatus %x SCSIState %x xfercount %u\n",
2854 rep->SCSIStatus, rep->SCSIState,
2855 le32toh(rep->TransferCount));
2857 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2858 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2859 case MPI2_IOCSTATUS_INVALID_VPID:
2860 case MPI2_IOCSTATUS_INVALID_FIELD:
2861 case MPI2_IOCSTATUS_INVALID_STATE:
2862 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2863 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2864 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2865 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2866 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2868 mprsas_log_command(cm, MPR_XINFO,
2869 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2870 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2871 rep->SCSIStatus, rep->SCSIState,
2872 le32toh(rep->TransferCount));
2873 csio->resid = cm->cm_length;
2875 if (scsi_cdb[0] == UNMAP &&
2877 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2878 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2880 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2885 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
/* Release a frozen SIM queue now that a command has completed. */
2887 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2888 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2889 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2890 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
/* On any error, freeze the device queue to preserve ordering. */
2894 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2895 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2896 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2899 mpr_free_command(sc, cm);
2903 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests: checks IOCStatus,
 * SASStatus and the SMP frame result, sets the CCB status, then syncs
 * and unloads the bidirectional DMA map and frees the command.
 * NOTE(review): some lines (braces, xpt_done/return paths) appear
 * elided in this chunk — confirm against the full source.
 */
2905 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2907 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2908 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2912 ccb = cm->cm_complete_data;
2915 * Currently there should be no way we can hit this case. It only
2916 * happens when we have a failure to allocate chain frames, and SMP
2917 * commands require two S/G elements only. That should be handled
2918 * in the standard request size.
2920 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2921 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2922 "request!\n", __func__, cm->cm_flags);
2923 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2927 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2929 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2930 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request. */
2934 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2935 sasaddr = le32toh(req->SASAddress.Low);
2936 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2938 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2939 MPI2_IOCSTATUS_SUCCESS ||
2940 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2941 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2942 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2943 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2947 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2948 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame carries the function result. */
2950 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2951 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2953 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2957 * We sync in both directions because we had DMAs in the S/G list
2958 * in both directions.
2960 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2961 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2962 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2963 mpr_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the given SAS address.
 * Resolves the request/response buffers from the CCB (virtual addresses
 * only; physical S/G not supported), sets up a two-element uio (request
 * out, response in), and maps the command for DMA.
 * NOTE(review): some lines (braces, returns, "bailout" label) appear
 * elided in this chunk — confirm against the full source.
 */
2968 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2970 struct mpr_command *cm;
2971 uint8_t *request, *response;
2972 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2973 struct mpr_softc *sc;
2981 #if (__FreeBSD_version >= 1000028) || \
2982 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2983 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2984 case CAM_DATA_PADDR:
2985 case CAM_DATA_SG_PADDR:
2987 * XXX We don't yet support physical addresses here.
2989 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2990 "supported\n", __func__);
2991 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2996 * The chip does not support more than one buffer for the
2997 * request or response.
2999 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3000 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3001 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3002 "response buffer segments not supported for SMP\n",
3004 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3010 * The CAM_SCATTER_VALID flag was originally implemented
3011 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3012 * We have two. So, just take that flag to mean that we
3013 * might have S/G lists, and look at the S/G segment count
3014 * to figure out whether that is the case for each individual
3017 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3018 bus_dma_segment_t *req_sg;
3020 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3021 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3023 request = ccb->smpio.smp_request;
3025 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3026 bus_dma_segment_t *rsp_sg;
3028 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3029 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3031 response = ccb->smpio.smp_response;
3033 case CAM_DATA_VADDR:
3034 request = ccb->smpio.smp_request;
3035 response = ccb->smpio.smp_response;
3038 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3042 #else /* __FreeBSD_version < 1000028 */
3044 * XXX We don't yet support physical addresses here.
3046 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3047 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3048 "supported\n", __func__);
3049 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3055 * If the user wants to send an S/G list, check to make sure they
3056 * have single buffers.
3058 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3060 * The chip does not support more than one buffer for the
3061 * request or response.
3063 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3064 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3065 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3066 "response buffer segments not supported for SMP\n",
3068 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3074 * The CAM_SCATTER_VALID flag was originally implemented
3075 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3076 * We have two. So, just take that flag to mean that we
3077 * might have S/G lists, and look at the S/G segment count
3078 * to figure out whether that is the case for each individual
3081 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3082 bus_dma_segment_t *req_sg;
3084 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3085 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3087 request = ccb->smpio.smp_request;
3089 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3090 bus_dma_segment_t *rsp_sg;
3092 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3093 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3095 response = ccb->smpio.smp_response;
3097 request = ccb->smpio.smp_request;
3098 response = ccb->smpio.smp_response;
3100 #endif /* __FreeBSD_version < 1000028 */
3102 cm = mpr_alloc_command(sc);
3104 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3106 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the MPI SMP passthrough request frame. */
3111 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3112 bzero(req, sizeof(*req));
3113 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3115 /* Allow the chip to use any route to this SAS address. */
3116 req->PhysicalPort = 0xff;
3118 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3120 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3122 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3123 "%#jx\n", __func__, (uintmax_t)sasaddr);
3125 mpr_init_sge(cm, req, &req->SGL);
3128 * Set up a uio to pass into mpr_map_command(). This allows us to
3129 * do one map command, and one busdma call in there.
3131 cm->cm_uio.uio_iov = cm->cm_iovec;
3132 cm->cm_uio.uio_iovcnt = 2;
3133 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3136 * The read/write flag isn't used by busdma, but set it just in
3137 * case. This isn't exactly accurate, either, since we're going in
3140 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 = outbound SMP request, iovec 1 = inbound SMP response. */
3142 cm->cm_iovec[0].iov_base = request;
3143 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3144 cm->cm_iovec[1].iov_base = response;
3145 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3147 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3148 cm->cm_iovec[1].iov_len;
3151 * Trigger a warning message in mpr_data_cb() for the user if we
3152 * wind up exceeding two S/G segments. The chip expects one
3153 * segment for the request and another for the response.
3155 cm->cm_max_segs = 2;
3157 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3158 cm->cm_complete = mprsas_smpio_complete;
3159 cm->cm_complete_data = ccb;
3162 * Tell the mapping code that we're using a uio, and that this is
3163 * an SMP passthrough request. There is a little special-case
3164 * logic there (in mpr_data_cb()) to handle the bidirectional
3167 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3168 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3170 /* The chip data format is little endian. */
3171 req->SASAddress.High = htole32(sasaddr >> 32);
3172 req->SASAddress.Low = htole32(sasaddr);
3175 * XXX Note that we don't have a timeout/abort mechanism here.
3176 * From the manual, it looks like task management requests only
3177 * work for SCSI IO and SATA passthrough requests. We may need to
3178 * have a mechanism to retry requests in the event of a chip reset
3179 * at least. Hopefully the chip will insure that any errors short
3180 * of that are relayed back to the driver.
3182 error = mpr_map_command(sc, cm);
3183 if ((error != 0) && (error != EINPROGRESS)) {
3184 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3185 "mpr_map_command()\n", __func__, error);
/* Error path: release the command and report resource shortage. */
3192 mpr_free_command(sc, cm);
3193 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address the SMP request
 * should be routed to (the target itself if it embeds an SMP target,
 * otherwise its parent expander) and hand off to mprsas_send_smpcmd().
 * NOTE(review): some lines (braces, xpt_done calls, labels) appear
 * elided in this chunk — confirm against the full source.
 */
3199 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3201 struct mpr_softc *sc;
3202 struct mprsas_target *targ;
3203 uint64_t sasaddr = 0;
3208 * Make sure the target exists.
3210 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3211 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3212 targ = &sassc->targets[ccb->ccb_h.target_id];
3213 if (targ->handle == 0x0) {
3214 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3215 __func__, ccb->ccb_h.target_id);
3216 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3222 * If this device has an embedded SMP target, we'll talk to it
3224 * figure out what the expander's address is.
3226 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3227 sasaddr = targ->sasaddr;
3230 * If we don't have a SAS address for the expander yet, try
3231 * grabbing it from the page 0x83 information cached in the
3232 * transport layer for this target. LSI expanders report the
3233 * expander SAS address as the port-associated SAS address in
3234 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3237 * XXX KDM disable this for now, but leave it commented out so that
3238 * it is obvious that this is another possible way to get the SAS
3241 * The parent handle method below is a little more reliable, and
3242 * the other benefit is that it works for devices other than SES
3243 * devices. So you can send a SMP request to a da(4) device and it
3244 * will get routed to the expander that device is attached to.
3245 * (Assuming the da(4) device doesn't contain an SMP target...)
3249 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3253 * If we still don't have a SAS address for the expander, look for
3254 * the parent device of this device, which is probably the expander.
3257 #ifdef OLD_MPR_PROBE
3258 struct mprsas_target *parent_target;
3261 if (targ->parent_handle == 0x0) {
3262 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3263 "a valid parent handle!\n", __func__, targ->handle);
3264 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3267 #ifdef OLD_MPR_PROBE
/* Legacy probe path: resolve the parent target by handle lookup. */
3268 parent_target = mprsas_find_target_by_handle(sassc, 0,
3269 targ->parent_handle);
3271 if (parent_target == NULL) {
3272 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3273 "a valid parent target!\n", __func__, targ->handle);
3274 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3278 if ((parent_target->devinfo &
3279 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3280 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3281 "does not have an SMP target!\n", __func__,
3282 targ->handle, parent_target->handle);
3283 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3287 sasaddr = parent_target->sasaddr;
3288 #else /* OLD_MPR_PROBE */
/* Current path: parent devinfo/sasaddr are cached on the target. */
3289 if ((targ->parent_devinfo &
3290 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3291 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3292 "does not have an SMP target!\n", __func__,
3293 targ->handle, targ->parent_handle);
3294 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3298 if (targ->parent_sasaddr == 0x0) {
3299 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3300 "%d does not have a valid SAS address!\n", __func__,
3301 targ->handle, targ->parent_handle);
3302 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3306 sasaddr = targ->parent_sasaddr;
3307 #endif /* OLD_MPR_PROBE */
3312 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3313 "handle %d\n", __func__, targ->handle);
3314 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3317 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3325 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command and
 * issue a target reset for the addressed device. NVMe devices that lack
 * custom TM handling get a PCIe protocol-level reset instead of a
 * SAS/SATA link reset. Completion is handled by
 * mprsas_resetdev_complete().
 * NOTE(review): some lines (braces, xpt_done on the alloc-failure path)
 * appear elided in this chunk — confirm against the full source.
 */
3328 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3330 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3331 struct mpr_softc *sc;
3332 struct mpr_command *tm;
3333 struct mprsas_target *targ;
3335 MPR_FUNCTRACE(sassc->sc);
3336 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3338 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3339 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3341 tm = mprsas_alloc_tm(sc);
3343 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3344 "mprsas_action_resetdev\n");
3345 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the target-reset task management request. */
3350 targ = &sassc->targets[ccb->ccb_h.target_id];
3351 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3352 req->DevHandle = htole16(targ->handle);
3353 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3355 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3356 /* SAS Hard Link Reset / SATA Link Reset */
3357 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3359 /* PCIe Protocol Level Reset*/
3361 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3365 tm->cm_complete = mprsas_resetdev_complete;
3366 tm->cm_complete_data = ccb;
3368 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3369 __func__, targ->tid);
3372 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3373 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_action_resetdev().  Translates the firmware response into a
 * CAM status on the stashed CCB, announces a bus-device reset to CAM on
 * success, and frees the TM command.
 */
3377 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3379 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3383 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3385 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3386 ccb = tm->cm_complete_data;
3389 * Currently there should be no way we can hit this case. It only
3390 * happens when we have a failure to allocate chain frames, and
3391 * task management commands don't have S/G lists.
3393 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3394 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3396 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3398 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3399 "handle %#04x! This should not happen!\n", __func__,
3400 tm->cm_flags, req->DevHandle);
3401 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3405 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3406 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* TM_COMPLETE means the reset succeeded; tell CAM a BDR was sent. */
3408 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3409 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3410 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3414 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3418 mprsas_free_tm(sc, tm);
/*
 * CAM polled-mode entry point (used e.g. during kernel dumps / panic).
 * MPR_TRACE logging is disabled first because per-command trace output
 * in polled context is far too slow, then the interrupt handler is run
 * directly with the driver lock already held.
 */
3423 mprsas_poll(struct cam_sim *sim)
3425 struct mprsas_softc *sassc;
3427 sassc = cam_sim_softc(sim);
3429 if (sassc->sc->mpr_debug & MPR_TRACE) {
3430 /* frequent debug messages during a panic just slow
3431 * everything down too much.
3433 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3435 sassc->sc->mpr_debug &= ~MPR_TRACE;
3438 mpr_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  Two events are of interest:
 *  - AC_ADVINFO_CHANGED: re-read the long READ CAPACITY data so the
 *    per-LUN EEDP (protection information) state stays current.
 *  - AC_FOUND_DEVICE: on FreeBSD versions without advinfo support,
 *    probe the new device for EEDP via mprsas_check_eedp().
 * The __FreeBSD_version conditionals select which mechanism this build
 * uses and whether events must be filtered by SIM path id.
 */
3442 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3445 struct mpr_softc *sc;
3447 sc = (struct mpr_softc *)callback_arg;
3450 #if (__FreeBSD_version >= 1000006) || \
3451 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3452 case AC_ADVINFO_CHANGED: {
3453 struct mprsas_target *target;
3454 struct mprsas_softc *sassc;
3455 struct scsi_read_capacity_data_long rcap_buf;
3456 struct ccb_dev_advinfo cdai;
3457 struct mprsas_lun *lun;
3462 buftype = (uintptr_t)arg;
3468 * We're only interested in read capacity data changes.
3470 if (buftype != CDAI_TYPE_RCAPLONG)
3474 * See the comment in mpr_attach_sas() for a detailed
3475 * explanation. In these versions of FreeBSD we register
3476 * for all events and filter out the events that don't
3479 #if (__FreeBSD_version < 1000703) || \
3480 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3481 if (xpt_path_path_id(path) != sassc->sim->path_id)
3486 * We should have a handle for this, but check to make sure.
3488 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3489 ("Target %d out of bounds in mprsas_async\n",
3490 xpt_path_target_id(path)));
3491 target = &sassc->targets[xpt_path_target_id(path)];
3492 if (target->handle == 0)
3495 lunid = xpt_path_lun_id(path);
/* Find the LUN in the target's list; allocate it if not present yet. */
3497 SLIST_FOREACH(lun, &target->luns, lun_link) {
3498 if (lun->lun_id == lunid) {
3504 if (found_lun == 0) {
3505 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3508 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3509 "LUN for EEDP support.\n");
3512 lun->lun_id = lunid;
3513 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the long read-capacity data via an XPT_DEV_ADVINFO CCB. */
3516 bzero(&rcap_buf, sizeof(rcap_buf));
3517 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3518 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3519 cdai.ccb_h.flags = CAM_DIR_IN;
3520 cdai.buftype = CDAI_TYPE_RCAPLONG;
3521 #if (__FreeBSD_version >= 1100061) || \
3522 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3523 cdai.flags = CDAI_FLAG_NONE;
3527 cdai.bufsiz = sizeof(rcap_buf);
3528 cdai.buf = (uint8_t *)&rcap_buf;
3529 xpt_action((union ccb *)&cdai);
3530 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3531 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* Record EEDP state: protection enabled and a recognized P_TYPE. */
3533 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3534 && (rcap_buf.prot & SRC16_PROT_EN)) {
3535 switch (rcap_buf.prot & SRC16_P_TYPE) {
3538 lun->eedp_formatted = TRUE;
3539 lun->eedp_block_size =
3540 scsi_4btoul(rcap_buf.length);
3544 lun->eedp_formatted = FALSE;
3545 lun->eedp_block_size = 0;
3549 lun->eedp_formatted = FALSE;
3550 lun->eedp_block_size = 0;
3555 case AC_FOUND_DEVICE: {
3556 struct ccb_getdev *cgd;
3559 * See the comment in mpr_attach_sas() for a detailed
3560 * explanation. In these versions of FreeBSD we register
3561 * for all events and filter out the events that don't
3564 #if (__FreeBSD_version < 1000703) || \
3565 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3566 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3571 #if (__FreeBSD_version < 901503) || \
3572 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3573 mprsas_check_eedp(sc, path, cgd);
3582 #if (__FreeBSD_version < 901503) || \
3583 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * For FreeBSD versions without AC_ADVINFO_CHANGED support: when a new
 * device is reported, decide from its INQUIRY data whether it can carry
 * protection information (EEDP) and, if so, issue an internal READ
 * CAPACITY(16).  mprsas_read_cap_done() records the result in the
 * per-LUN EEDP state used when building protected I/O.
 */
3585 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3586 struct ccb_getdev *cgd)
3588 struct mprsas_softc *sassc = sc->sassc;
3589 struct ccb_scsiio *csio;
3590 struct scsi_read_capacity_16 *scsi_cmd;
3591 struct scsi_read_capacity_eedp *rcap_buf;
3593 target_id_t targetid;
3596 struct cam_path *local_path;
3597 struct mprsas_target *target;
3598 struct mprsas_lun *lun;
3602 pathid = cam_sim_path(sassc->sim);
3603 targetid = xpt_path_target_id(path);
3604 lunid = xpt_path_lun_id(path);
3606 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3607 "mprsas_check_eedp\n", targetid));
/* Target must still be mapped to a firmware device handle. */
3608 target = &sassc->targets[targetid];
3609 if (target->handle == 0x0)
3613 * Determine if the device is EEDP capable.
3615 * If this flag is set in the inquiry data, the device supports
3616 * protection information, and must support the 16 byte read capacity
3617 * command, otherwise continue without sending read cap 16.
3619 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3623 * Issue a READ CAPACITY 16 command. This info is used to determine if
3624 * the LUN is formatted for EEDP support.
3626 ccb = xpt_alloc_ccb_nowait();
3628 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3633 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3635 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3642 * If LUN is already in list, don't create a new one.
3645 SLIST_FOREACH(lun, &target->luns, lun_link) {
3646 if (lun->lun_id == lunid) {
3652 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3655 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3657 xpt_free_path(local_path);
3661 lun->lun_id = lunid;
3662 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3665 xpt_path_string(local_path, path_str, sizeof(path_str));
3666 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3667 path_str, target->handle);
3670 * Issue a READ CAPACITY 16 command for the LUN. The
3671 * mprsas_read_cap_done function will load the read cap info into the
3674 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3676 if (rcap_buf == NULL) {
3677 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3678 "buffer for EEDP support.\n");
/*
 * NOTE(review): xpt_setup_ccb() has not run at this point, so
 * ccb->ccb_h.path may not yet refer to local_path here — confirm the
 * elided error path frees local_path and the CCB correctly.
 */
3679 xpt_free_path(ccb->ccb_h.path);
/* Build the internal XPT_SCSI_IO CCB around the allocated buffer. */
3683 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3685 csio->ccb_h.func_code = XPT_SCSI_IO;
3686 csio->ccb_h.flags = CAM_DIR_IN;
3687 csio->ccb_h.retry_count = 4;
3688 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
/* 60-second timeout (milliseconds) for the internal command. */
3689 csio->ccb_h.timeout = 60000;
3690 csio->data_ptr = (uint8_t *)rcap_buf;
3691 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3692 csio->sense_len = MPR_SENSE_LEN;
3693 csio->cdb_len = sizeof(*scsi_cmd);
3694 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the CDB: 0x9E is SERVICE ACTION IN(16) / READ CAPACITY(16). */
3696 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3697 bzero(scsi_cmd, sizeof(*scsi_cmd));
3698 scsi_cmd->opcode = 0x9E;
3699 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* CDB byte 13 is the low byte of the allocation length. */
3700 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion handler can find driver state. */
3702 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion handler for the internal READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Releases the devq (frozen because this command
 * was generated internally), records per-LUN EEDP formatting state from
 * the returned data — or clears it if the command failed — and frees
 * the data buffer, path, and CCB.
 */
3707 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3709 struct mprsas_softc *sassc;
3710 struct mprsas_target *target;
3711 struct mprsas_lun *lun;
3712 struct scsi_read_capacity_eedp *rcap_buf;
3714 if (done_ccb == NULL)
3717 /* Driver need to release devq, it Scsi command is
3718 * generated by driver internally.
3719 * Currently there is a single place where driver
3720 * calls scsi command internally. In future if driver
3721 * calls more scsi command internally, it needs to release
3722 * devq internally, since those command will not go back to
3725 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3726 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3727 xpt_release_devq(done_ccb->ccb_h.path,
3728 /*count*/ 1, /*run_queue*/TRUE);
3731 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3734 * Get the LUN ID for the path and look it up in the LUN list for the
3737 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3738 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3739 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3740 target = &sassc->targets[done_ccb->ccb_h.target_id];
3741 SLIST_FOREACH(lun, &target->luns, lun_link) {
3742 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3746 * Got the LUN in the target's LUN list. Fill it in with EEDP
3747 * info. If the READ CAP 16 command had some SCSI error (common
3748 * if command is not supported), mark the lun as not supporting
3749 * EEDP and set the block size to 0.
3751 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3752 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3753 lun->eedp_formatted = FALSE;
3754 lun->eedp_block_size = 0;
/* PROT_EN bit: the LUN is formatted with protection information. */
3758 if (rcap_buf->protect & 0x01) {
3759 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3760 "%d is formatted for EEDP support.\n",
3761 done_ccb->ccb_h.target_lun,
3762 done_ccb->ccb_h.target_id);
3763 lun->eedp_formatted = TRUE;
3764 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3769 // Finished with this CCB and path.
3770 free(rcap_buf, M_MPR);
3771 xpt_free_path(done_ccb->ccb_h.path);
3772 xpt_free_ccb(done_ccb);
3774 #endif /* (__FreeBSD_version < 901503) || \
3775 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3778 * Set the INRESET flag for this target so that no I/O will be sent to
3779 * the target until the reset has completed. If an I/O request does
3780 * happen, the devq will be frozen. The CCB holds the path which is
3781 * used to release the devq. The devq is released and the CCB is freed
3782 * when the TM completes.
/*
 * Mark a target as being reset (MPRSAS_TARGET_INRESET) so no new I/O is
 * started against it until the TM completes.  The CCB allocated here
 * carries the path later used to release the devq, which freezes if an
 * I/O request does arrive during the reset; both are handled/freed when
 * the TM completes.
 */
3785 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3786 struct mprsas_target *target, lun_id_t lun_id)
3791 ccb = xpt_alloc_ccb_nowait();
3793 path_id = cam_sim_path(sc->sassc->sim);
3794 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3795 target->tid, lun_id) != CAM_REQ_CMP) {
3799 tm->cm_targ = target;
3800 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off topology discovery: raise wait_for_port_enable so the simq
 * stays frozen until discovery finishes, then send the PORT_ENABLE
 * request to the IOC.
 */
3806 mprsas_startup(struct mpr_softc *sc)
3809 * Send the port enable message and set the wait_for_port_enable flag.
3810 * This flag helps to keep the simq frozen until all discovery events
3813 sc->wait_for_port_enable = 1;
3814 mprsas_send_portenable(sc);
/*
 * Build and queue the MPI PORT_ENABLE request that tells the IOC to
 * begin port discovery and start posting device events.  Completion is
 * routed to mprsas_portenable_complete().
 */
3819 mprsas_send_portenable(struct mpr_softc *sc)
3821 MPI2_PORT_ENABLE_REQUEST *request;
3822 struct mpr_command *cm;
3826 if ((cm = mpr_alloc_command(sc)) == NULL)
3828 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3829 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3830 request->MsgFlags = 0;
/* Default request descriptor; completion via mprsas_portenable_complete. */
3832 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3833 cm->cm_complete = mprsas_portenable_complete;
3837 mpr_map_command(sc, cm);
3838 mpr_dprint(sc, MPR_XINFO,
3839 "mpr_send_portenable finished cm %p req %p complete %p\n",
3840 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT_ENABLE request sent by
 * mprsas_send_portenable().  Logs failures, frees the command, clears
 * the wait_for_port_enable gate, wakes any thread sleeping on
 * port_enable_complete, and drops the startup refcount so the simq can
 * be released once discovery finishes.
 */
3845 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3847 MPI2_PORT_ENABLE_REPLY *reply;
3848 struct mprsas_softc *sassc;
3854 * Currently there should be no way we can hit this case. It only
3855 * happens when we have a failure to allocate chain frames, and
3856 * port enable commands don't have S/G lists.
3858 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3859 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3860 "This should not happen!\n", __func__, cm->cm_flags);
3863 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3865 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/*
 * Byte-swap IOCStatus before masking: MPI2_IOCSTATUS_MASK is a
 * host-order constant, so masking the raw little-endian value first
 * gives the wrong result on big-endian machines.
 */
3866 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3867 MPI2_IOCSTATUS_SUCCESS)
3868 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3870 mpr_free_command(sc, cm);
3872 * Done waiting for port enable to complete. Decrement the refcount.
3873 * If refcount is 0, discovery is complete and a rescan of the bus can
3876 sc->wait_for_port_enable = 0;
3877 sc->port_enable_complete = 1;
3878 wakeup(&sc->port_enable_complete);
3879 mprsas_startup_decrement(sassc);
/*
 * Return whether target id `id` appears in the driver's comma-separated
 * exclude-id list (sc->exclude_ids); empty fields are skipped.
 * NOTE(review): strsep() advances the `ids` cursor through the
 * underlying buffer; the visible lines do not show how that buffer is
 * prepared per call — confirm against the elided setup code.
 */
3883 mprsas_check_id(struct mprsas_softc *sassc, int id)
3885 struct mpr_softc *sc = sassc->sc;
3889 ids = &sc->exclude_ids[0];
3890 while((name = strsep(&ids, ",")) != NULL) {
3891 if (name[0] == '\0')
3893 if (strtol(name, NULL, 0) == (long)id)
3901 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3903 struct mprsas_softc *sassc;
3904 struct mprsas_lun *lun, *lun_tmp;
3905 struct mprsas_target *targ;
3910 * The number of targets is based on IOC Facts, so free all of
3911 * the allocated LUNs for each target and then the target buffer
3914 for (i=0; i< maxtargets; i++) {
3915 targ = &sassc->targets[i];
3916 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3920 free(sassc->targets, M_MPR);
3922 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3923 M_MPR, M_WAITOK|M_ZERO);
3924 if (!sassc->targets) {
3925 panic("%s failed to alloc targets with error %d\n",