2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for Avago Technologies (LSI) MPT3 */
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
75 #include <dev/nvme/nvme.h>
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
90 #define MPRSAS_DISCOVERY_TIMEOUT 20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
94 * static array to check SCSI OpCode for EEDP protection bits
/*
 * EEDP (End-to-End Data Protection) flag shorthands used to populate the
 * opcode lookup table below:
 *   PRO_R - check/remove protection info on reads
 *   PRO_W - insert protection info on writes
 *   PRO_V - insert protection info (verify-class opcodes share the write flag)
 */
96 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Table indexed by SCSI CDB opcode (0x00-0xFF), 16 entries per row.  A
 * non-zero entry gives the EEDP flags to set in the MPI SCSI IO request for
 * that opcode.  The populated entries correspond to the READ/WRITE/VERIFY
 * opcode families, e.g. 0x28 READ(10), 0x2A WRITE(10), 0x2E WRITE AND
 * VERIFY(10), 0x2F VERIFY(10), 0x41 WRITE SAME(10), 0x88/0x8A/0x8E/0x8F
 * (16-byte variants), 0x93 WRITE SAME(16), 0xA8/0xAA/0xAE/0xAF (12-byte
 * variants).
 * NOTE(review): the closing "};" of this initializer is not visible in this
 * excerpt; source lines appear elided.
 */
99 static uint8_t op_code_prot[256] = {
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* Malloc type used for all allocations owned by the mpr SAS subsystem. */
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
/*
 * Forward declarations for the file-local (static) CAM glue: the sim
 * action/poll entry points, SCSI/SMP command completion handlers, task
 * management helpers, and async event callbacks.  The EEDP probe helpers
 * are only compiled on FreeBSD versions that lack native EEDP support in
 * CAM (see the version guards below).
 */
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131 struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133 struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137 struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139 union ccb *done_ccb);
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143 struct mpr_command *cm);
/* SMP passthrough is only available on FreeBSD >= 900026 (smp_all.h). */
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
/*
 * Linear search of the sassc->targets[] array for the target whose firmware
 * device handle matches 'handle', beginning at index 'start' so callers can
 * resume a previous scan.  Returns the matching target, presumably NULL when
 * no entry matches (the return statements are elided in this excerpt).
 */
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
156 struct mprsas_target *target;
159 for (i = start; i < sassc->maxtargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery.  Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
*/
/*
 * Bump the discovery refcount while in startup mode; the first increment
 * (refcount 0 -> 1) freezes the CAM simq so no I/O is dispatched until
 * discovery finishes (see mprsas_startup_decrement for the release path).
 * The version-guarded branch exists because PIM_NOSCAN-capable CAM versions
 * handle the freeze differently; the #else arm is elided in this excerpt.
 */
176 mprsas_startup_increment(struct mprsas_softc *sassc)
178 MPR_FUNCTRACE(sassc->sc);
/* Only refcount while attach/diag-reset discovery is in progress. */
180 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 if (sassc->startup_refcount++ == 0) {
182 /* just starting, freeze the simq */
183 mpr_dprint(sassc->sc, MPR_INIT,
184 "%s freezing simq\n", __func__);
185 #if (__FreeBSD_version >= 1000039) || \
186 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
189 xpt_freeze_simq(sassc->sim, 1);
191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 sassc->startup_refcount);
/*
 * If the simq was frozen for a controller reinit (MPRSAS_QUEUE_FROZEN),
 * clear the flag and release the simq so CAM resumes dispatching I/O.
 * No-op when the flag is not set.
 */
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 xpt_release_simq(sassc->sim, 1);
202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Counterpart of mprsas_startup_increment(): drop the discovery refcount
 * and, when it reaches zero, leave startup mode, release the simq, and
 * (on CAM versions with PIM_NOSCAN) trigger a full-bus rescan so peripheral
 * drivers attach to the newly discovered topology.  The #else arm of the
 * version guard is elided in this excerpt.
 */
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 MPR_FUNCTRACE(sassc->sc);
211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 if (--sassc->startup_refcount == 0) {
213 /* finished all discovery-related actions, release
214 * the simq and rescan for the latest topology.
*/
216 mpr_dprint(sassc->sc, MPR_INIT,
217 "%s releasing simq\n", __func__);
218 sassc->flags &= ~MPRSAS_IN_STARTUP;
219 xpt_release_simq(sassc->sim, 1);
220 #if (__FreeBSD_version >= 1000039) || \
221 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
/* NULL target means wildcard: rescan the whole bus. */
224 mprsas_rescan_target(sassc->sc, NULL);
227 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
228 sassc->startup_refcount);
232 /* The firmware requires us to stop sending commands when we're doing task
233 * management, so refcount the TMs and keep the simq frozen when any are in
*/
/*
 * Allocate a high-priority command frame for a task-management request.
 * Thin wrapper over mpr_alloc_high_priority_command(); the return/cleanup
 * lines are elided in this excerpt.
 */
237 mprsas_alloc_tm(struct mpr_softc *sc)
239 struct mpr_command *tm;
242 tm = mpr_alloc_high_priority_command(sc);
/*
 * Release a task-management command allocated by mprsas_alloc_tm().
 * Clears the target's INRESET flag and unfreezes the per-device queue that
 * was frozen when the TM was issued, frees the CCB/path used for the
 * freeze, then returns the command frame to the high-priority pool.
 */
247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
249 int target_id = 0xFFFFFFFF;
/*
256 * For TM's the devq is frozen for the device.  Unfreeze it here and
257 * free the resources used for freezing the devq.  Must clear the
258 * INRESET flag as well or scsi I/O will not work.
*/
260 if (tm->cm_targ != NULL) {
261 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
262 target_id = tm->cm_targ->tid;
265 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
267 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
268 xpt_free_path(tm->cm_ccb->ccb_h.path);
269 xpt_free_ccb(tm->cm_ccb);
272 mpr_free_high_priority_command(sc, tm);
/*
 * Schedule an asynchronous CAM rescan.  A NULL 'targ' requests a wildcard
 * (whole-bus) scan via XPT_SCAN_BUS; otherwise only the one target is
 * scanned via XPT_SCAN_TGT.  The target ID is derived from the target's
 * index in sassc->targets[].  Allocation is nowait because this can be
 * called from event/completion context; failures are logged and the rescan
 * is dropped (error-path lines elided in this excerpt).
 */
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
278 struct mprsas_softc *sassc = sc->sassc;
280 target_id_t targetid;
284 pathid = cam_sim_path(sassc->sim);
286 targetid = CAM_TARGET_WILDCARD;
/* Pointer arithmetic: target's slot index in the array is its CAM ID. */
288 targetid = targ - sassc->targets;
/*
291 * Allocate a CCB and schedule a rescan.
*/
293 ccb = xpt_alloc_ccb_nowait();
295 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
299 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
300 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
306 if (targetid == CAM_TARGET_WILDCARD)
307 ccb->ccb_h.func_code = XPT_SCAN_BUS;
309 ccb->ccb_h.func_code = XPT_SCAN_TGT;
311 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logger for a command.  Builds one line in an sbuf:
 * the CAM path (or a "noperiph" sim/bus/target placeholder when there is no
 * CCB), the SCSI CDB and transfer length for XPT_SCSI_IO requests, the
 * request SMID, and finally the caller's format string.  Returns early when
 * the requested debug level is not enabled, so the formatting cost is only
 * paid when the message would actually be printed.
 */
316 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
326 /* No need to be in here if debugging isn't enabled */
327 if ((cm->cm_sc->mpr_debug & level) == 0)
330 sbuf_new(&sb, str, sizeof(str), 0);
334 if (cm->cm_ccb != NULL) {
335 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
337 sbuf_cat(&sb, path_str);
338 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
339 scsi_command_string(&cm->cm_ccb->csio, &sb);
340 sbuf_printf(&sb, "length %d ",
341 cm->cm_ccb->csio.dxfer_len);
/* No CCB attached: identify the command by sim/bus/target instead. */
344 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
345 cam_sim_name(cm->cm_sc->sassc->sim),
346 cam_sim_unit(cm->cm_sc->sassc->sim),
347 cam_sim_bus(cm->cm_sc->sassc->sim),
348 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
352 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
353 sbuf_vprintf(&sb, fmt, ap);
355 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  The device handle travels in
 * cm_complete_data.  A NULL reply indicates a diag reset raced with the TM;
 * the TM is just freed (the volume state is handled elsewhere).  On success
 * the target bookkeeping (enclosure info, link rate, etc.) is cleared; on
 * IOC error the target is left intact so its ID can be re-used if the
 * volume returns.  Unlike bare-drive removal (mprsas_remove_device), no
 * follow-up SAS_OP_REMOVE_DEVICE is sent for volumes.
 */
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
363 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 struct mprsas_target *targ;
369 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
374 /* XXX retry the remove after the diag reset completes? */
375 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 "0x%04x\n", __func__, handle);
377 mprsas_free_tm(sc, tm);
381 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
382 MPI2_IOCSTATUS_SUCCESS) {
383 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
384 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
387 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
388 le32toh(reply->TerminationCount));
389 mpr_free_reply(sc, tm->cm_reply_data);
390 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
392 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
/*
396 * Don't clear target if remove fails because things will get confusing.
397 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 * this target id if possible, and so we can assign the same target id
399 * to this device if it comes back in the future.
*/
401 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402 MPI2_IOCSTATUS_SUCCESS) {
405 targ->encl_handle = 0x0;
406 targ->encl_level_valid = 0x0;
407 targ->encl_level = 0x0;
/* Connector name is a fixed 4-char field; blank it with spaces. */
408 targ->connector_name[0] = ' ';
409 targ->connector_name[1] = ' ';
410 targ->connector_name[2] = ' ';
411 targ->connector_name[3] = ' ';
412 targ->encl_slot = 0x0;
413 targ->exp_dev_handle = 0x0;
415 targ->linkrate = 0x0;
418 targ->scsi_req_desc_type = 0;
421 mprsas_free_tm(sc, tm);
/*
426 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427 * Otherwise Volume Delete is same as Bare Drive Removal.
*/
/*
 * Begin removal of a RAID volume identified by firmware handle: mark the
 * target INREMOVAL, allocate a high-priority TM, kick off a CAM rescan so
 * peripherals notice the departure, and send a target-reset TM whose
 * completion (mprsas_remove_volume) finishes the cleanup.  Unknown handles
 * and TM allocation failures are logged and abandoned (early-return lines
 * elided in this excerpt).
 */
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
432 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 struct mpr_softc *sc;
434 struct mpr_command *cm;
435 struct mprsas_target *targ = NULL;
437 MPR_FUNCTRACE(sassc->sc);
440 targ = mprsas_find_target_by_handle(sassc, 0, handle);
442 /* FIXME: what is the action? */
443 /* We don't know about this device? */
444 mpr_dprint(sc, MPR_ERROR,
445 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449 targ->flags |= MPRSAS_TARGET_INREMOVAL;
451 cm = mprsas_alloc_tm(sc);
453 mpr_dprint(sc, MPR_ERROR,
454 "%s: command alloc failure\n", __func__);
458 mprsas_rescan_target(sc, targ);
/* Build the target-reset task management request. */
460 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 req->DevHandle = targ->handle;
462 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
465 /* SAS Hard Link Reset / SATA Link Reset */
466 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
470 cm->cm_desc.HighPriority.RequestFlags =
471 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 cm->cm_complete = mprsas_remove_volume;
/* Smuggle the 16-bit handle through the void* completion cookie. */
473 cm->cm_complete_data = (void *)(uintptr_t)handle;
475 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 __func__, targ->tid);
477 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
479 mpr_map_command(sc, cm);
/*
483 * The firmware performs debounce on the link to avoid transient link errors
484 * and false removals.  When it does decide that link has been lost and a
485 * device needs to go away, it expects that the host will perform a target reset
486 * and then an op remove.  The reset has the side-effect of aborting any
487 * outstanding requests for the device, which is required for the op-remove to
488 * succeed.  It's not clear if the host should check for the device coming back
489 * alive after the reset.
*/
/*
 * Begin removal of a bare (non-volume) device: same flow as
 * mprsas_prepare_volume_remove(), but the TM completion handler is
 * mprsas_remove_device(), which follows up with a SAS_OP_REMOVE_DEVICE.
 * Note this variant zeroes the request and byte-swaps DevHandle (htole16),
 * unlike the volume path above.
 */
492 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
494 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
495 struct mpr_softc *sc;
496 struct mpr_command *cm;
497 struct mprsas_target *targ = NULL;
499 MPR_FUNCTRACE(sassc->sc);
503 targ = mprsas_find_target_by_handle(sassc, 0, handle);
505 /* FIXME: what is the action? */
506 /* We don't know about this device? */
507 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
512 targ->flags |= MPRSAS_TARGET_INREMOVAL;
514 cm = mprsas_alloc_tm(sc);
516 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
521 mprsas_rescan_target(sc, targ);
/* Build the target-reset task management request (little-endian handle). */
523 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
524 memset(req, 0, sizeof(*req));
525 req->DevHandle = htole16(targ->handle);
526 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
527 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
529 /* SAS Hard Link Reset / SATA Link Reset */
530 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
534 cm->cm_desc.HighPriority.RequestFlags =
535 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
536 cm->cm_complete = mprsas_remove_device;
537 cm->cm_complete_data = (void *)(uintptr_t)handle;
539 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
540 __func__, targ->tid);
541 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
543 mpr_map_command(sc, cm);
/*
 * Completion handler for the target-reset TM from mprsas_prepare_remove().
 * After logging/validating the reply it REUSES the same command frame to
 * send the MPI2_SAS_OP_REMOVE_DEVICE IO-unit-control request, chaining to
 * mprsas_remove_complete().  Any commands still queued on the target are
 * completed back to CAM with CAM_DEV_NOT_THERE.  A NULL reply means a diag
 * reset intervened and the TM is simply freed.
 */
547 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
549 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
550 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
551 struct mprsas_target *targ;
552 struct mpr_command *next_cm;
557 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
558 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
/*
562 * Currently there should be no way we can hit this case.  It only
563 * happens when we have a failure to allocate chain frames, and
564 * task management commands don't have S/G lists.
*/
566 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
567 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
568 "handle %#04x! This should not happen!\n", __func__,
569 tm->cm_flags, handle);
573 /* XXX retry the remove after the diag reset completes? */
574 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
575 "0x%04x\n", __func__, handle);
576 mprsas_free_tm(sc, tm);
580 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
581 MPI2_IOCSTATUS_SUCCESS) {
582 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
583 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
586 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
587 le32toh(reply->TerminationCount));
588 mpr_free_reply(sc, tm->cm_reply_data);
589 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
591 /* Reuse the existing command */
592 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
593 memset(req, 0, sizeof(*req));
594 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
595 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
596 req->DevHandle = htole16(handle);
598 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
/* Chain to the second stage: op-remove completion. */
599 tm->cm_complete = mprsas_remove_complete;
600 tm->cm_complete_data = (void *)(uintptr_t)handle;
602 mpr_map_command(sc, tm);
604 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
606 if (targ->encl_level_valid) {
607 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
608 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
609 targ->connector_name);
/* Fail any I/O still queued on the departing target back to CAM. */
611 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
614 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
615 ccb = tm->cm_complete_data;
616 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
617 mprsas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion handler for the
 * SAS_OP_REMOVE_DEVICE request issued by mprsas_remove_device().  On
 * success the target's enclosure/link bookkeeping is cleared and any
 * per-LUN records are freed; on failure the target is deliberately left
 * intact (see comment below).  The TM frame is freed in all paths.
 */
622 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
624 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
626 struct mprsas_target *targ;
627 struct mprsas_lun *lun;
631 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
632 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
/*
635 * Currently there should be no way we can hit this case.  It only
636 * happens when we have a failure to allocate chain frames, and
637 * task management commands don't have S/G lists.
*/
639 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
640 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
641 "handle %#04x! This should not happen!\n", __func__,
642 tm->cm_flags, handle);
643 mprsas_free_tm(sc, tm);
648 /* most likely a chip reset */
649 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
650 "0x%04x\n", __func__, handle);
651 mprsas_free_tm(sc, tm);
655 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
656 __func__, handle, le16toh(reply->IOCStatus));
/*
659 * Don't clear target if remove fails because things will get confusing.
660 * Leave the devname and sasaddr intact so that we know to avoid reusing
661 * this target id if possible, and so we can assign the same target id
662 * to this device if it comes back in the future.
*/
664 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
665 MPI2_IOCSTATUS_SUCCESS) {
668 targ->encl_handle = 0x0;
669 targ->encl_level_valid = 0x0;
670 targ->encl_level = 0x0;
/* Connector name is a fixed 4-char field; blank it with spaces. */
671 targ->connector_name[0] = ' ';
672 targ->connector_name[1] = ' ';
673 targ->connector_name[2] = ' ';
674 targ->connector_name[3] = ' ';
675 targ->encl_slot = 0x0;
676 targ->exp_dev_handle = 0x0;
678 targ->linkrate = 0x0;
681 targ->scsi_req_desc_type = 0;
/* Drain and free the per-target LUN list. */
683 while (!SLIST_EMPTY(&targ->luns)) {
684 lun = SLIST_FIRST(&targ->luns);
685 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
690 mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask the SAS subsystem cares about (device
 * status, discovery, topology, enclosure, IR/RAID, temperature) and
 * register mprsas_evt_handler for it.  PCIe-related events are only
 * requested from MPI 2.6+ firmware on Gen3.5 IOCs, and active-cable
 * exceptions from MPI 2.6+ generally.
 */
694 mprsas_register_events(struct mpr_softc *sc)
699 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 setbit(events, MPI2_EVENT_IR_VOLUME);
708 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
720 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 &sc->sassc->mprsas_eh);
/*
 * Attach the SAS/CAM subsystem for an mpr controller.  Allocates the
 * mprsas_softc and target array, creates the CAM simq/sim, starts the
 * firmware-event taskqueue, registers a single fake bus with CAM, freezes
 * the simq until discovery completes, registers async callbacks used for
 * EEDP detection, and finally registers for firmware events.  Error paths
 * (gotos/labels) are elided in this excerpt; the exit debug print logs the
 * final error code.
 */
727 mpr_attach_sas(struct mpr_softc *sc)
729 struct mprsas_softc *sassc;
734 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
/* M_WAITOK cannot fail, but the historical NULL check is kept below. */
736 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
738 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
739 "Cannot allocate SAS subsystem memory\n");
/*
744 * XXX MaxTargets could change during a reinit.  Since we don't
745 * resize the targets[] array during such an event, cache the value
746 * of MaxTargets here so that we don't get into trouble later.  This
747 * should move into the reinit logic.
*/
749 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
750 sassc->targets = malloc(sizeof(struct mprsas_target) *
751 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
752 if (!sassc->targets) {
753 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
754 "Cannot allocate SAS target memory\n");
/* simq depth matches the controller's request-frame count. */
761 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
762 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
767 unit = device_get_unit(sc->mpr_dev);
768 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
769 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
770 if (sassc->sim == NULL) {
771 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
776 TAILQ_INIT(&sassc->ev_queue);
778 /* Initialize taskqueue for Event Handling */
779 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
780 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
781 taskqueue_thread_enqueue, &sassc->ev_tq);
782 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
783 device_get_nameunit(sc->mpr_dev));
/*
788 * XXX There should be a bus for every port on the adapter, but since
789 * we're just going to fake the topology for now, we'll pretend that
790 * everything is just a target on a single bus.
*/
792 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
793 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
794 "Error %d registering SCSI bus\n", error);
/*
800 * Assume that discovery events will start right away.
802 * Hold off boot until discovery is complete.
*/
804 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
805 sc->sassc->startup_refcount = 0;
/* Freezes the simq until discovery drains the refcount back to zero. */
806 mprsas_startup_increment(sassc);
808 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
/*
811 * Register for async events so we can determine the EEDP
812 * capabilities of devices.
*/
814 status = xpt_create_path(&sassc->path, /*periph*/NULL,
815 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
817 if (status != CAM_REQ_CMP) {
818 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
819 "Error %#x creating sim path\n", status);
/* AC_ADVINFO_CHANGED exists only where CAM supports advanced info. */
824 #if (__FreeBSD_version >= 1000006) || \
825 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
826 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
828 event = AC_FOUND_DEVICE;
/*
832 * Prior to the CAM locking improvements, we can't call
833 * xpt_register_async() with a particular path specified.
835 * If a path isn't specified, xpt_register_async() will
836 * generate a wildcard path and acquire the XPT lock while
837 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
838 * It will then drop the XPT lock once that is done.
840 * If a path is specified for xpt_register_async(), it will
841 * not acquire and drop the XPT lock around the call to
842 * xpt_action().  xpt_action() asserts that the caller
843 * holds the SIM lock, so the SIM lock has to be held when
844 * calling xpt_register_async() when the path is specified.
846 * But xpt_register_async calls xpt_for_all_devices(),
847 * which calls xptbustraverse(), which will acquire each
848 * SIM lock.  When it traverses our particular bus, it will
849 * necessarily acquire the SIM lock, which will lead to a
850 * recursive lock acquisition.
852 * The CAM locking changes fix this problem by acquiring
853 * the XPT topology lock around bus traversal in
854 * xptbustraverse(), so the caller can hold the SIM lock
855 * and it does not cause a recursive lock acquisition.
857 * These __FreeBSD_version values are approximate, especially
858 * for stable/10, which is two months later than the actual
*/
862 #if (__FreeBSD_version < 1000703) || \
863 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
865 status = xpt_register_async(event, mprsas_async, sc,
869 status = xpt_register_async(event, mprsas_async, sc,
873 if (status != CAM_REQ_CMP) {
874 mpr_dprint(sc, MPR_ERROR,
875 "Error %#x registering async handler for "
876 "AC_ADVINFO_CHANGED events\n", status);
877 xpt_free_path(sassc->path);
881 if (status != CAM_REQ_CMP) {
/*
883 * EEDP use is the exception, not the rule.
884 * Warn the user, but do not fail to attach.
*/
886 mpr_printf(sc, "EEDP capabilities disabled.\n");
891 mprsas_register_events(sc);
896 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
/*
 * Tear down the SAS/CAM subsystem in roughly reverse attach order:
 * deregister firmware events, free the event taskqueue (with the driver
 * lock dropped to avoid deadlocking against in-flight tasks), deregister
 * the async handler and path, release a startup simq freeze if still held,
 * deregister/free the sim and simq, then free per-target LUN lists and the
 * target array.  Safe to call when attach bailed out early (NULL checks
 * throughout); lock drop/reacquire lines are elided in this excerpt.
 */
901 mpr_detach_sas(struct mpr_softc *sc)
903 struct mprsas_softc *sassc;
904 struct mprsas_lun *lun, *lun_tmp;
905 struct mprsas_target *targ;
910 if (sc->sassc == NULL)
914 mpr_deregister_events(sc, sassc->mprsas_eh);
/*
917 * Drain and free the event handling taskqueue with the lock
918 * unheld so that any parallel processing tasks drain properly
919 * without deadlocking.
*/
921 if (sassc->ev_tq != NULL)
922 taskqueue_free(sassc->ev_tq);
924 /* Make sure CAM doesn't wedge if we had to bail out early. */
927 /* Deregister our async handler */
928 if (sassc->path != NULL) {
/* event mask 0 removes the previously registered callback. */
929 xpt_register_async(0, mprsas_async, sc, sassc->path);
930 xpt_free_path(sassc->path);
934 if (sassc->flags & MPRSAS_IN_STARTUP)
935 xpt_release_simq(sassc->sim, 1);
937 if (sassc->sim != NULL) {
938 xpt_bus_deregister(cam_sim_path(sassc->sim));
939 cam_sim_free(sassc->sim, FALSE);
944 if (sassc->devq != NULL)
945 cam_simq_free(sassc->devq);
947 for (i = 0; i < sassc->maxtargets; i++) {
948 targ = &sassc->targets[i];
949 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
953 free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery completes: cancel a pending discovery
 * timeout and, when mapping-event tracking is enabled, arm the
 * device-check callout that updates missing counts in the mapping table
 * once event processing has had time to run.
 */
961 mprsas_discovery_end(struct mprsas_softc *sassc)
963 struct mpr_softc *sc = sassc->sc;
967 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
968 callout_stop(&sassc->discovery_callout);
/*
971 * After discovery has completed, check the mapping table for any
972 * missing devices and update their missing counts.  Only do this once
973 * whenever the driver is initialized so that missing counts aren't
974 * updated unnecessarily.  Note that just because discovery has
975 * completed doesn't mean that events have been processed yet.  The
976 * check_devices function is a callout timer that checks if ALL devices
977 * are missing.  If so, it will wait a little longer for events to
978 * complete and keep resetting itself until some device in the mapping
979 * table is not missing, meaning that event processing has started.
*/
981 if (sc->track_mapping_events) {
982 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
983 "completed. Check for missing devices in the mapping "
985 callout_reset(&sc->device_check_callout,
986 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
/*
 * Main CAM action entry point for the mpr sim.  Dispatches on the CCB
 * function code: answers XPT_PATH_INQ locally, reports per-target transport
 * settings for XPT_GET_TRAN_SETTINGS, and routes SCSI I/O, device resets,
 * and SMP passthrough to their handlers.  Must be called with the driver
 * mutex held (asserted below).  Several case labels and break statements
 * are elided in this excerpt.
 */
992 mprsas_action(struct cam_sim *sim, union ccb *ccb)
994 struct mprsas_softc *sassc;
996 sassc = cam_sim_softc(sim);
998 MPR_FUNCTRACE(sassc->sc);
999 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1000 ccb->ccb_h.func_code);
1001 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
1003 switch (ccb->ccb_h.func_code) {
/* XPT_PATH_INQ: describe the HBA's capabilities to CAM. */
1006 struct ccb_pathinq *cpi = &ccb->cpi;
1007 struct mpr_softc *sc = sassc->sc;
1008 uint8_t sges_per_frame;
1010 cpi->version_num = 1;
1011 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1012 cpi->target_sprt = 0;
1013 #if (__FreeBSD_version >= 1000039) || \
1014 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1015 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1017 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1019 cpi->hba_eng_cnt = 0;
1020 cpi->max_target = sassc->maxtargets - 1;
/*
1024 * initiator_id is set here to an ID outside the set of valid
1025 * target IDs (including volumes).
*/
1027 cpi->initiator_id = sassc->maxtargets;
1028 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1029 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1030 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1031 cpi->unit_number = cam_sim_unit(sim);
1032 cpi->bus_id = cam_sim_bus(sim);
/*
1034 * XXXSLM-I think this needs to change based on config page or
1035 * something instead of hardcoded to 150000.
*/
1037 cpi->base_transfer_speed = 150000;
1038 cpi->transport = XPORT_SAS;
1039 cpi->transport_version = 0;
1040 cpi->protocol = PROTO_SCSI;
1041 cpi->protocol_version = SCSI_REV_SPC;
/*
1044 * Max IO Size is Page Size * the following:
1045 * ((SGEs per frame - 1 for chain element) *
1046 * Max Chain Depth) + 1 for no chain needed in last frame
1048 * If user suggests a Max IO size to use, use the smaller of the
1049 * user's value and the calculated value as long as the user's
1050 * value is larger than 0.  The user's value is in pages.
*/
1052 sges_per_frame = (sc->chain_frame_size /
1053 sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
1054 cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
1055 cpi->maxio *= PAGE_SIZE;
1056 if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
1058 cpi->maxio = sc->max_io_pages * PAGE_SIZE;
1059 sc->maxio = cpi->maxio;
1060 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1063 case XPT_GET_TRAN_SETTINGS:
1065 struct ccb_trans_settings *cts;
1066 struct ccb_trans_settings_sas *sas;
1067 struct ccb_trans_settings_scsi *scsi;
1068 struct mprsas_target *targ;
1071 sas = &cts->xport_specific.sas;
1072 scsi = &cts->proto_specific.scsi;
1074 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1075 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1076 cts->ccb_h.target_id));
1077 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle 0 means no device currently occupies this target slot. */
1078 if (targ->handle == 0x0) {
1079 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1083 cts->protocol_version = SCSI_REV_SPC2;
1084 cts->transport = XPORT_SAS;
1085 cts->transport_version = 0;
1087 sas->valid = CTS_SAS_VALID_SPEED;
/* Map firmware link-rate code to kb/s (1.5/3/6/12 Gbps). */
1088 switch (targ->linkrate) {
1090 sas->bitrate = 150000;
1093 sas->bitrate = 300000;
1096 sas->bitrate = 600000;
1099 sas->bitrate = 1200000;
1105 cts->protocol = PROTO_SCSI;
1106 scsi->valid = CTS_SCSI_VALID_TQ;
1107 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1109 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1112 case XPT_CALC_GEOMETRY:
1113 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1114 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1117 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1119 mprsas_action_resetdev(sassc, ccb);
/* Abort/bus-reset requests are acknowledged but not acted on. */
1124 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1125 "for abort or reset\n");
1126 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1129 mprsas_action_scsiio(sassc, ccb);
1131 #if __FreeBSD_version >= 900026
1133 mprsas_action_smpio(sassc, ccb);
1137 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Notify CAM of a reset via xpt_async() on a freshly created path for the
 * given target/LUN (wildcards allowed).  'ac_code' is the async code to
 * broadcast (e.g. AC_BUS_RESET from mprsas_handle_reinit).  On path
 * creation failure the error is logged and the announcement is dropped.
 */
1145 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1146 target_id_t target_id, lun_id_t lun_id)
1148 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1149 struct cam_path *path;
1151 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1152 ac_code, target_id, (uintmax_t)lun_id);
1154 if (xpt_create_path(&path, NULL,
1155 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1156 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1161 xpt_async(ac_code, path, NULL);
1162 xpt_free_path(path);
/*
 * Force-complete every allocated command after a diag reset.  Each command
 * gets a NULL reply (cm_reply = NULL) so completion handlers can tell the
 * command did not finish normally.  Must be called with the softc mutex held.
 */
1166 mprsas_complete_all_commands(struct mpr_softc *sc)
1168 struct mpr_command *cm;
1173 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1175 /* complete all commands with a NULL reply */
1176 for (i = 1; i < sc->num_reqs; i++) {
1177 cm = &sc->commands[i];
1178 cm->cm_reply = NULL;
/* Polled commands: mark COMPLETE so the polling loop stops waiting. */
1181 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1182 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
/* Commands with a completion callback: invoke it with the NULL reply. */
1184 if (cm->cm_complete != NULL) {
1185 mprsas_log_command(cm, MPR_RECOVERY,
1186 "completing cm %p state %x ccb %p for diag reset\n",
1187 cm, cm->cm_state, cm->cm_ccb);
1188 cm->cm_complete(sc, cm);
/* Sleeping waiters: wake them so they observe the aborted command. */
1192 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1193 mprsas_log_command(cm, MPR_RECOVERY,
1194 "waking up cm %p state %x ccb %p for diag reset\n",
1195 cm, cm->cm_state, cm->cm_ccb);
/* Keep the active-I/O accounting consistent. */
1200 if (cm->cm_sc->io_cmds_active != 0)
1201 cm->cm_sc->io_cmds_active--;
1203 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1204 /* this should never happen, but if it does, log */
1205 mprsas_log_command(cm, MPR_RECOVERY,
1206 "cm %p state %x flags 0x%x ccb %p during diag "
1207 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: re-enter startup/discovery mode,
 * announce a bus reset to CAM, flush all outstanding commands, and clear
 * per-target state (handles change across a reset and must be rediscovered).
 */
1214 mprsas_handle_reinit(struct mpr_softc *sc)
1218 /* Go back into startup mode and freeze the simq, so that CAM
1219 * doesn't send any commands until after we've rediscovered all
1220 * targets and found the proper device handles for them.
1222 * After the reset, portenable will trigger discovery, and after all
1223 * discovery-related activities have finished, the simq will be
1226 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1227 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1228 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1229 mprsas_startup_increment(sc->sassc);
1231 /* notify CAM of a bus reset */
1232 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1235 /* complete and cleanup after all outstanding commands */
1236 mprsas_complete_all_commands(sc);
1238 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1239 __func__, sc->sassc->startup_refcount);
1241 /* zero all the target handles, since they may change after the
1242 * reset, and we have to rediscover all the targets and use the new
1245 for (i = 0; i < sc->sassc->maxtargets; i++) {
1246 if (sc->sassc->targets[i].outstanding != 0)
1247 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1248 i, sc->sassc->targets[i].outstanding);
1249 sc->sassc->targets[i].handle = 0x0;
1250 sc->sassc->targets[i].exp_dev_handle = 0x0;
1251 sc->sassc->targets[i].outstanding = 0;
1252 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management (TM) command itself times
 * out.  'data' is the TM struct mpr_command armed via callout_reset() in
 * mprsas_send_reset()/mprsas_send_abort().  Runs with the softc mutex held
 * (asserted below).
 */
1256 mprsas_tm_timeout(void *data)
1258 struct mpr_command *tm = data;
1259 struct mpr_softc *sc = tm->cm_sc;
1261 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1263 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/*
 * Completion handler for a LOGICAL UNIT RESET task-management command.
 * Stops the TM timeout callout, validates the reply, then decides the next
 * recovery step: done (no commands left on the LUN), continue aborting
 * timed-out commands on other LUNs, or escalate to a target reset if
 * commands are still outstanding on this LUN.
 */
1269 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1271 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1272 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1273 unsigned int cm_count = 0;
1274 struct mpr_command *cm;
1275 struct mprsas_target *targ;
1277 callout_stop(&tm->cm_callout);
1279 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1280 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1284 * Currently there should be no way we can hit this case. It only
1285 * happens when we have a failure to allocate chain frames, and
1286 * task management commands don't have S/G lists.
1288 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1289 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1290 "%s: cm_flags = %#x for LUN reset! "
1291 "This should not happen!\n", __func__, tm->cm_flags);
1292 mprsas_free_tm(sc, tm);
/* A NULL reply is only expected during a diag reset; otherwise escalate. */
1296 if (reply == NULL) {
1297 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1299 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1300 /* this completion was due to a reset, just cleanup */
1301 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1302 "reset, ignoring NULL LUN reset reply\n");
1304 mprsas_free_tm(sc, tm);
1307 /* we should have gotten a reply. */
1308 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1309 "LUN reset attempt, resetting controller\n");
1315 mpr_dprint(sc, MPR_RECOVERY,
1316 "logical unit reset status 0x%x code 0x%x count %u\n",
1317 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1318 le32toh(reply->TerminationCount));
1321 * See if there are any outstanding commands for this LUN.
1322 * This could be made more efficient by using a per-LU data
1323 * structure of some sort.
1325 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1326 if (cm->cm_lun == tm->cm_lun)
1330 if (cm_count == 0) {
1331 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1332 "Finished recovery after LUN reset for target %u\n",
/* Tell CAM a BDR was delivered to this target/LUN. */
1335 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1339 * We've finished recovery for this logical unit. check and
1340 * see if some other logical unit has a timedout command
1341 * that needs to be processed.
1343 cm = TAILQ_FIRST(&targ->timedout_commands);
1345 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1346 "More commands to abort for target %u\n", targ->tid);
1347 mprsas_send_abort(sc, tm, cm);
1350 mprsas_free_tm(sc, tm);
1353 /* if we still have commands for this LUN, the reset
1354 * effectively failed, regardless of the status reported.
1355 * Escalate to a target reset.
1357 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1358 "logical unit reset complete for target %u, but still "
1359 "have %u command(s), sending target reset\n", targ->tid,
1361 mprsas_send_reset(sc, tm,
1362 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management command.  Stops
 * the TM timeout callout and validates the reply.  If the target has no
 * outstanding commands, recovery is finished and a BDR is announced to
 * CAM; if commands remain, the reset effectively failed and recovery
 * escalates (visible tail: "resetting controller").
 */
1367 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1369 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1370 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1371 struct mprsas_target *targ;
1373 callout_stop(&tm->cm_callout);
1375 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1376 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1380 * Currently there should be no way we can hit this case. It only
1381 * happens when we have a failure to allocate chain frames, and
1382 * task management commands don't have S/G lists.
1384 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1385 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1386 "reset! This should not happen!\n", __func__, tm->cm_flags);
1387 mprsas_free_tm(sc, tm);
/* A NULL reply is only expected during a diag reset; otherwise escalate. */
1391 if (reply == NULL) {
1392 mpr_dprint(sc, MPR_RECOVERY,
1393 "NULL target reset reply for tm %p TaskMID %u\n",
1394 tm, le16toh(req->TaskMID));
1395 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1396 /* this completion was due to a reset, just cleanup */
1397 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1398 "reset, ignoring NULL target reset reply\n");
1400 mprsas_free_tm(sc, tm);
1403 /* we should have gotten a reply. */
1404 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1405 "target reset attempt, resetting controller\n");
1411 mpr_dprint(sc, MPR_RECOVERY,
1412 "target reset status 0x%x code 0x%x count %u\n",
1413 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1414 le32toh(reply->TerminationCount));
1416 if (targ->outstanding == 0) {
1418 * We've finished recovery for this target and all
1419 * of its logical units.
1421 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1422 "Finished reset recovery for target %u\n", targ->tid);
1424 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1428 mprsas_free_tm(sc, tm);
1431 * After a target reset, if this target still has
1432 * outstanding commands, the reset effectively failed,
1433 * regardless of the status reported. escalate.
1435 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1436 "Target reset complete for target %u, but still have %u "
1437 "command(s), resetting controller\n", targ->tid,
1443 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset of the given 'type' on the
 * TM command's target: LOGICAL_UNIT_RESET (uses tm->cm_lun, completes via
 * mprsas_logical_unit_reset_complete) or TARGET_RESET (SAS hard link /
 * SATA link reset, completes via mprsas_target_reset_complete).  Arms a
 * MPR_RESET_TIMEOUT-second callout (mprsas_tm_timeout) and maps the
 * command to the hardware as a high-priority request.
 */
1446 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1448 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1449 struct mprsas_target *target;
1452 target = tm->cm_targ;
/* A zero devhandle means the target is gone; nothing to reset. */
1453 if (target->handle == 0) {
1454 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1455 "%d\n", __func__, target->tid);
1459 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1460 req->DevHandle = htole16(target->handle);
1461 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1462 req->TaskType = type;
1464 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1465 /* XXX Need to handle invalid LUNs */
1466 MPR_SET_LUN(req->LUN, tm->cm_lun);
1467 tm->cm_targ->logical_unit_resets++;
1468 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1469 "Sending logical unit reset to target %u lun %d\n",
1470 target->tid, tm->cm_lun);
1471 tm->cm_complete = mprsas_logical_unit_reset_complete;
1472 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1473 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1475 * Target reset method =
1476 * SAS Hard Link Reset / SATA Link Reset
1478 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1479 tm->cm_targ->target_resets++;
1480 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1481 "Sending target reset to target %u\n", target->tid);
1482 tm->cm_complete = mprsas_target_reset_complete;
1483 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
/* Any other task type is a programming error. */
1486 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1490 if (target->encl_level_valid) {
1491 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1492 "At enclosure level %d, slot %d, connector name (%4s)\n",
1493 target->encl_level, target->encl_slot,
1494 target->connector_name);
/* TM commands go out on the high-priority request queue. */
1498 tm->cm_desc.HighPriority.RequestFlags =
1499 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1500 tm->cm_complete_data = (void *)tm;
1502 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1503 mprsas_tm_timeout, tm);
1505 err = mpr_map_command(sc, tm);
1507 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1508 "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT TASK task-management command.  Stops
 * the TM timeout callout, validates the reply, then: finishes recovery if
 * no timed-out commands remain for the target, continues with the next
 * abort if a different timed-out command is pending, or escalates to a
 * logical unit reset if the aborted command did not actually complete.
 */
1515 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1517 struct mpr_command *cm;
1518 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1519 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1520 struct mprsas_target *targ;
1522 callout_stop(&tm->cm_callout);
1524 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1525 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1529 * Currently there should be no way we can hit this case. It only
1530 * happens when we have a failure to allocate chain frames, and
1531 * task management commands don't have S/G lists.
1533 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1534 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1535 "cm_flags = %#x for abort %p TaskMID %u!\n",
1536 tm->cm_flags, tm, le16toh(req->TaskMID));
1537 mprsas_free_tm(sc, tm);
/* A NULL reply is only expected during a diag reset; otherwise escalate. */
1541 if (reply == NULL) {
1542 mpr_dprint(sc, MPR_RECOVERY,
1543 "NULL abort reply for tm %p TaskMID %u\n",
1544 tm, le16toh(req->TaskMID));
1545 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1546 /* this completion was due to a reset, just cleanup */
1547 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1548 "reset, ignoring NULL abort reply\n");
1550 mprsas_free_tm(sc, tm);
1552 /* we should have gotten a reply. */
1553 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1554 "abort attempt, resetting controller\n");
1560 mpr_dprint(sc, MPR_RECOVERY,
1561 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1562 le16toh(req->TaskMID),
1563 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1564 le32toh(reply->TerminationCount));
/* Look at the next timed-out command queued for this target, if any. */
1566 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1569 * if there are no more timedout commands, we're done with
1570 * error recovery for this target.
1572 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1573 "Finished abort recovery for target %u\n", targ->tid);
1575 mprsas_free_tm(sc, tm);
1576 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1577 /* abort success, but we have more timedout commands to abort */
1578 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1579 "Continuing abort recovery for target %u\n", targ->tid);
1580 mprsas_send_abort(sc, tm, cm);
1583 * we didn't get a command completion, so the abort
1584 * failed as far as we're concerned. escalate.
1586 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1587 "Abort failed for target %u, sending logical unit reset\n",
1590 mprsas_send_reset(sc, tm,
1591 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1595 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK task-management request for the timed-out
 * command 'cm', using the TM command 'tm'.  TaskMID identifies the SMID of
 * the command to abort.  Arms a MPR_ABORT_TIMEOUT-second callout
 * (mprsas_tm_timeout) and maps the TM to the hardware as a high-priority
 * request; completion is handled by mprsas_abort_complete.
 */
1598 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1599 struct mpr_command *cm)
1601 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1602 struct mprsas_target *targ;
/* A zero devhandle means the target is gone; nothing to abort against. */
1606 if (targ->handle == 0) {
1607 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1608 "%s null devhandle for target_id %d\n",
1609 __func__, cm->cm_ccb->ccb_h.target_id);
1613 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1614 "Aborting command %p\n", cm);
1616 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1617 req->DevHandle = htole16(targ->handle);
1618 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1619 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1621 /* XXX Need to handle invalid LUNs */
1622 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* Identify the command to abort by its SMID. */
1624 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1627 tm->cm_desc.HighPriority.RequestFlags =
1628 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1629 tm->cm_complete = mprsas_abort_complete;
1630 tm->cm_complete_data = (void *)tm;
1631 tm->cm_targ = cm->cm_targ;
1632 tm->cm_lun = cm->cm_lun;
1634 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1635 mprsas_tm_timeout, tm);
1639 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1641 err = mpr_map_command(sc, tm);
1643 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1644 "error %d sending abort for cm %p SMID %u\n",
1645 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O command.  Runs with the softc
 * mutex held.  First drains any pending interrupts (the command may have
 * just completed); if the command really is stuck, it is marked TIMEDOUT,
 * its CCB status set to CAM_CMD_TIMEOUT, and it is queued on the target's
 * timedout_commands list.  Recovery is then started by sending an abort
 * via a freshly-allocated TM (or deferred if a TM is already in flight).
 */
1650 mprsas_scsiio_timeout(void *data)
1652 sbintime_t elapsed, now;
1654 struct mpr_softc *sc;
1655 struct mpr_command *cm;
1656 struct mprsas_target *targ;
1658 cm = (struct mpr_command *)data;
1664 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1666 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1669 * Run the interrupt handler to make sure it's not pending. This
1670 * isn't perfect because the command could have already completed
1671 * and been re-used, though this is unlikely.
1673 mpr_intr_locked(sc);
1674 if (cm->cm_state == MPR_CM_STATE_FREE) {
1675 mprsas_log_command(cm, MPR_XINFO,
1676 "SCSI command %p almost timed out\n", cm);
1680 if (cm->cm_ccb == NULL) {
1681 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
/* Report how long the command has actually been outstanding. */
1688 elapsed = now - ccb->ccb_h.qos.sim_data;
1689 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1690 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1691 targ->tid, targ->handle, ccb->ccb_h.timeout,
1692 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1693 if (targ->encl_level_valid) {
1694 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1695 "At enclosure level %d, slot %d, connector name (%4s)\n",
1696 targ->encl_level, targ->encl_slot, targ->connector_name);
1699 /* XXX first, check the firmware state, to see if it's still
1700 * operational. if not, do a diag reset.
1702 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1703 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1704 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1706 if (targ->tm != NULL) {
1707 /* target already in recovery, just queue up another
1708 * timedout command to be processed later.
1710 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1711 "processing by tm %p\n", cm, targ->tm);
1713 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1715 /* start recovery by aborting the first timedout command */
1716 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1717 "Sending abort to target %u for SMID %d\n", targ->tid,
1718 cm->cm_desc.Default.SMID);
1719 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1721 mprsas_send_abort(sc, targ->tm, cm);
1724 /* XXX queue this target up for recovery once a TM becomes
1725 * available. The firmware only has a limited number of
1726 * HighPriority credits for the high priority requests used
1727 * for task management, and we ran out.
1729 * Isilon: don't worry about this for now, since we have
1730 * more credits than disks in an enclosure, and limit
1731 * ourselves to one TM per target for recovery.
1733 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1734 "timedout cm %p failed to allocate a tm\n", cm);
1739 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1741 * Return 0 - for success,
1742 * 1 - to immediately return back the command with success status to CAM
1743 * negative value - to fallback to firmware path i.e. issue scsi unmap
1744 * to FW without any translation.
/*
 * Translate a SCSI UNMAP CCB into a native NVMe Dataset Management
 * (deallocate) command wrapped in an MPI2.6 NVMe Encapsulated request,
 * then queue it to the hardware.  The UNMAP block descriptors are copied
 * out of the CCB, converted to NVMe DSM ranges in a driver PRP page, and
 * referenced via a PRP built by mpr_build_nvme_prp().
 * Return semantics (per the header comment above this function in the
 * full file): 0 = issued, 1 = complete the CCB with success, negative =
 * fall back to the firmware SCSI path.
 */
1747 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1748 union ccb *ccb, struct mprsas_target *targ)
1750 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1751 struct ccb_scsiio *csio;
1752 struct unmap_parm_list *plist;
1753 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1754 struct nvme_command *c;
1756 uint16_t ndesc, list_len, data_length;
1757 struct mpr_prp_page *prp_page_info;
1758 uint64_t nvme_dsm_ranges_dma_handle;
/* UNMAP CDB bytes 7-8 hold the big-endian parameter list length. */
1761 #if __FreeBSD_version >= 1100103
1762 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1764 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1765 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1766 ccb->csio.cdb_io.cdb_ptr[8]);
1768 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1769 ccb->csio.cdb_io.cdb_bytes[8]);
1773 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
/* M_NOWAIT: we are called with the softc mutex held. */
1777 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1779 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1780 "save UNMAP data\n");
1784 /* Copy SCSI unmap data to a local buffer */
1785 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1787 /* return back the unmap command to CAM with success status,
1788 * if number of descriptors is zero.
/* Each UNMAP block descriptor is 16 bytes, hence the >> 4. */
1790 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1792 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1793 "UNMAP cmd is Zero\n");
1798 data_length = ndesc * sizeof(struct nvme_dsm_range);
1799 if (data_length > targ->MDTS) {
1800 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1801 "Device's MDTS: %d\n", data_length, targ->MDTS);
1806 prp_page_info = mpr_alloc_prp_page(sc);
1807 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1808 "UNMAP command.\n", __func__));
1811 * Insert the allocated PRP page into the command's PRP page list. This
1812 * will be freed when the command is freed.
1814 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1816 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1817 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1819 bzero(nvme_dsm_ranges, data_length);
1821 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1822 * for each descriptors contained in SCSI UNMAP data.
1824 for (i = 0; i < ndesc; i++) {
1825 nvme_dsm_ranges[i].length =
1826 htole32(be32toh(plist->desc[i].nlb));
1827 nvme_dsm_ranges[i].starting_lba =
1828 htole64(be64toh(plist->desc[i].slba));
1829 nvme_dsm_ranges[i].attributes = 0;
1832 /* Build MPI2.6's NVMe Encapsulated Request Message */
1833 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1834 bzero(req, sizeof(*req));
1835 req->DevHandle = htole16(targ->handle);
1836 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1837 req->Flags = MPI26_NVME_FLAGS_WRITE;
/* NVMe error responses land in the command's sense buffer. */
1838 req->ErrorResponseBaseAddress.High =
1839 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1840 req->ErrorResponseBaseAddress.Low =
1841 htole32(cm->cm_sense_busaddr);
1842 req->ErrorResponseAllocationLength =
1843 htole16(sizeof(struct nvme_completion));
1844 req->EncapsulatedCommandLength =
1845 htole16(sizeof(struct nvme_command));
1846 req->DataLength = htole32(data_length);
1848 /* Build NVMe DSM command */
1849 c = (struct nvme_command *) req->NVMe_Command;
1850 c->opc = NVME_OPC_DATASET_MANAGEMENT;
/* NVMe namespace IDs are 1-based; CAM LUNs are 0-based. */
1851 c->nsid = htole32(csio->ccb_h.target_lun + 1);
/* cdw10: number of ranges, 0-based per the NVMe spec. */
1852 c->cdw10 = htole32(ndesc - 1);
1853 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1855 cm->cm_length = data_length;
1858 cm->cm_complete = mprsas_scsiio_complete;
1859 cm->cm_complete_data = ccb;
1861 cm->cm_lun = csio->ccb_h.target_lun;
1864 cm->cm_desc.Default.RequestFlags =
1865 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
/* Arm the per-command timeout, mirroring mprsas_action_scsiio(). */
1867 csio->ccb_h.qos.sim_data = sbinuptime();
1868 #if __FreeBSD_version >= 1000029
1869 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1870 mprsas_scsiio_timeout, cm, 0);
1871 #else //__FreeBSD_version < 1000029
1872 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1873 mprsas_scsiio_timeout, cm);
1874 #endif //__FreeBSD_version >= 1000029
1877 targ->outstanding++;
1878 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1879 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1881 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1882 __func__, cm, ccb, targ->outstanding);
1884 mpr_build_nvme_prp(sc, cm, req,
1885 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1886 mpr_map_command(sc, cm);
/*
 * XPT_SCSI_IO handler: validate the target, allocate a driver command,
 * translate the CCB into an MPI2 SCSI IO request (direction, tagging,
 * LUN, CDB, optional EEDP/T10-PI setup), arm the per-command timeout,
 * and hand the request to the hardware.  NVMe-attached devices get UNMAP
 * translated to a native DSM command via mprsas_build_nvme_unmap().
 * Called with the softc mutex held (asserted below).
 */
1894 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1896 MPI2_SCSI_IO_REQUEST *req;
1897 struct ccb_scsiio *csio;
1898 struct mpr_softc *sc;
1899 struct mprsas_target *targ;
1900 struct mprsas_lun *lun;
1901 struct mpr_command *cm;
1902 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1903 uint16_t eedp_flags;
1904 uint32_t mpi_control;
1909 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1912 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1913 ("Target %d out of bounds in XPT_SCSI_IO\n",
1914 csio->ccb_h.target_id));
1915 targ = &sassc->targets[csio->ccb_h.target_id];
1916 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* A zero devhandle means no device behind this target id. */
1917 if (targ->handle == 0x0) {
1918 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1919 __func__, csio->ccb_h.target_id);
1920 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component members are not addressable via plain SCSI I/O. */
1924 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1925 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1926 "supported %u\n", __func__, csio->ccb_h.target_id);
1927 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1932 * Sometimes, it is possible to get a command that is not "In
1933 * Progress" and was actually aborted by the upper layer. Check for
1934 * this here and complete the command without error.
1936 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1937 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1938 "target %u\n", __func__, csio->ccb_h.target_id);
1943 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1944 * that the volume has timed out. We want volumes to be enumerated
1945 * until they are deleted/removed, not just failed.
1947 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1948 if (targ->devinfo == 0)
1949 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1951 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1956 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1957 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1958 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1964 * If target has a reset in progress, freeze the devq and return. The
1965 * devq will be released when the TM reset is finished.
1967 if (targ->flags & MPRSAS_TARGET_INRESET) {
1968 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1969 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1970 __func__, targ->tid);
1971 xpt_freeze_devq(ccb->ccb_h.path, 1);
/* Out of commands (or mid-diagreset): freeze the simq and requeue. */
1976 cm = mpr_alloc_command(sc);
1977 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1979 mpr_free_command(sc, cm);
1981 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1982 xpt_freeze_simq(sassc->sim, 1);
1983 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1985 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1986 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1991 /* For NVME device's issue UNMAP command directly to NVME drives by
1992 * constructing equivalent native NVMe DataSetManagement command.
1994 #if __FreeBSD_version >= 1100103
1995 scsi_opcode = scsiio_cdb_ptr(csio)[0];
1997 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1998 scsi_opcode = csio->cdb_io.cdb_ptr[0];
2000 scsi_opcode = csio->cdb_io.cdb_bytes[0];
2002 if (scsi_opcode == UNMAP &&
2004 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2005 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2006 if (rc == 1) { /* return command to CAM with success status */
2007 mpr_free_command(sc, cm);
2008 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2011 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
/* Build the MPI2 SCSI IO request from the CCB. */
2015 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2016 bzero(req, sizeof(*req));
2017 req->DevHandle = htole16(targ->handle);
2018 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2020 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2021 req->SenseBufferLength = MPR_SENSE_LEN;
2023 req->ChainOffset = 0;
2024 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2029 req->DataLength = htole32(csio->dxfer_len);
2030 req->BidirectionalDataLength = 0;
2031 req->IoFlags = htole16(csio->cdb_len);
2034 /* Note: BiDirectional transfers are not supported */
2035 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2037 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2038 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2041 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2042 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2046 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
2050 if (csio->cdb_len == 32)
2051 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2053 * It looks like the hardware doesn't require an explicit tag
2054 * number for each transaction. SAM Task Management not supported
2057 switch (csio->tag_action) {
2058 case MSG_HEAD_OF_Q_TAG:
2059 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2061 case MSG_ORDERED_Q_TAG:
2062 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2065 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2067 case CAM_TAG_ACTION_NONE:
2068 case MSG_SIMPLE_Q_TAG:
2070 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2073 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2074 req->Control = htole32(mpi_control);
2076 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2077 mpr_free_command(sc, cm);
2078 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB in, from either the pointer or inline form. */
2083 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2084 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2086 KASSERT(csio->cdb_len <= IOCDBLEN,
2087 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2088 "is not set", csio->cdb_len));
2089 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2091 req->IoFlags = htole16(csio->cdb_len);
2094 * Check if EEDP is supported and enabled. If it is then check if the
2095 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2096 * is formatted for EEDP support. If all of this is true, set CDB up
2097 * for EEDP transfer.
2099 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2100 if (sc->eedp_enabled && eedp_flags) {
2101 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2102 if (lun->lun_id == csio->ccb_h.target_lun) {
2107 if ((lun != NULL) && (lun->eedp_formatted)) {
2108 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2109 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2110 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2111 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2112 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2114 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2116 req->EEDPFlags = htole16(eedp_flags);
2119 * If CDB less than 32, fill in Primary Ref Tag with
2120 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2121 * already there. Also, set protection bit. FreeBSD
2122 * currently does not support CDBs bigger than 16, but
2123 * the code doesn't hurt, and will be here for the
2126 if (csio->cdb_len != 32) {
2127 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2128 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2129 PrimaryReferenceTag;
2130 for (i = 0; i < 4; i++) {
2132 req->CDB.CDB32[lba_byte + i];
2135 req->CDB.EEDP32.PrimaryReferenceTag =
2137 CDB.EEDP32.PrimaryReferenceTag);
2138 req->CDB.EEDP32.PrimaryApplicationTagMask =
2140 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
2144 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2145 req->EEDPFlags = htole16(eedp_flags);
2146 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2152 cm->cm_length = csio->dxfer_len;
2153 if (cm->cm_length != 0) {
2155 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2159 cm->cm_sge = &req->SGL;
2160 cm->cm_sglsize = (32 - 24) * 4;
2161 cm->cm_complete = mprsas_scsiio_complete;
2162 cm->cm_complete_data = ccb;
2164 cm->cm_lun = csio->ccb_h.target_lun;
2167 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2168 * and set descriptor type.
2170 if (targ->scsi_req_desc_type ==
2171 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2172 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2173 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2174 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2175 if (!sc->atomic_desc_capable) {
2176 cm->cm_desc.FastPathSCSIIO.DevHandle =
2177 htole16(targ->handle);
2180 cm->cm_desc.SCSIIO.RequestFlags =
2181 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2182 if (!sc->atomic_desc_capable)
2183 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Record submit time and arm the CCB's timeout. */
2186 csio->ccb_h.qos.sim_data = sbinuptime();
2187 #if __FreeBSD_version >= 1000029
2188 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2189 mprsas_scsiio_timeout, cm, 0);
2190 #else //__FreeBSD_version < 1000029
2191 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2192 mprsas_scsiio_timeout, cm);
2193 #endif //__FreeBSD_version >= 1000029
2196 targ->outstanding++;
2197 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2198 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2200 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2201 __func__, cm, ccb, targ->outstanding);
2203 mpr_map_command(sc, cm);
2208 * mpr_sc_failed_io_info - translates a non-successful SCSI_IO request
/*
 * Decode and log (at MPR_XINFO) the details of a failed SCSI I/O reply:
 * IOC status, SCSI status/state, enclosure location, autosense data, and
 * response info.  Diagnostic only; does not alter command state.
 */
2211 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2212 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2216 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2217 MPI2_IOCSTATUS_MASK;
2218 u8 scsi_state = mpi_reply->SCSIState;
2219 u8 scsi_status = mpi_reply->SCSIStatus;
2220 char *desc_ioc_state = NULL;
2221 char *desc_scsi_status = NULL;
2222 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is filtered out; NOTE(review): meaning not visible here --
 * presumably a noisy/benign loginfo code, confirm against LSI loginfo docs. */
2224 if (log_info == 0x31170000)
2227 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2229 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2232 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2233 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2234 if (targ->encl_level_valid) {
2235 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2236 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2237 targ->connector_name);
2241 * We can add more detail about underflow data here
2244 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2245 "scsi_state %b\n", desc_scsi_status, scsi_status,
2246 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2247 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
/* Dump the sense buffer only when XINFO debugging is enabled. */
2249 if (sc->mpr_debug & MPR_XINFO &&
2250 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2251 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2252 scsi_sense_print(csio);
2253 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2256 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2257 response_info = le32toh(mpi_reply->ResponseInfo);
2258 response_bytes = (u8 *)&response_info;
2259 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2261 mpr_describe_table(mpr_scsi_taskmgmt_string,
2262 response_bytes[0]));
2266 /** mprsas_nvme_trans_status_code
2268 * Convert Native NVMe command error status to
2269 * equivalent SCSI error status.
2271 * Returns appropriate scsi_status
2274 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
2275 struct mpr_command *cm)
2277 u8 status = MPI2_SCSI_STATUS_GOOD;
2278 int skey, asc, ascq;
2279 union ccb *ccb = cm->cm_complete_data;
2280 int returned_sense_len;
2282 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2283 skey = SSD_KEY_ILLEGAL_REQUEST;
2284 asc = SCSI_ASC_NO_SENSE;
2285 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2287 switch (nvme_status.sct) {
2288 case NVME_SCT_GENERIC:
2289 switch (nvme_status.sc) {
2290 case NVME_SC_SUCCESS:
2291 status = MPI2_SCSI_STATUS_GOOD;
2292 skey = SSD_KEY_NO_SENSE;
2293 asc = SCSI_ASC_NO_SENSE;
2294 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2296 case NVME_SC_INVALID_OPCODE:
2297 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2298 skey = SSD_KEY_ILLEGAL_REQUEST;
2299 asc = SCSI_ASC_ILLEGAL_COMMAND;
2300 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2302 case NVME_SC_INVALID_FIELD:
2303 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2304 skey = SSD_KEY_ILLEGAL_REQUEST;
2305 asc = SCSI_ASC_INVALID_CDB;
2306 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2308 case NVME_SC_DATA_TRANSFER_ERROR:
2309 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2310 skey = SSD_KEY_MEDIUM_ERROR;
2311 asc = SCSI_ASC_NO_SENSE;
2312 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2314 case NVME_SC_ABORTED_POWER_LOSS:
2315 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2316 skey = SSD_KEY_ABORTED_COMMAND;
2317 asc = SCSI_ASC_WARNING;
2318 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2320 case NVME_SC_INTERNAL_DEVICE_ERROR:
2321 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2322 skey = SSD_KEY_HARDWARE_ERROR;
2323 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2324 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2326 case NVME_SC_ABORTED_BY_REQUEST:
2327 case NVME_SC_ABORTED_SQ_DELETION:
2328 case NVME_SC_ABORTED_FAILED_FUSED:
2329 case NVME_SC_ABORTED_MISSING_FUSED:
2330 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2331 skey = SSD_KEY_ABORTED_COMMAND;
2332 asc = SCSI_ASC_NO_SENSE;
2333 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2335 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2336 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2337 skey = SSD_KEY_ILLEGAL_REQUEST;
2338 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2339 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2341 case NVME_SC_LBA_OUT_OF_RANGE:
2342 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2343 skey = SSD_KEY_ILLEGAL_REQUEST;
2344 asc = SCSI_ASC_ILLEGAL_BLOCK;
2345 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2347 case NVME_SC_CAPACITY_EXCEEDED:
2348 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2349 skey = SSD_KEY_MEDIUM_ERROR;
2350 asc = SCSI_ASC_NO_SENSE;
2351 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2353 case NVME_SC_NAMESPACE_NOT_READY:
2354 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2355 skey = SSD_KEY_NOT_READY;
2356 asc = SCSI_ASC_LUN_NOT_READY;
2357 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2361 case NVME_SCT_COMMAND_SPECIFIC:
2362 switch (nvme_status.sc) {
2363 case NVME_SC_INVALID_FORMAT:
2364 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2365 skey = SSD_KEY_ILLEGAL_REQUEST;
2366 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2367 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2369 case NVME_SC_CONFLICTING_ATTRIBUTES:
2370 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2371 skey = SSD_KEY_ILLEGAL_REQUEST;
2372 asc = SCSI_ASC_INVALID_CDB;
2373 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2377 case NVME_SCT_MEDIA_ERROR:
2378 switch (nvme_status.sc) {
2379 case NVME_SC_WRITE_FAULTS:
2380 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2381 skey = SSD_KEY_MEDIUM_ERROR;
2382 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2383 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2385 case NVME_SC_UNRECOVERED_READ_ERROR:
2386 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2387 skey = SSD_KEY_MEDIUM_ERROR;
2388 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2389 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2391 case NVME_SC_GUARD_CHECK_ERROR:
2392 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2393 skey = SSD_KEY_MEDIUM_ERROR;
2394 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2395 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2397 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2398 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2399 skey = SSD_KEY_MEDIUM_ERROR;
2400 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2401 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2403 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2404 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2405 skey = SSD_KEY_MEDIUM_ERROR;
2406 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2407 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2409 case NVME_SC_COMPARE_FAILURE:
2410 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2411 skey = SSD_KEY_MISCOMPARE;
2412 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2413 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2415 case NVME_SC_ACCESS_DENIED:
2416 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2417 skey = SSD_KEY_ILLEGAL_REQUEST;
2418 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2419 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2425 returned_sense_len = sizeof(struct scsi_sense_data);
2426 if (returned_sense_len < ccb->csio.sense_len)
2427 ccb->csio.sense_resid = ccb->csio.sense_len -
2430 ccb->csio.sense_resid = 0;
2432 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2433 1, skey, asc, ascq, SSD_ELEM_NONE);
2434 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2439 /** mprsas_complete_nvme_unmap
2441 * Complete native NVMe command issued using NVMe Encapsulated
/*
 * If the firmware returned an NVMe error response, translate the
 * NVMe completion status (stashed in the command's sense buffer)
 * into a SCSI status; otherwise the command completed GOOD.
 */
2445 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2447 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2448 struct nvme_completion *nvme_completion = NULL;
2449 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2451 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
/* Non-zero ErrorResponseCount means an NVMe error response was DMA'd
 * into cm_sense; convert it to SCSI status + autosense. */
2452 if (le16toh(mpi_reply->ErrorResponseCount)){
2453 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2454 scsi_status = mprsas_nvme_trans_status_code(
2455 nvme_completion->status, cm);
/*
 * Completion handler for SCSI I/O requests.  Translates the MPI reply
 * (if any) into CAM ccb status and sense data, handles driver-internal
 * error flags (e.g. chain-frame exhaustion), manages SIM-queue
 * freeze/unfreeze, then frees the command and completes the CCB.
 */
2461 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2463 MPI2_SCSI_IO_REPLY *rep;
2465 struct ccb_scsiio *csio;
2466 struct mprsas_softc *sassc;
2467 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2468 u8 *TLR_bits, TLR_on, *scsi_cdb;
2471 struct mprsas_target *target;
2472 target_id_t target_id;
2475 mpr_dprint(sc, MPR_TRACE,
2476 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2477 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2478 cm->cm_targ->outstanding);
/* Stop the per-command timeout; completion must hold the softc lock. */
2480 callout_stop(&cm->cm_callout);
2481 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2484 ccb = cm->cm_complete_data;
2486 target_id = csio->ccb_h.target_id;
2487 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2489 * XXX KDM if the chain allocation fails, does it matter if we do
2490 * the sync and unload here? It is simpler to do it in every case,
2491 * assuming it doesn't cause problems.
/* Sync and unload the data DMA map in the direction the I/O used. */
2493 if (cm->cm_data != NULL) {
2494 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2495 dir = BUS_DMASYNC_POSTREAD;
2496 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2497 dir = BUS_DMASYNC_POSTWRITE;
2498 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2499 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Bookkeeping: this command is no longer outstanding on the target. */
2502 cm->cm_targ->completed++;
2503 cm->cm_targ->outstanding--;
2504 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2505 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
/* Log completions that race with timeout recovery, an in-flight task
 * management command, or a diagnostic reset. */
2507 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2508 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2509 if (cm->cm_reply != NULL)
2510 mprsas_log_command(cm, MPR_RECOVERY,
2511 "completed timedout cm %p ccb %p during recovery "
2512 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2513 le16toh(rep->IOCStatus), rep->SCSIStatus,
2514 rep->SCSIState, le32toh(rep->TransferCount));
2516 mprsas_log_command(cm, MPR_RECOVERY,
2517 "completed timedout cm %p ccb %p during recovery\n",
2519 } else if (cm->cm_targ->tm != NULL) {
2520 if (cm->cm_reply != NULL)
2521 mprsas_log_command(cm, MPR_RECOVERY,
2522 "completed cm %p ccb %p during recovery "
2523 "ioc %x scsi %x state %x xfer %u\n",
2524 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2525 rep->SCSIStatus, rep->SCSIState,
2526 le32toh(rep->TransferCount));
2528 mprsas_log_command(cm, MPR_RECOVERY,
2529 "completed cm %p ccb %p during recovery\n",
2531 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2532 mprsas_log_command(cm, MPR_RECOVERY,
2533 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2536 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2538 * We ran into an error after we tried to map the command,
2539 * so we're getting a callback without queueing the command
2540 * to the hardware. So we set the status here, and it will
2541 * be retained below. We'll go through the "fast path",
2542 * because there can be no reply when we haven't actually
2543 * gone out to the hardware.
2545 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2548 * Currently the only error included in the mask is
2549 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2550 * chain frames. We need to freeze the queue until we get
2551 * a command that completed without this error, which will
2552 * hopefully have some chain frames attached that we can
2553 * use. If we wanted to get smarter about it, we would
2554 * only unfreeze the queue in this condition when we're
2555 * sure that we're getting some chain frames back. That's
2556 * probably unnecessary.
2558 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2559 xpt_freeze_simq(sassc->sim, 1);
2560 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2561 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2562 "freezing SIM queue\n");
2567 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2568 * flag, and use it in a few places in the rest of this function for
2569 * convenience. Use the macro if available.
2571 #if __FreeBSD_version >= 1100103
2572 scsi_cdb = scsiio_cdb_ptr(csio);
2574 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2575 scsi_cdb = csio->cdb_io.cdb_ptr;
2577 scsi_cdb = csio->cdb_io.cdb_bytes;
2581 * If this is a Start Stop Unit command and it was issued by the driver
2582 * during shutdown, decrement the refcount to account for all of the
2583 * commands that were sent. All SSU commands should be completed before
2584 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2587 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2588 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2592 /* Take the fast path to completion */
2593 if (cm->cm_reply == NULL) {
/* No reply frame: either pre-queue failure (status set above), a diag
 * reset aborted it, or it completed successfully with no status. */
2594 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2595 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2596 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2598 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2599 csio->scsi_status = SCSI_STATUS_OK;
2601 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2602 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2603 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2604 mpr_dprint(sc, MPR_XINFO,
2605 "Unfreezing SIM queue\n");
2610 * There are two scenarios where the status won't be
2611 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2612 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2614 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2616 * Freeze the dev queue so that commands are
2617 * executed in the correct order after error
2620 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2621 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2623 mpr_free_command(sc, cm);
/* A reply frame exists from here on: translate the MPI status. */
2628 target = &sassc->targets[target_id];
/* UNMAP to an NVMe device comes back as an NVMe Encapsulated reply;
 * presumably gated on an is-NVMe check — TODO confirm against full
 * source.  Translate it before the normal IOCStatus handling. */
2629 if (scsi_cdb[0] == UNMAP &&
2631 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2632 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2633 csio->scsi_status = rep->SCSIStatus;
2636 mprsas_log_command(cm, MPR_XINFO,
2637 "ioc %x scsi %x state %x xfer %u\n",
2638 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2639 le32toh(rep->TransferCount));
2641 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2642 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
/* Underrun: compute residual, then share SUCCESS handling below. */
2643 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2645 case MPI2_IOCSTATUS_SUCCESS:
2646 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2647 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2648 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2649 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2651 /* Completion failed at the transport level. */
2652 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2653 MPI2_SCSI_STATE_TERMINATED)) {
2654 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2658 /* In a modern packetized environment, an autosense failure
2659 * implies that there's not much else that can be done to
2660 * recover the command.
2662 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2663 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2668 * CAM doesn't care about SAS Response Info data, but if this is
2669 * the state check if TLR should be done. If not, clear the
2670 * TLR_bits for the target.
2672 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2673 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2674 == MPR_SCSI_RI_INVALID_FRAME)) {
2675 sc->mapping_table[target_id].TLR_bits =
2676 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2680 * Intentionally override the normal SCSI status reporting
2681 * for these two cases. These are likely to happen in a
2682 * multi-initiator environment, and we want to make sure that
2683 * CAM retries these commands rather than fail them.
2685 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2686 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2687 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2691 /* Handle normal status and sense */
2692 csio->scsi_status = rep->SCSIStatus;
2693 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2694 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2696 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
/* Copy device autosense from the command's sense buffer into the
 * CCB, clamped to both the returned length and the CCB's space. */
2698 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2699 int sense_len, returned_sense_len;
2701 returned_sense_len = min(le32toh(rep->SenseCount),
2702 sizeof(struct scsi_sense_data));
2703 if (returned_sense_len < csio->sense_len)
2704 csio->sense_resid = csio->sense_len -
2707 csio->sense_resid = 0;
2709 sense_len = min(returned_sense_len,
2710 csio->sense_len - csio->sense_resid);
2711 bzero(&csio->sense_data, sizeof(csio->sense_data));
2712 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2713 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2717 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2718 * and it's page code 0 (Supported Page List), and there is
2719 * inquiry data, and this is for a sequential access device, and
2720 * the device is an SSP target, and TLR is supported by the
2721 * controller, turn the TLR_bits value ON if page 0x90 is
2724 if ((scsi_cdb[0] == INQUIRY) &&
2725 (scsi_cdb[1] & SI_EVPD) &&
2726 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2727 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2728 (csio->data_ptr != NULL) &&
2729 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2730 (sc->control_TLR) &&
2731 (sc->mapping_table[target_id].device_info &
2732 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2733 vpd_list = (struct scsi_vpd_supported_page_list *)
2735 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2736 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2737 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
/* Allocation length from CDB bytes 3-4, minus any residual, bounds
 * how much of the VPD page list is valid to scan. */
2738 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2739 alloc_len -= csio->resid;
2740 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2741 if (vpd_list->list[i] == 0x90) {
2749 * If this is a SATA direct-access end device, mark it so that
2750 * a SCSI StartStopUnit command will be sent to it when the
2751 * driver is being shutdown.
2753 if ((scsi_cdb[0] == INQUIRY) &&
2754 (csio->data_ptr != NULL) &&
2755 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2756 (sc->mapping_table[target_id].device_info &
2757 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2758 ((sc->mapping_table[target_id].device_info &
2759 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2760 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2761 target = &sassc->targets[target_id];
2762 target->supports_SSU = TRUE;
2763 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2767 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2768 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2770 * If devinfo is 0 this will be a volume. In that case don't
2771 * tell CAM that the volume is not there. We want volumes to
2772 * be enumerated until they are deleted/removed, not just
2775 if (cm->cm_targ->devinfo == 0)
2776 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
2778 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2780 case MPI2_IOCSTATUS_INVALID_SGL:
2781 mpr_print_scsiio_cmd(sc, cm);
2782 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2784 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2786 * This is one of the responses that comes back when an I/O
2787 * has been aborted. If it is because of a timeout that we
2788 * initiated, just set the status to CAM_CMD_TIMEOUT.
2789 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2790 * command is the same (it gets retried, subject to the
2791 * retry counter), the only difference is what gets printed
2794 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2795 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2797 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2799 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2800 /* resid is ignored for this condition */
2802 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2804 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2805 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2807 * These can sometimes be transient transport-related
2808 * errors, and sometimes persistent drive-related errors.
2809 * We used to retry these without decrementing the retry
2810 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2811 * we hit a persistent drive problem that returns one of
2812 * these error codes, we would retry indefinitely. So,
2813 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2814 * count and avoid infinite retries. We're taking the
2815 * potential risk of flagging false failures in the event
2816 * of a topology-related error (e.g. a SAS expander problem
2817 * causes a command addressed to a drive to fail), but
2818 * avoiding getting into an infinite retry loop.
2820 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2821 mpr_dprint(sc, MPR_INFO,
2822 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2823 mpr_describe_table(mpr_iocstatus_string,
2824 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2825 target_id, cm->cm_desc.Default.SMID,
2826 le32toh(rep->IOCLogInfo));
2827 mpr_dprint(sc, MPR_XINFO,
2828 "SCSIStatus %x SCSIState %x xfercount %u\n",
2829 rep->SCSIStatus, rep->SCSIState,
2830 le32toh(rep->TransferCount));
2832 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2833 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2834 case MPI2_IOCSTATUS_INVALID_VPID:
2835 case MPI2_IOCSTATUS_INVALID_FIELD:
2836 case MPI2_IOCSTATUS_INVALID_STATE:
2837 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2838 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2839 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2840 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2841 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2843 mprsas_log_command(cm, MPR_XINFO,
2844 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2845 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2846 rep->SCSIStatus, rep->SCSIState,
2847 le32toh(rep->TransferCount));
2848 csio->resid = cm->cm_length;
/* UNMAP translated to NVMe can still be reported complete here;
 * presumably gated on an is-NVMe check — TODO confirm. */
2850 if (scsi_cdb[0] == UNMAP &&
2852 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2853 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2855 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2860 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
/* Success after an earlier freeze: release the SIM queue. */
2862 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2863 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2864 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2865 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
/* Any non-CMP status freezes the device queue for ordered recovery. */
2869 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2870 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2871 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2874 mpr_free_command(sc, cm);
2878 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests.  Validates the
 * reply, reports the SMP function result into the CCB, then syncs
 * and unloads the bidirectional DMA map and frees the command.
 */
2880 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2882 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2883 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2887 ccb = cm->cm_complete_data;
2890 * Currently there should be no way we can hit this case. It only
2891 * happens when we have a failure to allocate chain frames, and SMP
2892 * commands require two S/G elements only. That should be handled
2893 * in the standard request size.
2895 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2896 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2897 "request!\n", __func__, cm->cm_flags);
2898 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2902 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2904 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2905 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Recover the 64-bit SAS address from the request for logging. */
2909 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2910 sasaddr = le32toh(req->SASAddress.Low);
2911 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
/* Both the MPI IOCStatus and the SAS-level SASStatus must be good. */
2913 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2914 MPI2_IOCSTATUS_SUCCESS ||
2915 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2916 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2917 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2918 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2922 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2923 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2925 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2926 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2928 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2932 * We sync in both directions because we had DMAs in the S/G list
2933 * in both directions.
2935 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2936 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2937 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2938 mpr_free_command(sc, cm);
/*
 * Build and dispatch an SMP passthrough request to the given SAS
 * address.  Resolves the request/response buffer pointers from the
 * CCB (virtual addresses only), sets up a two-element uio so a single
 * mpr_map_command() handles both directions, and queues the command.
 */
2943 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2945 struct mpr_command *cm;
2946 uint8_t *request, *response;
2947 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2948 struct mpr_softc *sc;
/* Newer CAM encodes the data-location kind in CAM_DATA_MASK flags;
 * older CAM uses CAM_SCATTER_VALID / CAM_DATA_PHYS.  The two #if
 * branches below implement the same buffer resolution either way. */
2956 #if (__FreeBSD_version >= 1000028) || \
2957 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2958 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2959 case CAM_DATA_PADDR:
2960 case CAM_DATA_SG_PADDR:
2962 * XXX We don't yet support physical addresses here.
2964 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2965 "supported\n", __func__);
2966 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2971 * The chip does not support more than one buffer for the
2972 * request or response.
2974 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2975 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2976 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2977 "response buffer segments not supported for SMP\n",
2979 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2985 * The CAM_SCATTER_VALID flag was originally implemented
2986 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2987 * We have two. So, just take that flag to mean that we
2988 * might have S/G lists, and look at the S/G segment count
2989 * to figure out whether that is the case for each individual
2992 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2993 bus_dma_segment_t *req_sg;
2995 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2996 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2998 request = ccb->smpio.smp_request;
3000 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3001 bus_dma_segment_t *rsp_sg;
3003 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3004 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3006 response = ccb->smpio.smp_response;
3008 case CAM_DATA_VADDR:
3009 request = ccb->smpio.smp_request;
3010 response = ccb->smpio.smp_response;
3013 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3017 #else /* __FreeBSD_version < 1000028 */
3019 * XXX We don't yet support physical addresses here.
3021 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3022 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3023 "supported\n", __func__);
3024 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3030 * If the user wants to send an S/G list, check to make sure they
3031 * have single buffers.
3033 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3035 * The chip does not support more than one buffer for the
3036 * request or response.
3038 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3039 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3040 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3041 "response buffer segments not supported for SMP\n",
3043 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3049 * The CAM_SCATTER_VALID flag was originally implemented
3050 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3051 * We have two. So, just take that flag to mean that we
3052 * might have S/G lists, and look at the S/G segment count
3053 * to figure out whether that is the case for each individual
3056 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3057 bus_dma_segment_t *req_sg;
3059 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3060 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3062 request = ccb->smpio.smp_request;
3064 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3065 bus_dma_segment_t *rsp_sg;
3067 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3068 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3070 response = ccb->smpio.smp_response;
3072 request = ccb->smpio.smp_request;
3073 response = ccb->smpio.smp_response;
3075 #endif /* __FreeBSD_version < 1000028 */
3077 cm = mpr_alloc_command(sc);
3079 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3081 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the MPI SMP passthrough request frame. */
3086 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3087 bzero(req, sizeof(*req));
3088 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3090 /* Allow the chip to use any route to this SAS address. */
3091 req->PhysicalPort = 0xff;
3093 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3095 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3097 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3098 "%#jx\n", __func__, (uintmax_t)sasaddr);
3100 mpr_init_sge(cm, req, &req->SGL);
3103 * Set up a uio to pass into mpr_map_command(). This allows us to
3104 * do one map command, and one busdma call in there.
3106 cm->cm_uio.uio_iov = cm->cm_iovec;
3107 cm->cm_uio.uio_iovcnt = 2;
3108 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3111 * The read/write flag isn't used by busdma, but set it just in
3112 * case. This isn't exactly accurate, either, since we're going in
3115 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec 0 = outbound SMP request, iovec 1 = inbound SMP response. */
3117 cm->cm_iovec[0].iov_base = request;
3118 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3119 cm->cm_iovec[1].iov_base = response;
3120 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3122 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3123 cm->cm_iovec[1].iov_len;
3126 * Trigger a warning message in mpr_data_cb() for the user if we
3127 * wind up exceeding two S/G segments. The chip expects one
3128 * segment for the request and another for the response.
3130 cm->cm_max_segs = 2;
3132 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3133 cm->cm_complete = mprsas_smpio_complete;
3134 cm->cm_complete_data = ccb;
3137 * Tell the mapping code that we're using a uio, and that this is
3138 * an SMP passthrough request. There is a little special-case
3139 * logic there (in mpr_data_cb()) to handle the bidirectional
3142 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3143 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3145 /* The chip data format is little endian. */
3146 req->SASAddress.High = htole32(sasaddr >> 32);
3147 req->SASAddress.Low = htole32(sasaddr);
3150 * XXX Note that we don't have a timeout/abort mechanism here.
3151 * From the manual, it looks like task management requests only
3152 * work for SCSI IO and SATA passthrough requests. We may need to
3153 * have a mechanism to retry requests in the event of a chip reset
3154 * at least. Hopefully the chip will insure that any errors short
3155 * of that are relayed back to the driver.
3157 error = mpr_map_command(sc, cm);
3158 if ((error != 0) && (error != EINPROGRESS)) {
3159 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3160 "mpr_map_command()\n", __func__, error);
/* Error path: release the command and fail the CCB. */
3167 mpr_free_command(sc, cm);
3168 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * XPT_SMP_IO action handler.  Determines which SAS address should
 * receive the SMP request — the target itself if it embeds an SMP
 * target, otherwise the target's parent (the expander) — then hands
 * off to mprsas_send_smpcmd().
 */
3174 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3176 struct mpr_softc *sc;
3177 struct mprsas_target *targ;
3178 uint64_t sasaddr = 0;
3183 * Make sure the target exists.
3185 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3186 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3187 targ = &sassc->targets[ccb->ccb_h.target_id];
3188 if (targ->handle == 0x0) {
3189 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3190 __func__, ccb->ccb_h.target_id);
3191 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3197 * If this device has an embedded SMP target, we'll talk to it
3199 * figure out what the expander's address is.
3201 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3202 sasaddr = targ->sasaddr;
3205 * If we don't have a SAS address for the expander yet, try
3206 * grabbing it from the page 0x83 information cached in the
3207 * transport layer for this target. LSI expanders report the
3208 * expander SAS address as the port-associated SAS address in
3209 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3212 * XXX KDM disable this for now, but leave it commented out so that
3213 * it is obvious that this is another possible way to get the SAS
3216 * The parent handle method below is a little more reliable, and
3217 * the other benefit is that it works for devices other than SES
3218 * devices. So you can send a SMP request to a da(4) device and it
3219 * will get routed to the expander that device is attached to.
3220 * (Assuming the da(4) device doesn't contain an SMP target...)
3224 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3228 * If we still don't have a SAS address for the expander, look for
3229 * the parent device of this device, which is probably the expander.
3232 #ifdef OLD_MPR_PROBE
3233 struct mprsas_target *parent_target;
3236 if (targ->parent_handle == 0x0) {
3237 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3238 "a valid parent handle!\n", __func__, targ->handle);
3239 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Legacy probe: resolve the parent via a target-table lookup. */
3242 #ifdef OLD_MPR_PROBE
3243 parent_target = mprsas_find_target_by_handle(sassc, 0,
3244 targ->parent_handle);
3246 if (parent_target == NULL) {
3247 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3248 "a valid parent target!\n", __func__, targ->handle);
3249 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3253 if ((parent_target->devinfo &
3254 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3255 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3256 "does not have an SMP target!\n", __func__,
3257 targ->handle, parent_target->handle);
3258 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3262 sasaddr = parent_target->sasaddr;
3263 #else /* OLD_MPR_PROBE */
/* Current probe: parent devinfo/sasaddr are cached on the target. */
3264 if ((targ->parent_devinfo &
3265 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3266 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3267 "does not have an SMP target!\n", __func__,
3268 targ->handle, targ->parent_handle);
3269 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3273 if (targ->parent_sasaddr == 0x0) {
3274 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3275 "%d does not have a valid SAS address!\n", __func__,
3276 targ->handle, targ->parent_handle);
3277 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3281 sasaddr = targ->parent_sasaddr;
3282 #endif /* OLD_MPR_PROBE */
/* Still no address: nowhere to route the SMP request. */
3287 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3288 "handle %d\n", __func__, targ->handle);
3289 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3292 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3300 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler.  Allocates a high-priority task
 * management command and issues a target reset (hard link reset /
 * SATA link reset) to the addressed device; completion is handled
 * asynchronously by mprsas_resetdev_complete().
 */
3303 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3305 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3306 struct mpr_softc *sc;
3307 struct mpr_command *tm;
3308 struct mprsas_target *targ;
3310 MPR_FUNCTRACE(sassc->sc);
3311 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3313 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3314 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3316 tm = mpr_alloc_command(sc);
3318 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3319 "mprsas_action_resetdev\n");
3320 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the SCSI task management request for a target reset. */
3325 targ = &sassc->targets[ccb->ccb_h.target_id];
3326 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3327 req->DevHandle = htole16(targ->handle);
3328 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3329 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3331 /* SAS Hard Link Reset / SATA Link Reset */
3332 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go through the high-priority queue. */
3335 tm->cm_desc.HighPriority.RequestFlags =
3336 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3337 tm->cm_complete = mprsas_resetdev_complete;
3338 tm->cm_complete_data = ccb;
3340 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3341 __func__, targ->tid);
3343 targ->flags |= MPRSAS_TARGET_INRESET;
3345 mpr_map_command(sc, tm);
/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Maps the firmware's task-management reply
 * onto a CAM status for the originating CCB, then frees the TM command.
 */
3349 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3351 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3355 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3357 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3358 ccb = tm->cm_complete_data;
3361 * Currently there should be no way we can hit this case. It only
3362 * happens when we have a failure to allocate chain frames, and
3363 * task management commands don't have S/G lists.
3365 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3366 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3368 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3370 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3371 "handle %#04x! This should not happen!\n", __func__,
3372 tm->cm_flags, req->DevHandle);
3373 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3377 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3378 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* TM_COMPLETE means the reset succeeded; announce the bus device reset. */
3380 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3381 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3382 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3386 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3390 mprsas_free_tm(sc, tm);
/*
 * CAM polling entry point, used when interrupts are unavailable (e.g.
 * while dumping after a panic).  MPR_TRACE debugging is cleared first so
 * that frequent trace messages do not slow down the polled path; the
 * interrupt handler is then invoked directly.
 */
3395 mprsas_poll(struct cam_sim *sim)
3397 struct mprsas_softc *sassc;
3399 sassc = cam_sim_softc(sim);
3401 if (sassc->sc->mpr_debug & MPR_TRACE) {
3402 /* frequent debug messages during a panic just slow
3403 * everything down too much.
3405 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3407 sassc->sc->mpr_debug &= ~MPR_TRACE;
3410 mpr_intr_locked(sassc->sc);
/*
 * CAM asynchronous event callback.  Two events are handled:
 *
 * AC_ADVINFO_CHANGED - re-fetch the long READ CAPACITY data for the
 *     affected LUN and update its EEDP (protection information) state.
 * AC_FOUND_DEVICE - on FreeBSD versions without AC_ADVINFO_CHANGED,
 *     probe the new device for EEDP support via mprsas_check_eedp().
 */
3414 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3417 struct mpr_softc *sc;
3419 sc = (struct mpr_softc *)callback_arg;
3422 #if (__FreeBSD_version >= 1000006) || \
3423 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3424 case AC_ADVINFO_CHANGED: {
3425 struct mprsas_target *target;
3426 struct mprsas_softc *sassc;
3427 struct scsi_read_capacity_data_long rcap_buf;
3428 struct ccb_dev_advinfo cdai;
3429 struct mprsas_lun *lun;
3434 buftype = (uintptr_t)arg;
3440 * We're only interested in read capacity data changes.
3442 if (buftype != CDAI_TYPE_RCAPLONG)
3446 * See the comment in mpr_attach_sas() for a detailed
3447 * explanation. In these versions of FreeBSD we register
3448 * for all events and filter out the events that don't
3451 #if (__FreeBSD_version < 1000703) || \
3452 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3453 if (xpt_path_path_id(path) != sassc->sim->path_id)
3458 * We should have a handle for this, but check to make sure.
3460 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3461 ("Target %d out of bounds in mprsas_async\n",
3462 xpt_path_target_id(path)));
3463 target = &sassc->targets[xpt_path_target_id(path)];
3464 if (target->handle == 0)
/* Locate the LUN this event refers to; create a tracking entry if new. */
3467 lunid = xpt_path_lun_id(path);
3469 SLIST_FOREACH(lun, &target->luns, lun_link) {
3470 if (lun->lun_id == lunid) {
3476 if (found_lun == 0) {
3477 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3480 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3481 "LUN for EEDP support.\n");
3484 lun->lun_id = lunid;
3485 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
3488 bzero(&rcap_buf, sizeof(rcap_buf));
3489 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3490 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3491 cdai.ccb_h.flags = CAM_DIR_IN;
3492 cdai.buftype = CDAI_TYPE_RCAPLONG;
3493 #if (__FreeBSD_version >= 1100061) || \
3494 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3495 cdai.flags = CDAI_FLAG_NONE;
3499 cdai.bufsiz = sizeof(rcap_buf);
3500 cdai.buf = (uint8_t *)&rcap_buf;
3501 xpt_action((union ccb *)&cdai);
3502 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3503 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/*
 * The LUN is EEDP-formatted only if the advinfo fetch succeeded and
 * the protection-enable bit is set in the returned data.
 */
3505 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3506 && (rcap_buf.prot & SRC16_PROT_EN)) {
3507 lun->eedp_formatted = TRUE;
3508 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3510 lun->eedp_formatted = FALSE;
3511 lun->eedp_block_size = 0;
3516 case AC_FOUND_DEVICE: {
3517 struct ccb_getdev *cgd;
3520 * See the comment in mpr_attach_sas() for a detailed
3521 * explanation. In these versions of FreeBSD we register
3522 * for all events and filter out the events that don't
3525 #if (__FreeBSD_version < 1000703) || \
3526 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3527 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3532 #if (__FreeBSD_version < 901503) || \
3533 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3534 mprsas_check_eedp(sc, path, cgd);
3543 #if (__FreeBSD_version < 901503) || \
3544 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Determine whether a newly discovered LUN is formatted for EEDP
 * (protection information) by issuing an internal READ CAPACITY(16)
 * command.  The result is recorded on the target's LUN list by the
 * completion handler, mprsas_read_cap_done().  Only used on FreeBSD
 * versions that lack AC_ADVINFO_CHANGED support.
 */
3546 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3547 struct ccb_getdev *cgd)
3549 struct mprsas_softc *sassc = sc->sassc;
3550 struct ccb_scsiio *csio;
3551 struct scsi_read_capacity_16 *scsi_cmd;
3552 struct scsi_read_capacity_eedp *rcap_buf;
3554 target_id_t targetid;
3557 struct cam_path *local_path;
3558 struct mprsas_target *target;
3559 struct mprsas_lun *lun;
3563 pathid = cam_sim_path(sassc->sim);
3564 targetid = xpt_path_target_id(path);
3565 lunid = xpt_path_lun_id(path);
3567 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3568 "mprsas_check_eedp\n", targetid));
3569 target = &sassc->targets[targetid];
/* A zero handle means the target is not (or no longer) present. */
3570 if (target->handle == 0x0)
3574 * Determine if the device is EEDP capable.
3576 * If this flag is set in the inquiry data, the device supports
3577 * protection information, and must support the 16 byte read capacity
3578 * command, otherwise continue without sending read cap 16.
3580 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3584 * Issue a READ CAPACITY 16 command. This info is used to determine if
3585 * the LUN is formatted for EEDP support.
3587 ccb = xpt_alloc_ccb_nowait();
3589 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3594 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3596 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3603 * If LUN is already in list, don't create a new one.
3606 SLIST_FOREACH(lun, &target->luns, lun_link) {
3607 if (lun->lun_id == lunid) {
3613 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3616 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3618 xpt_free_path(local_path);
3622 lun->lun_id = lunid;
3623 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3626 xpt_path_string(local_path, path_str, sizeof(path_str));
3627 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3628 path_str, target->handle);
3631 * Issue a READ CAPACITY 16 command for the LUN. The
3632 * mprsas_read_cap_done function will load the read cap info into the
3635 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3637 if (rcap_buf == NULL) {
3638 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3639 "buffer for EEDP support.\n");
3640 xpt_free_path(ccb->ccb_h.path);
/* Build the internal SCSI I/O CCB for the READ CAPACITY(16) request. */
3644 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3646 csio->ccb_h.func_code = XPT_SCSI_IO;
3647 csio->ccb_h.flags = CAM_DIR_IN;
3648 csio->ccb_h.retry_count = 4;
3649 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3650 csio->ccb_h.timeout = 60000;
3651 csio->data_ptr = (uint8_t *)rcap_buf;
3652 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3653 csio->sense_len = MPR_SENSE_LEN;
3654 csio->cdb_len = sizeof(*scsi_cmd);
3655 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E is SERVICE ACTION IN(16); with SRC16_SERVICE_ACTION this forms
 * a READ CAPACITY(16) CDB.  CDB byte 13 is the allocation length. */
3657 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3658 bzero(scsi_cmd, sizeof(*scsi_cmd));
3659 scsi_cmd->opcode = 0x9E;
3660 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3661 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so mprsas_read_cap_done() can find its target list. */
3663 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion handler for the internal READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Records whether the LUN is EEDP-formatted (and
 * its block size) in the target's LUN list, then frees the data buffer,
 * path, and CCB.
 */
3668 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3670 struct mprsas_softc *sassc;
3671 struct mprsas_target *target;
3672 struct mprsas_lun *lun;
3673 struct scsi_read_capacity_eedp *rcap_buf;
3675 if (done_ccb == NULL)
3678 /* Driver need to release devq, it Scsi command is
3679 * generated by driver internally.
3680 * Currently there is a single place where driver
3681 * calls scsi command internally. In future if driver
3682 * calls more scsi command internally, it needs to release
3683 * devq internally, since those command will not go back to
3686 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3687 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3688 xpt_release_devq(done_ccb->ccb_h.path,
3689 /*count*/ 1, /*run_queue*/TRUE);
3692 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3695 * Get the LUN ID for the path and look it up in the LUN list for the
3698 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3699 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3700 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3701 target = &sassc->targets[done_ccb->ccb_h.target_id];
3702 SLIST_FOREACH(lun, &target->luns, lun_link) {
3703 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3707 * Got the LUN in the target's LUN list. Fill it in with EEDP
3708 * info. If the READ CAP 16 command had some SCSI error (common
3709 * if command is not supported), mark the lun as not supporting
3710 * EEDP and set the block size to 0.
3712 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3713 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3714 lun->eedp_formatted = FALSE;
3715 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte is PROT_EN: protection info enabled. */
3719 if (rcap_buf->protect & 0x01) {
3720 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3721 "%d is formatted for EEDP support.\n",
3722 done_ccb->ccb_h.target_lun,
3723 done_ccb->ccb_h.target_id);
3724 lun->eedp_formatted = TRUE;
3725 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3730 // Finished with this CCB and path.
3731 free(rcap_buf, M_MPR);
3732 xpt_free_path(done_ccb->ccb_h.path);
3733 xpt_free_ccb(done_ccb);
3735 #endif /* (__FreeBSD_version < 901503) || \
3736 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Prepare a task-management command targeting 'target'/'lun_id':
 * allocate a CCB with a path to the device (held so the devq can be
 * released when the TM completes) and mark the target in-reset so that
 * no new I/O is started while the TM is outstanding.
 */
3739 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3740 struct mprsas_target *target, lun_id_t lun_id)
3746 * Set the INRESET flag for this target so that no I/O will be sent to
3747 * the target until the reset has completed. If an I/O request does
3748 * happen, the devq will be frozen. The CCB holds the path which is
3749 * used to release the devq. The devq is released and the CCB is freed
3750 * when the TM completes.
3752 ccb = xpt_alloc_ccb_nowait();
3754 path_id = cam_sim_path(sc->sassc->sim);
3755 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3756 target->tid, lun_id) != CAM_REQ_CMP) {
3760 tm->cm_targ = target;
3761 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off topology discovery: set wait_for_port_enable so the simq
 * stays frozen until all discovery events complete, then send the
 * PORT_ENABLE request to the IOC.
 */
3767 mprsas_startup(struct mpr_softc *sc)
3770 * Send the port enable message and set the wait_for_port_enable flag.
3771 * This flag helps to keep the simq frozen until all discovery events
3774 sc->wait_for_port_enable = 1;
3775 mprsas_send_portenable(sc);
/*
 * Allocate a driver command and send an MPI2 PORT_ENABLE request to the
 * IOC.  The reply is handled asynchronously by
 * mprsas_portenable_complete().
 * NOTE(review): the error return taken when mpr_alloc_command() fails is
 * on a line not visible here — presumably a non-zero errno; confirm.
 */
3780 mprsas_send_portenable(struct mpr_softc *sc)
3782 MPI2_PORT_ENABLE_REQUEST *request;
3783 struct mpr_command *cm;
3787 if ((cm = mpr_alloc_command(sc)) == NULL)
3789 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3790 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3791 request->MsgFlags = 0;
3793 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3794 cm->cm_complete = mprsas_portenable_complete;
3798 mpr_map_command(sc, cm);
3799 mpr_dprint(sc, MPR_XINFO,
3800 "mpr_send_portenable finished cm %p req %p complete %p\n",
3801 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT_ENABLE request sent by
 * mprsas_send_portenable().  Logs failures, frees the command,
 * disestablishes the config intrhook if still armed, and wakes anyone
 * waiting for port enable / discovery to finish.
 */
3806 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3808 MPI2_PORT_ENABLE_REPLY *reply;
3809 struct mprsas_softc *sassc;
3815 * Currently there should be no way we can hit this case. It only
3816 * happens when we have a failure to allocate chain frames, and
3817 * port enable commands don't have S/G lists.
3819 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3820 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3821 "This should not happen!\n", __func__, cm->cm_flags);
3824 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3826 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/*
 * Byte-swap the little-endian IOCStatus before masking; masking the raw
 * value first would test the wrong bits on big-endian hosts.  (Matches
 * the le16toh()-then-mask pattern used elsewhere in this file.)
 */
3827 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3828 MPI2_IOCSTATUS_SUCCESS)
3829 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3831 mpr_free_command(sc, cm);
/* Discovery has started; the boot-time config intrhook is no longer
 * needed, so tear it down if it is still established. */
3832 if (sc->mpr_ich.ich_arg != NULL) {
3833 mpr_dprint(sc, MPR_XINFO, "disestablish config intrhook\n");
3834 config_intrhook_disestablish(&sc->mpr_ich);
3835 sc->mpr_ich.ich_arg = NULL;
3839 * Done waiting for port enable to complete. Decrement the refcount.
3840 * If refcount is 0, discovery is complete and a rescan of the bus can
3843 sc->wait_for_port_enable = 0;
3844 sc->port_enable_complete = 1;
3845 wakeup(&sc->port_enable_complete);
3846 mprsas_startup_decrement(sassc);
/*
 * Return whether target 'id' appears in the controller's comma-separated
 * exclude_ids list (used to skip excluded target IDs during discovery).
 * NOTE(review): strsep() advances 'ids' and writes NULs into the buffer
 * it parses — presumably exclude_ids is a scratch copy or mutation is
 * acceptable here; confirm against the softc initialization.
 */
3850 mprsas_check_id(struct mprsas_softc *sassc, int id)
3852 struct mpr_softc *sc = sassc->sc;
3856 ids = &sc->exclude_ids[0];
3857 while((name = strsep(&ids, ",")) != NULL) {
3858 if (name[0] == '\0')
3860 if (strtol(name, NULL, 0) == (long)id)
3868 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3870 struct mprsas_softc *sassc;
3871 struct mprsas_lun *lun, *lun_tmp;
3872 struct mprsas_target *targ;
3877 * The number of targets is based on IOC Facts, so free all of
3878 * the allocated LUNs for each target and then the target buffer
3881 for (i=0; i< maxtargets; i++) {
3882 targ = &sassc->targets[i];
3883 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3887 free(sassc->targets, M_MPR);
3889 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3890 M_MPR, M_WAITOK|M_ZERO);
3891 if (!sassc->targets) {
3892 panic("%s failed to alloc targets with error %d\n",