2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for Avago Technologies (LSI) MPT3 */
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
75 #include <dev/nvme/nvme.h>
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
90 #define MPRSAS_DISCOVERY_TIMEOUT 20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
94 * static array to check SCSI OpCode for EEDP protection bits
96 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 static uint8_t op_code_prot[256] = {
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131 struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133 struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137 struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139 union ccb *done_ccb);
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143 struct mpr_command *cm);
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
156 struct mprsas_target *target;
159 for (i = start; i < sassc->maxtargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
/* We need to freeze the simq during attach and diag reset, to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end of discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
 */
176 mprsas_startup_increment(struct mprsas_softc *sassc)
178 MPR_FUNCTRACE(sassc->sc);
180 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 if (sassc->startup_refcount++ == 0) {
182 /* just starting, freeze the simq */
183 mpr_dprint(sassc->sc, MPR_INIT,
184 "%s freezing simq\n", __func__);
185 #if (__FreeBSD_version >= 1000039) || \
186 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
189 xpt_freeze_simq(sassc->sim, 1);
191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 sassc->startup_refcount);
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 xpt_release_simq(sassc->sim, 1);
202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 MPR_FUNCTRACE(sassc->sc);
211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 if (--sassc->startup_refcount == 0) {
213 /* finished all discovery-related actions, release
214 * the simq and rescan for the latest topology.
216 mpr_dprint(sassc->sc, MPR_INIT,
217 "%s releasing simq\n", __func__);
218 sassc->flags &= ~MPRSAS_IN_STARTUP;
219 xpt_release_simq(sassc->sim, 1);
220 #if (__FreeBSD_version >= 1000039) || \
221 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
224 mprsas_rescan_target(sassc->sc, NULL);
227 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
228 sassc->startup_refcount);
/* The firmware requires us to stop sending commands when we're doing task
 * management, so refcount the TMs and keep the simq frozen when any are in
 * use.
 */
/*
 * Allocate a high-priority command for task management.  May return NULL
 * if no high-priority commands are free; the caller must handle that.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	struct mpr_command *tm;

	MPR_FUNCTRACE(sc);
	tm = mpr_alloc_high_priority_command(sc);
	return (tm);
}
247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
249 int target_id = 0xFFFFFFFF;
256 * For TM's the devq is frozen for the device. Unfreeze it here and
257 * free the resources used for freezing the devq. Must clear the
258 * INRESET flag as well or scsi I/O will not work.
260 if (tm->cm_targ != NULL) {
261 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
262 target_id = tm->cm_targ->tid;
265 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
267 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
268 xpt_free_path(tm->cm_ccb->ccb_h.path);
269 xpt_free_ccb(tm->cm_ccb);
272 mpr_free_high_priority_command(sc, tm);
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
278 struct mprsas_softc *sassc = sc->sassc;
280 target_id_t targetid;
284 pathid = cam_sim_path(sassc->sim);
286 targetid = CAM_TARGET_WILDCARD;
288 targetid = targ - sassc->targets;
291 * Allocate a CCB and schedule a rescan.
293 ccb = xpt_alloc_ccb_nowait();
295 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
299 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
300 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
306 if (targetid == CAM_TARGET_WILDCARD)
307 ccb->ccb_h.func_code = XPT_SCAN_BUS;
309 ccb->ccb_h.func_code = XPT_SCAN_TGT;
311 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
316 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
326 /* No need to be in here if debugging isn't enabled */
327 if ((cm->cm_sc->mpr_debug & level) == 0)
330 sbuf_new(&sb, str, sizeof(str), 0);
334 if (cm->cm_ccb != NULL) {
335 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
337 sbuf_cat(&sb, path_str);
338 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
339 scsi_command_string(&cm->cm_ccb->csio, &sb);
340 sbuf_printf(&sb, "length %d ",
341 cm->cm_ccb->csio.dxfer_len);
344 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
345 cam_sim_name(cm->cm_sc->sassc->sim),
346 cam_sim_unit(cm->cm_sc->sassc->sim),
347 cam_sim_bus(cm->cm_sc->sassc->sim),
348 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
352 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
353 sbuf_vprintf(&sb, fmt, ap);
355 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
363 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 struct mprsas_target *targ;
369 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
374 /* XXX retry the remove after the diag reset completes? */
375 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 "0x%04x\n", __func__, handle);
377 mprsas_free_tm(sc, tm);
381 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
382 MPI2_IOCSTATUS_SUCCESS) {
383 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
384 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
387 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
388 le32toh(reply->TerminationCount));
389 mpr_free_reply(sc, tm->cm_reply_data);
390 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
392 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
396 * Don't clear target if remove fails because things will get confusing.
397 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 * this target id if possible, and so we can assign the same target id
399 * to this device if it comes back in the future.
401 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402 MPI2_IOCSTATUS_SUCCESS) {
405 targ->encl_handle = 0x0;
406 targ->encl_level_valid = 0x0;
407 targ->encl_level = 0x0;
408 targ->connector_name[0] = ' ';
409 targ->connector_name[1] = ' ';
410 targ->connector_name[2] = ' ';
411 targ->connector_name[3] = ' ';
412 targ->encl_slot = 0x0;
413 targ->exp_dev_handle = 0x0;
415 targ->linkrate = 0x0;
418 targ->scsi_req_desc_type = 0;
421 mprsas_free_tm(sc, tm);
/*
 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
 * Otherwise Volume Delete is same as Bare Drive Removal.
 */
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
432 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 struct mpr_softc *sc;
434 struct mpr_command *cm;
435 struct mprsas_target *targ = NULL;
437 MPR_FUNCTRACE(sassc->sc);
440 targ = mprsas_find_target_by_handle(sassc, 0, handle);
442 /* FIXME: what is the action? */
443 /* We don't know about this device? */
444 mpr_dprint(sc, MPR_ERROR,
445 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449 targ->flags |= MPRSAS_TARGET_INREMOVAL;
451 cm = mprsas_alloc_tm(sc);
453 mpr_dprint(sc, MPR_ERROR,
454 "%s: command alloc failure\n", __func__);
458 mprsas_rescan_target(sc, targ);
460 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
461 req->DevHandle = targ->handle;
462 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
465 /* SAS Hard Link Reset / SATA Link Reset */
466 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
470 cm->cm_desc.HighPriority.RequestFlags =
471 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 cm->cm_complete = mprsas_remove_volume;
473 cm->cm_complete_data = (void *)(uintptr_t)handle;
475 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 __func__, targ->tid);
477 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
479 mpr_map_command(sc, cm);
/*
 * The firmware performs debounce on the link to avoid transient link errors
 * and false removals.  When it does decide that link has been lost and a
 * device needs to go away, it expects that the host will perform a target
 * reset and then an op remove.  The reset has the side-effect of aborting
 * any outstanding requests for the device, which is required for the
 * op-remove to succeed.  It's not clear if the host should check for the
 * device coming back alive after the reset.
 */
492 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
494 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
495 struct mpr_softc *sc;
496 struct mpr_command *cm;
497 struct mprsas_target *targ = NULL;
499 MPR_FUNCTRACE(sassc->sc);
503 targ = mprsas_find_target_by_handle(sassc, 0, handle);
505 /* FIXME: what is the action? */
506 /* We don't know about this device? */
507 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
512 targ->flags |= MPRSAS_TARGET_INREMOVAL;
514 cm = mprsas_alloc_tm(sc);
516 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
521 mprsas_rescan_target(sc, targ);
523 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
524 memset(req, 0, sizeof(*req));
525 req->DevHandle = htole16(targ->handle);
526 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
527 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
529 /* SAS Hard Link Reset / SATA Link Reset */
530 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
534 cm->cm_desc.HighPriority.RequestFlags =
535 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
536 cm->cm_complete = mprsas_remove_device;
537 cm->cm_complete_data = (void *)(uintptr_t)handle;
539 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
540 __func__, targ->tid);
541 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
543 mpr_map_command(sc, cm);
547 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
549 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
550 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
551 struct mprsas_target *targ;
552 struct mpr_command *next_cm;
557 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
558 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
562 * Currently there should be no way we can hit this case. It only
563 * happens when we have a failure to allocate chain frames, and
564 * task management commands don't have S/G lists.
566 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
567 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
568 "handle %#04x! This should not happen!\n", __func__,
569 tm->cm_flags, handle);
573 /* XXX retry the remove after the diag reset completes? */
574 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
575 "0x%04x\n", __func__, handle);
576 mprsas_free_tm(sc, tm);
580 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
581 MPI2_IOCSTATUS_SUCCESS) {
582 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
583 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
586 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
587 le32toh(reply->TerminationCount));
588 mpr_free_reply(sc, tm->cm_reply_data);
589 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
591 /* Reuse the existing command */
592 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
593 memset(req, 0, sizeof(*req));
594 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
595 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
596 req->DevHandle = htole16(handle);
598 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
599 tm->cm_complete = mprsas_remove_complete;
600 tm->cm_complete_data = (void *)(uintptr_t)handle;
602 mpr_map_command(sc, tm);
604 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
606 if (targ->encl_level_valid) {
607 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
608 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
609 targ->connector_name);
611 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
614 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
615 ccb = tm->cm_complete_data;
616 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
617 mprsas_scsiio_complete(sc, tm);
622 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
624 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
626 struct mprsas_target *targ;
627 struct mprsas_lun *lun;
631 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
632 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
635 * Currently there should be no way we can hit this case. It only
636 * happens when we have a failure to allocate chain frames, and
637 * task management commands don't have S/G lists.
639 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
640 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
641 "handle %#04x! This should not happen!\n", __func__,
642 tm->cm_flags, handle);
643 mprsas_free_tm(sc, tm);
648 /* most likely a chip reset */
649 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
650 "0x%04x\n", __func__, handle);
651 mprsas_free_tm(sc, tm);
655 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
656 __func__, handle, le16toh(reply->IOCStatus));
659 * Don't clear target if remove fails because things will get confusing.
660 * Leave the devname and sasaddr intact so that we know to avoid reusing
661 * this target id if possible, and so we can assign the same target id
662 * to this device if it comes back in the future.
664 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
665 MPI2_IOCSTATUS_SUCCESS) {
668 targ->encl_handle = 0x0;
669 targ->encl_level_valid = 0x0;
670 targ->encl_level = 0x0;
671 targ->connector_name[0] = ' ';
672 targ->connector_name[1] = ' ';
673 targ->connector_name[2] = ' ';
674 targ->connector_name[3] = ' ';
675 targ->encl_slot = 0x0;
676 targ->exp_dev_handle = 0x0;
678 targ->linkrate = 0x0;
681 targ->scsi_req_desc_type = 0;
683 while (!SLIST_EMPTY(&targ->luns)) {
684 lun = SLIST_FIRST(&targ->luns);
685 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
690 mprsas_free_tm(sc, tm);
694 mprsas_register_events(struct mpr_softc *sc)
699 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 setbit(events, MPI2_EVENT_IR_VOLUME);
708 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
720 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 &sc->sassc->mprsas_eh);
727 mpr_attach_sas(struct mpr_softc *sc)
729 struct mprsas_softc *sassc;
734 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
736 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
738 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
739 "Cannot allocate SAS subsystem memory\n");
744 * XXX MaxTargets could change during a reinit. Since we don't
745 * resize the targets[] array during such an event, cache the value
746 * of MaxTargets here so that we don't get into trouble later. This
747 * should move into the reinit logic.
749 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
750 sassc->targets = malloc(sizeof(struct mprsas_target) *
751 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
752 if (!sassc->targets) {
753 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
754 "Cannot allocate SAS target memory\n");
761 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
762 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
767 unit = device_get_unit(sc->mpr_dev);
768 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
769 unit, &sc->mpr_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
770 if (sassc->sim == NULL) {
771 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
776 TAILQ_INIT(&sassc->ev_queue);
778 /* Initialize taskqueue for Event Handling */
779 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
780 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
781 taskqueue_thread_enqueue, &sassc->ev_tq);
782 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
783 device_get_nameunit(sc->mpr_dev));
788 * XXX There should be a bus for every port on the adapter, but since
789 * we're just going to fake the topology for now, we'll pretend that
790 * everything is just a target on a single bus.
792 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
793 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
794 "Error %d registering SCSI bus\n", error);
800 * Assume that discovery events will start right away.
802 * Hold off boot until discovery is complete.
804 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
805 sc->sassc->startup_refcount = 0;
806 mprsas_startup_increment(sassc);
808 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
811 * Register for async events so we can determine the EEDP
812 * capabilities of devices.
814 status = xpt_create_path(&sassc->path, /*periph*/NULL,
815 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
817 if (status != CAM_REQ_CMP) {
818 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
819 "Error %#x creating sim path\n", status);
824 #if (__FreeBSD_version >= 1000006) || \
825 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
826 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
828 event = AC_FOUND_DEVICE;
832 * Prior to the CAM locking improvements, we can't call
833 * xpt_register_async() with a particular path specified.
835 * If a path isn't specified, xpt_register_async() will
836 * generate a wildcard path and acquire the XPT lock while
837 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
838 * It will then drop the XPT lock once that is done.
840 * If a path is specified for xpt_register_async(), it will
841 * not acquire and drop the XPT lock around the call to
842 * xpt_action(). xpt_action() asserts that the caller
843 * holds the SIM lock, so the SIM lock has to be held when
844 * calling xpt_register_async() when the path is specified.
846 * But xpt_register_async calls xpt_for_all_devices(),
847 * which calls xptbustraverse(), which will acquire each
848 * SIM lock. When it traverses our particular bus, it will
849 * necessarily acquire the SIM lock, which will lead to a
850 * recursive lock acquisition.
852 * The CAM locking changes fix this problem by acquiring
853 * the XPT topology lock around bus traversal in
854 * xptbustraverse(), so the caller can hold the SIM lock
855 * and it does not cause a recursive lock acquisition.
857 * These __FreeBSD_version values are approximate, especially
858 * for stable/10, which is two months later than the actual
862 #if (__FreeBSD_version < 1000703) || \
863 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
865 status = xpt_register_async(event, mprsas_async, sc,
869 status = xpt_register_async(event, mprsas_async, sc,
873 if (status != CAM_REQ_CMP) {
874 mpr_dprint(sc, MPR_ERROR,
875 "Error %#x registering async handler for "
876 "AC_ADVINFO_CHANGED events\n", status);
877 xpt_free_path(sassc->path);
881 if (status != CAM_REQ_CMP) {
883 * EEDP use is the exception, not the rule.
884 * Warn the user, but do not fail to attach.
886 mpr_printf(sc, "EEDP capabilities disabled.\n");
891 mprsas_register_events(sc);
896 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
901 mpr_detach_sas(struct mpr_softc *sc)
903 struct mprsas_softc *sassc;
904 struct mprsas_lun *lun, *lun_tmp;
905 struct mprsas_target *targ;
910 if (sc->sassc == NULL)
914 mpr_deregister_events(sc, sassc->mprsas_eh);
917 * Drain and free the event handling taskqueue with the lock
918 * unheld so that any parallel processing tasks drain properly
919 * without deadlocking.
921 if (sassc->ev_tq != NULL)
922 taskqueue_free(sassc->ev_tq);
924 /* Make sure CAM doesn't wedge if we had to bail out early. */
927 while (sassc->startup_refcount != 0)
928 mprsas_startup_decrement(sassc);
930 /* Deregister our async handler */
931 if (sassc->path != NULL) {
932 xpt_register_async(0, mprsas_async, sc, sassc->path);
933 xpt_free_path(sassc->path);
937 if (sassc->flags & MPRSAS_IN_STARTUP)
938 xpt_release_simq(sassc->sim, 1);
940 if (sassc->sim != NULL) {
941 xpt_bus_deregister(cam_sim_path(sassc->sim));
942 cam_sim_free(sassc->sim, FALSE);
947 if (sassc->devq != NULL)
948 cam_simq_free(sassc->devq);
950 for (i = 0; i < sassc->maxtargets; i++) {
951 targ = &sassc->targets[i];
952 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
956 free(sassc->targets, M_MPR);
964 mprsas_discovery_end(struct mprsas_softc *sassc)
966 struct mpr_softc *sc = sassc->sc;
970 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
971 callout_stop(&sassc->discovery_callout);
974 * After discovery has completed, check the mapping table for any
975 * missing devices and update their missing counts. Only do this once
976 * whenever the driver is initialized so that missing counts aren't
977 * updated unnecessarily. Note that just because discovery has
978 * completed doesn't mean that events have been processed yet. The
979 * check_devices function is a callout timer that checks if ALL devices
980 * are missing. If so, it will wait a little longer for events to
981 * complete and keep resetting itself until some device in the mapping
982 * table is not missing, meaning that event processing has started.
984 if (sc->track_mapping_events) {
985 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
986 "completed. Check for missing devices in the mapping "
988 callout_reset(&sc->device_check_callout,
989 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
995 mprsas_action(struct cam_sim *sim, union ccb *ccb)
997 struct mprsas_softc *sassc;
999 sassc = cam_sim_softc(sim);
1001 MPR_FUNCTRACE(sassc->sc);
1002 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1003 ccb->ccb_h.func_code);
1004 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
1006 switch (ccb->ccb_h.func_code) {
1009 struct ccb_pathinq *cpi = &ccb->cpi;
1010 struct mpr_softc *sc = sassc->sc;
1011 uint8_t sges_per_frame;
1013 cpi->version_num = 1;
1014 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1015 cpi->target_sprt = 0;
1016 #if (__FreeBSD_version >= 1000039) || \
1017 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1018 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1020 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1022 cpi->hba_eng_cnt = 0;
1023 cpi->max_target = sassc->maxtargets - 1;
1027 * initiator_id is set here to an ID outside the set of valid
1028 * target IDs (including volumes).
1030 cpi->initiator_id = sassc->maxtargets;
1031 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1032 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1033 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1034 cpi->unit_number = cam_sim_unit(sim);
1035 cpi->bus_id = cam_sim_bus(sim);
1037 * XXXSLM-I think this needs to change based on config page or
1038 * something instead of hardcoded to 150000.
1040 cpi->base_transfer_speed = 150000;
1041 cpi->transport = XPORT_SAS;
1042 cpi->transport_version = 0;
1043 cpi->protocol = PROTO_SCSI;
1044 cpi->protocol_version = SCSI_REV_SPC;
1047 * Max IO Size is Page Size * the following:
1048 * ((SGEs per frame - 1 for chain element) *
1049 * Max Chain Depth) + 1 for no chain needed in last frame
1051 * If user suggests a Max IO size to use, use the smaller of the
1052 * user's value and the calculated value as long as the user's
1053 * value is larger than 0. The user's value is in pages.
1055 sges_per_frame = (sc->chain_frame_size /
1056 sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
1057 cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
1058 cpi->maxio *= PAGE_SIZE;
1059 if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
1061 cpi->maxio = sc->max_io_pages * PAGE_SIZE;
1062 sc->maxio = cpi->maxio;
1063 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1066 case XPT_GET_TRAN_SETTINGS:
1068 struct ccb_trans_settings *cts;
1069 struct ccb_trans_settings_sas *sas;
1070 struct ccb_trans_settings_scsi *scsi;
1071 struct mprsas_target *targ;
1074 sas = &cts->xport_specific.sas;
1075 scsi = &cts->proto_specific.scsi;
1077 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1078 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1079 cts->ccb_h.target_id));
1080 targ = &sassc->targets[cts->ccb_h.target_id];
1081 if (targ->handle == 0x0) {
1082 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1086 cts->protocol_version = SCSI_REV_SPC2;
1087 cts->transport = XPORT_SAS;
1088 cts->transport_version = 0;
1090 sas->valid = CTS_SAS_VALID_SPEED;
1091 switch (targ->linkrate) {
1093 sas->bitrate = 150000;
1096 sas->bitrate = 300000;
1099 sas->bitrate = 600000;
1102 sas->bitrate = 1200000;
1108 cts->protocol = PROTO_SCSI;
1109 scsi->valid = CTS_SCSI_VALID_TQ;
1110 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1112 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1115 case XPT_CALC_GEOMETRY:
1116 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1117 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1120 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1122 mprsas_action_resetdev(sassc, ccb);
1127 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1128 "for abort or reset\n");
1129 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1132 mprsas_action_scsiio(sassc, ccb);
1134 #if __FreeBSD_version >= 900026
1136 mprsas_action_smpio(sassc, ccb);
1140 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Post a CAM async event (ac_code, e.g. AC_BUS_RESET or AC_SENT_BDR) for
 * the given target/LUN on this softc's SIM: build a transient path,
 * deliver the event, and free the path.
 * NOTE(review): intermediate lines are elided in this view; the error
 * path after a failed xpt_create_path() is not fully visible here.
 */
1148 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1149     target_id_t target_id, lun_id_t lun_id)
1151 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1152 	struct cam_path *path;
1154 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1155 	    ac_code, target_id, (uintmax_t)lun_id);
1157 	if (xpt_create_path(&path, NULL,
1158 	    path_id, target_id, lun_id) != CAM_REQ_CMP) {
1159 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
	/* Event delivered on the freshly built path; path is one-shot. */
1164 	xpt_async(ac_code, path, NULL);
1165 	xpt_free_path(path);
/*
 * Complete every in-flight command with a NULL reply, used during diag
 * reset cleanup.  Runs each command's completion callback (or wakeup for
 * polled/sleeping submitters) and decrements io_cmds_active; any command
 * that is neither completed nor already FREE is logged as an anomaly.
 * Caller must hold the softc mutex (asserted below).
 */
1169 mprsas_complete_all_commands(struct mpr_softc *sc)
1171 	struct mpr_command *cm;
1176 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1178 	/* complete all commands with a NULL reply */
	/* Slot 0 is reserved by the firmware interface, so start at 1. */
1179 	for (i = 1; i < sc->num_reqs; i++) {
1180 		cm = &sc->commands[i];
1181 		cm->cm_reply = NULL;
	/* Polled submitters spin on MPR_CM_FLAGS_COMPLETE instead of a callback. */
1184 		if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1185 			cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1187 		if (cm->cm_complete != NULL) {
1188 			mprsas_log_command(cm, MPR_RECOVERY,
1189 			    "completing cm %p state %x ccb %p for diag reset\n",
1190 			    cm, cm->cm_state, cm->cm_ccb);
1191 			cm->cm_complete(sc, cm);
	/* Sleeping submitters are woken instead of called back. */
1195 		if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1196 			mprsas_log_command(cm, MPR_RECOVERY,
1197 			    "waking up cm %p state %x ccb %p for diag reset\n",
1198 			    cm, cm->cm_state, cm->cm_ccb);
1203 		if (cm->cm_sc->io_cmds_active != 0)
1204 			cm->cm_sc->io_cmds_active--;
	/* A busy command we failed to complete should never exist; log it. */
1206 		if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1207 			/* this should never happen, but if it does, log */
1208 			mprsas_log_command(cm, MPR_RECOVERY,
1209 			    "cm %p state %x flags 0x%x ccb %p during diag "
1210 			    "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset reinitialization: put the driver back into startup/
 * discovery mode, announce a bus reset to CAM, flush all outstanding
 * commands, and invalidate every cached target handle so rediscovery
 * assigns fresh ones.  Port enable (elsewhere) retriggers discovery and
 * eventually releases the simq.
 */
1217 mprsas_handle_reinit(struct mpr_softc *sc)
1221 	/* Go back into startup mode and freeze the simq, so that CAM
1222 	 * doesn't send any commands until after we've rediscovered all
1223 	 * targets and found the proper device handles for them.
1225 	 * After the reset, portenable will trigger discovery, and after all
1226 	 * discovery-related activities have finished, the simq will be
1229 	mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1230 	sc->sassc->flags |= MPRSAS_IN_STARTUP;
1231 	sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1232 	mprsas_startup_increment(sc->sassc);
1234 	/* notify CAM of a bus reset */
1235 	mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1238 	/* complete and cleanup after all outstanding commands */
1239 	mprsas_complete_all_commands(sc);
1241 	mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1242 	    __func__, sc->sassc->startup_refcount);
1244 	/* zero all the target handles, since they may change after the
1245 	 * reset, and we have to rediscover all the targets and use the new
1248 	for (i = 0; i < sc->sassc->maxtargets; i++) {
	/* Outstanding count should already be zero after the flush above. */
1249 		if (sc->sassc->targets[i].outstanding != 0)
1250 			mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1251 			    i, sc->sassc->targets[i].outstanding);
1252 		sc->sassc->targets[i].handle = 0x0;
1253 		sc->sassc->targets[i].exp_dev_handle = 0x0;
1254 		sc->sassc->targets[i].outstanding = 0;
1255 		sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management command itself times out.
 * Logs the stuck TM; the recovery action taken afterwards (presumably a
 * controller diag reset) is in lines elided from this view — confirm
 * against the full source.
 */
1259 mprsas_tm_timeout(void *data)
1261 	struct mpr_command *tm = data;
1262 	struct mpr_softc *sc = tm->cm_sc;
1264 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1266 	mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/*
 * Completion handler for a LUN-reset task-management command.
 * Outcomes, in order checked below:
 *   - TM had a local error flag set: log loudly and free the TM.
 *   - NULL reply: cleanup if a diag reset is in progress, otherwise the
 *     firmware is unresponsive (reset escalation is in elided lines).
 *   - No commands remain on this LUN: recovery succeeded; announce a
 *     BDR and either continue aborting other timed-out commands on the
 *     target or free the TM.
 *   - Commands remain: the LUN reset effectively failed regardless of
 *     IOCStatus; escalate to a target reset, reusing the same TM frame.
 */
1272 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1274 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1275 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1276 	unsigned int cm_count = 0;
1277 	struct mpr_command *cm;
1278 	struct mprsas_target *targ;
	/* The TM completed; cancel its own timeout. */
1280 	callout_stop(&tm->cm_callout);
1282 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1283 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1287 	 * Currently there should be no way we can hit this case.  It only
1288 	 * happens when we have a failure to allocate chain frames, and
1289 	 * task management commands don't have S/G lists.
1291 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1292 		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1293 		    "%s: cm_flags = %#x for LUN reset! "
1294 		    "This should not happen!\n", __func__, tm->cm_flags);
1295 		mprsas_free_tm(sc, tm);
1299 	if (reply == NULL) {
1300 		mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1302 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1303 			/* this completion was due to a reset, just cleanup */
1304 			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1305 			    "reset, ignoring NULL LUN reset reply\n");
1307 			mprsas_free_tm(sc, tm);
1310 		/* we should have gotten a reply. */
1311 		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1312 		    "LUN reset attempt, resetting controller\n");
	/* Reply fields are little-endian on the wire; byte-swap for logging. */
1318 	mpr_dprint(sc, MPR_RECOVERY,
1319 	    "logical unit reset status 0x%x code 0x%x count %u\n",
1320 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1321 	    le32toh(reply->TerminationCount));
1324 	 * See if there are any outstanding commands for this LUN.
1325 	 * This could be made more efficient by using a per-LU data
1326 	 * structure of some sort.
1328 	TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1329 		if (cm->cm_lun == tm->cm_lun)
1333 	if (cm_count == 0) {
1334 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1335 		    "Finished recovery after LUN reset for target %u\n",
		/* Tell CAM a BDR-equivalent was delivered to this LUN. */
1338 		mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1342 		 * We've finished recovery for this logical unit.  check and
1343 		 * see if some other logical unit has a timedout command
1344 		 * that needs to be processed.
1346 		cm = TAILQ_FIRST(&targ->timedout_commands);
1348 			mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1349 			    "More commands to abort for target %u\n", targ->tid);
			/* Reuse the freed-up TM frame for the next abort. */
1350 			mprsas_send_abort(sc, tm, cm);
1353 			mprsas_free_tm(sc, tm);
1356 	/* if we still have commands for this LUN, the reset
1357 	 * effectively failed, regardless of the status reported.
1358 	 * Escalate to a target reset.
1360 	mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1361 	    "logical unit reset complete for target %u, but still "
1362 	    "have %u command(s), sending target reset\n", targ->tid,
1364 	mprsas_send_reset(sc, tm,
1365 	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a target-reset task-management command.
 * Mirrors mprsas_logical_unit_reset_complete(), one level up the
 * escalation ladder: TM-local error -> free; NULL reply -> cleanup (if
 * diag reset pending) or controller reset; target->outstanding == 0 ->
 * recovery done, announce BDR and free the TM; otherwise the target
 * reset failed and the next escalation is a full controller reset
 * (escalation call is in lines elided from this view).
 */
1370 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1372 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1373 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1374 	struct mprsas_target *targ;
	/* TM finished; stop its watchdog. */
1376 	callout_stop(&tm->cm_callout);
1378 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1379 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1383 	 * Currently there should be no way we can hit this case.  It only
1384 	 * happens when we have a failure to allocate chain frames, and
1385 	 * task management commands don't have S/G lists.
1387 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1388 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1389 		    "reset! This should not happen!\n", __func__, tm->cm_flags);
1390 		mprsas_free_tm(sc, tm);
1394 	if (reply == NULL) {
1395 		mpr_dprint(sc, MPR_RECOVERY,
1396 		    "NULL target reset reply for tm %p TaskMID %u\n",
1397 		    tm, le16toh(req->TaskMID));
1398 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1399 			/* this completion was due to a reset, just cleanup */
1400 			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1401 			    "reset, ignoring NULL target reset reply\n");
1403 			mprsas_free_tm(sc, tm);
1406 		/* we should have gotten a reply. */
1407 		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1408 		    "target reset attempt, resetting controller\n");
	/* Byte-swap little-endian reply fields for the log. */
1414 	mpr_dprint(sc, MPR_RECOVERY,
1415 	    "target reset status 0x%x code 0x%x count %u\n",
1416 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1417 	    le32toh(reply->TerminationCount));
1419 	if (targ->outstanding == 0) {
1421 		 * We've finished recovery for this target and all
1422 		 * of its logical units.
1424 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1425 		    "Finished reset recovery for target %u\n", targ->tid);
1427 		mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1431 		mprsas_free_tm(sc, tm);
1434 	 * After a target reset, if this target still has
1435 	 * outstanding commands, the reset effectively failed,
1436 	 * regardless of the status reported.  escalate.
1438 	mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1439 	    "Target reset complete for target %u, but still have %u "
1440 	    "command(s), resetting controller\n", targ->tid,
/* Seconds to wait for a reset TM before mprsas_tm_timeout() fires. */
1446 #define MPR_RESET_TIMEOUT 30
/*
 * Build and submit a SCSI task-management reset of the given type
 * (logical-unit reset or target reset) using the pre-allocated TM
 * command 'tm'.  Sets the matching completion handler, quiesces the
 * target via mprsas_prepare_for_tm(), arms the TM timeout callout, and
 * maps the command to the hardware.  Fails early if the target has no
 * firmware device handle.  The visible code does not return a value on
 * the success path here — return statements are in elided lines.
 */
1449 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1451 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1452 	struct mprsas_target *target;
1455 	target = tm->cm_targ;
1456 	if (target->handle == 0) {
1457 		mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1458 		    "%d\n", __func__, target->tid);
1462 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1463 	req->DevHandle = htole16(target->handle);
1464 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1465 	req->TaskType = type;
1467 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1468 		/* XXX Need to handle invalid LUNs */
1469 		MPR_SET_LUN(req->LUN, tm->cm_lun);
1470 		tm->cm_targ->logical_unit_resets++;
1471 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1472 		    "Sending logical unit reset to target %u lun %d\n",
1473 		    target->tid, tm->cm_lun);
1474 		tm->cm_complete = mprsas_logical_unit_reset_complete;
1475 		mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1476 	} else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1478 		 * Target reset method =
1479 		 * SAS Hard Link Reset / SATA Link Reset
1481 		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1482 		tm->cm_targ->target_resets++;
1483 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1484 		    "Sending target reset to target %u\n", target->tid);
1485 		tm->cm_complete = mprsas_target_reset_complete;
		/* Target reset quiesces every LUN, hence the wildcard. */
1486 		mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1489 		mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1493 	if (target->encl_level_valid) {
1494 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1495 		    "At enclosure level %d, slot %d, connector name (%4s)\n",
1496 		    target->encl_level, target->encl_slot,
1497 		    target->connector_name);
	/* TM requests go through the high-priority request queue. */
1501 	tm->cm_desc.HighPriority.RequestFlags =
1502 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1503 	tm->cm_complete_data = (void *)tm;
1505 	callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1506 	    mprsas_tm_timeout, tm);
1508 	err = mpr_map_command(sc, tm);
1510 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1511 		    "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT TASK task-management command.
 * Decision ladder below: TM-local error -> free the TM; NULL reply ->
 * cleanup (diag reset pending) or controller reset; no timed-out
 * commands left on the target -> recovery done, free the TM; the head
 * of the timedout list is a *different* command than the one we just
 * aborted -> continue aborting with the same TM; otherwise the abort
 * failed (the aborted command never completed) -> escalate to a
 * logical-unit reset.
 */
1518 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1520 	struct mpr_command *cm;
1521 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1522 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1523 	struct mprsas_target *targ;
	/* Abort TM finished; stop its watchdog. */
1525 	callout_stop(&tm->cm_callout);
1527 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1528 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1532 	 * Currently there should be no way we can hit this case.  It only
1533 	 * happens when we have a failure to allocate chain frames, and
1534 	 * task management commands don't have S/G lists.
1536 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1537 		mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1538 		    "cm_flags = %#x for abort %p TaskMID %u!\n",
1539 		    tm->cm_flags, tm, le16toh(req->TaskMID));
1540 		mprsas_free_tm(sc, tm);
1544 	if (reply == NULL) {
1545 		mpr_dprint(sc, MPR_RECOVERY,
1546 		    "NULL abort reply for tm %p TaskMID %u\n",
1547 		    tm, le16toh(req->TaskMID));
1548 		if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1549 			/* this completion was due to a reset, just cleanup */
1550 			mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1551 			    "reset, ignoring NULL abort reply\n");
1553 			mprsas_free_tm(sc, tm);
1555 		/* we should have gotten a reply. */
1556 		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1557 		    "abort attempt, resetting controller\n");
	/* Byte-swap the little-endian reply fields for the log. */
1563 	mpr_dprint(sc, MPR_RECOVERY,
1564 	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1565 	    le16toh(req->TaskMID),
1566 	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1567 	    le32toh(reply->TerminationCount));
1569 	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1572 		 * if there are no more timedout commands, we're done with
1573 		 * error recovery for this target.
1575 		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1576 		    "Finished abort recovery for target %u\n", targ->tid);
1578 		mprsas_free_tm(sc, tm);
	/* Head of the list changed => the aborted command was removed. */
1579 	} else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1580 		/* abort success, but we have more timedout commands to abort */
1581 		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1582 		    "Continuing abort recovery for target %u\n", targ->tid);
1583 		mprsas_send_abort(sc, tm, cm);
1586 		 * we didn't get a command completion, so the abort
1587 		 * failed as far as we're concerned.  escalate.
1589 		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1590 		    "Abort failed for target %u, sending logical unit reset\n",
1593 		mprsas_send_reset(sc, tm,
1594 		    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to wait for an abort TM before mprsas_tm_timeout() fires. */
1598 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and submit an ABORT TASK TM for the timed-out command 'cm',
 * using the TM frame 'tm'.  TaskMID identifies the victim command by
 * its SMID.  Quiesces the target LUN, arms a 5-second abort timeout,
 * and maps the TM to hardware.  Bails out if the target has no
 * firmware device handle.
 */
1601 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1602     struct mpr_command *cm)
1604 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1605 	struct mprsas_target *targ;
1609 	if (targ->handle == 0) {
1610 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1611 		    "%s null devhandle for target_id %d\n",
1612 		    __func__, cm->cm_ccb->ccb_h.target_id);
1616 	mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1617 	    "Aborting command %p\n", cm);
1619 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1620 	req->DevHandle = htole16(targ->handle);
1621 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1622 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1624 	/* XXX Need to handle invalid LUNs */
1625 	MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
	/* SMID of the command being aborted, little-endian on the wire. */
1627 	req->TaskMID = htole16(cm->cm_desc.Default.SMID);
	/* TMs always ride the high-priority request queue. */
1630 	tm->cm_desc.HighPriority.RequestFlags =
1631 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1632 	tm->cm_complete = mprsas_abort_complete;
1633 	tm->cm_complete_data = (void *)tm;
1634 	tm->cm_targ = cm->cm_targ;
1635 	tm->cm_lun = cm->cm_lun;
1637 	callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1638 	    mprsas_tm_timeout, tm);
1642 	mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1644 	err = mpr_map_command(sc, tm);
1646 		mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1647 		    "error %d sending abort for cm %p SMID %u\n",
1648 		    err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O command.  First drains any
 * pending interrupts (the command may have just completed); if it is
 * still live, marks it TIMEDOUT with CAM_CMD_TIMEOUT, queues it on the
 * target's timedout_commands list, and either piggybacks on an existing
 * recovery TM or allocates one and starts recovery with an abort.
 * Failure to allocate a TM is logged — the comment below documents why
 * queueing for a later TM is intentionally not implemented.
 */
1653 mprsas_scsiio_timeout(void *data)
1655 	sbintime_t elapsed, now;
1657 	struct mpr_softc *sc;
1658 	struct mpr_command *cm;
1659 	struct mprsas_target *targ;
1661 	cm = (struct mpr_command *)data;
1667 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1669 	mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1672 	 * Run the interrupt handler to make sure it's not pending.  This
1673 	 * isn't perfect because the command could have already completed
1674 	 * and been re-used, though this is unlikely.
1676 	mpr_intr_locked(sc);
1677 	if (cm->cm_state == MPR_CM_STATE_FREE) {
1678 		mprsas_log_command(cm, MPR_XINFO,
1679 		    "SCSI command %p almost timed out\n", cm);
1683 	if (cm->cm_ccb == NULL) {
1684 		mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
	/* qos.sim_data holds the sbinuptime() stamp taken at submission. */
1691 	elapsed = now - ccb->ccb_h.qos.sim_data;
1692 	mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1693 	    "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1694 	    targ->tid, targ->handle, ccb->ccb_h.timeout,
1695 	    sbintime_getsec(elapsed), elapsed & 0xffffffff);
1696 	if (targ->encl_level_valid) {
1697 		mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1698 		    "At enclosure level %d, slot %d, connector name (%4s)\n",
1699 		    targ->encl_level, targ->encl_slot, targ->connector_name);
1702 	/* XXX first, check the firmware state, to see if it's still
1703 	 * operational.  if not, do a diag reset.
1705 	mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1706 	cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1707 	TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1709 	if (targ->tm != NULL) {
1710 		/* target already in recovery, just queue up another
1711 		 * timedout command to be processed later.
1713 		mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1714 		    "processing by tm %p\n", cm, targ->tm);
1716 	else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1718 		/* start recovery by aborting the first timedout command */
1719 		mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1720 		    "Sending abort to target %u for SMID %d\n", targ->tid,
1721 		    cm->cm_desc.Default.SMID);
1722 		mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1724 		mprsas_send_abort(sc, targ->tm, cm);
1727 	/* XXX queue this target up for recovery once a TM becomes
1728 	 * available.  The firmware only has a limited number of
1729 	 * HighPriority credits for the high priority requests used
1730 	 * for task management, and we ran out.
1732 	 * Isilon: don't worry about this for now, since we have
1733 	 * more credits than disks in an enclosure, and limit
1734 	 * ourselves to one TM per target for recovery.
1736 	mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1737 	    "timedout cm %p failed to allocate a tm\n", cm);
/*
 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
 * to a SCSI UNMAP, wrapped in an MPI2.6 NVMe Encapsulated request, and
 * submit it directly to an NVMe device behind the tri-mode HBA.
 *
 * Return 0 - for success (encapsulated request was issued),
 * 1 - to immediately return back the command with success status to CAM
 * negative value - to fallback to firmware path i.e. issue scsi unmap
 * to FW without any translation.
 */
1750 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1751     union ccb *ccb, struct mprsas_target *targ)
1753 	Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1754 	struct ccb_scsiio *csio;
1755 	struct unmap_parm_list *plist;
1756 	struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1757 	struct nvme_command *c;
1759 	uint16_t ndesc, list_len, data_length;
1760 	struct mpr_prp_page *prp_page_info;
1761 	uint64_t nvme_dsm_ranges_dma_handle;
	/* UNMAP CDB bytes 7-8: big-endian parameter list length. */
1764 #if __FreeBSD_version >= 1100103
1765 	list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1767 	if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1768 		list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1769 		    ccb->csio.cdb_io.cdb_ptr[8]);
1771 		list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1772 		    ccb->csio.cdb_io.cdb_bytes[8]);
1776 		mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1780 	plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1782 		mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1783 		    "save UNMAP data\n");
1787 	/* Copy SCSI unmap data to a local buffer */
1788 	bcopy(csio->data_ptr, plist, csio->dxfer_len);
1790 	/* return back the unmap command to CAM with success status,
1791 	 * if number of descripts is zero.
	/* Each UNMAP block descriptor is 16 bytes, hence the >> 4. */
1793 	ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1795 		mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1796 		    "UNMAP cmd is Zero\n");
1801 	data_length = ndesc * sizeof(struct nvme_dsm_range);
	/* DSM payload must fit in the device's Max Data Transfer Size. */
1802 	if (data_length > targ->MDTS) {
1803 		mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1804 		    "Device's MDTS: %d\n", data_length, targ->MDTS);
1809 	prp_page_info = mpr_alloc_prp_page(sc);
1810 	KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1811 	    "UNMAP command.\n", __func__));
1814 	 * Insert the allocated PRP page into the command's PRP page list.  This
1815 	 * will be freed when the command is freed.
1817 	TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1819 	nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1820 	nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1822 	bzero(nvme_dsm_ranges, data_length);
1824 	/* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1825 	 * for each descriptors contained in SCSI UNMAP data.
	/* SCSI descriptors are big-endian; NVMe ranges are little-endian. */
1827 	for (i = 0; i < ndesc; i++) {
1828 		nvme_dsm_ranges[i].length =
1829 		    htole32(be32toh(plist->desc[i].nlb));
1830 		nvme_dsm_ranges[i].starting_lba =
1831 		    htole64(be64toh(plist->desc[i].slba));
1832 		nvme_dsm_ranges[i].attributes = 0;
1835 	/* Build MPI2.6's NVMe Encapsulated Request Message */
1836 	req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1837 	bzero(req, sizeof(*req));
1838 	req->DevHandle = htole16(targ->handle);
1839 	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1840 	req->Flags = MPI26_NVME_FLAGS_WRITE;
	/* NVMe error responses land in the command's sense buffer. */
1841 	req->ErrorResponseBaseAddress.High =
1842 	    htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1843 	req->ErrorResponseBaseAddress.Low =
1844 	    htole32(cm->cm_sense_busaddr);
1845 	req->ErrorResponseAllocationLength =
1846 	    htole16(sizeof(struct nvme_completion));
1847 	req->EncapsulatedCommandLength =
1848 	    htole16(sizeof(struct nvme_command));
1849 	req->DataLength = htole32(data_length);
1851 	/* Build NVMe DSM command */
1852 	c = (struct nvme_command *) req->NVMe_Command;
1853 	c->opc = NVME_OPC_DATASET_MANAGEMENT;
	/* LUNs map to namespaces; NVMe namespace IDs are 1-based. */
1854 	c->nsid = htole32(csio->ccb_h.target_lun + 1);
	/* cdw10: number of ranges, 0-based per the NVMe spec. */
1855 	c->cdw10 = htole32(ndesc - 1);
1856 	c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1858 	cm->cm_length = data_length;
1861 	cm->cm_complete = mprsas_scsiio_complete;
1862 	cm->cm_complete_data = ccb;
1864 	cm->cm_lun = csio->ccb_h.target_lun;
1867 	cm->cm_desc.Default.RequestFlags =
1868 	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
	/* Stamp submission time for the timeout handler's elapsed log. */
1870 	csio->ccb_h.qos.sim_data = sbinuptime();
1871 #if __FreeBSD_version >= 1000029
1872 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1873 	    mprsas_scsiio_timeout, cm, 0);
1874 #else //__FreeBSD_version < 1000029
1875 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1876 	    mprsas_scsiio_timeout, cm);
1877 #endif //__FreeBSD_version >= 1000029
1880 	targ->outstanding++;
1881 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1882 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1884 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1885 	    __func__, cm, ccb, targ->outstanding);
1887 	mpr_build_nvme_prp(sc, cm, req,
1888 	    (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1889 	mpr_map_command(sc, cm);
/*
 * XPT_SCSI_IO handler: validate the target, allocate a driver command,
 * translate the CCB into an MPI2 SCSI IO request (direction, tags, LUN,
 * CDB, optional EEDP/T10-PI setup), arm the per-command timeout, and
 * map the command to hardware.  UNMAP to an NVMe device is diverted to
 * mprsas_build_nvme_unmap() for native DSM translation.  Early-out
 * paths complete the CCB with DEV_NOT_THERE / SEL_TIMEOUT / BUSY /
 * REQUEUE as appropriate.  Caller must hold the softc mutex.
 */
1897 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1899 	MPI2_SCSI_IO_REQUEST *req;
1900 	struct ccb_scsiio *csio;
1901 	struct mpr_softc *sc;
1902 	struct mprsas_target *targ;
1903 	struct mprsas_lun *lun;
1904 	struct mpr_command *cm;
1905 	uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1906 	uint16_t eedp_flags;
1907 	uint32_t mpi_control;
1912 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
1915 	KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1916 	    ("Target %d out of bounds in XPT_SCSI_IO\n",
1917 	    csio->ccb_h.target_id));
1918 	targ = &sassc->targets[csio->ccb_h.target_id];
1919 	mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
	/* handle == 0 means the target is not (or no longer) mapped. */
1920 	if (targ->handle == 0x0) {
1921 		mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1922 		    __func__, csio->ccb_h.target_id);
1923 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
	/* RAID members are owned by the IR firmware; no direct SCSI I/O. */
1927 	if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1928 		mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1929 		    "supported %u\n", __func__, csio->ccb_h.target_id);
1930 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1935 	 * Sometimes, it is possible to get a command that is not "In
1936 	 * Progress" and was actually aborted by the upper layer.  Check for
1937 	 * this here and complete the command without error.
1939 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1940 		mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1941 		    "target %u\n", __func__, csio->ccb_h.target_id);
1946 	 * If devinfo is 0 this will be a volume.  In that case don't tell CAM
1947 	 * that the volume has timed out.  We want volumes to be enumerated
1948 	 * until they are deleted/removed, not just failed.
1950 	if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1951 		if (targ->devinfo == 0)
1952 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1954 			mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1959 	if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1960 		mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1961 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1967 	 * If target has a reset in progress, freeze the devq and return.  The
1968 	 * devq will be released when the TM reset is finished.
1970 	if (targ->flags & MPRSAS_TARGET_INRESET) {
1971 		ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1972 		mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1973 		    __func__, targ->tid);
1974 		xpt_freeze_devq(ccb->ccb_h.path, 1);
1979 	cm = mpr_alloc_command(sc);
	/* Out of commands or mid-diag-reset: freeze the simq and requeue. */
1980 	if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1982 			mpr_free_command(sc, cm);
1984 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1985 			xpt_freeze_simq(sassc->sim, 1);
1986 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
1988 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1989 		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1994 	/* For NVME device's issue UNMAP command directly to NVME drives by
1995 	 * constructing equivalent native NVMe DataSetManagement command.
1997 #if __FreeBSD_version >= 1100103
1998 	scsi_opcode = scsiio_cdb_ptr(csio)[0];
2000 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2001 		scsi_opcode = csio->cdb_io.cdb_ptr[0];
2003 		scsi_opcode = csio->cdb_io.cdb_bytes[0];
	/* Native-DSM path only for virtual-address data (we copy the list). */
2005 	if (scsi_opcode == UNMAP &&
2007 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2008 		rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2009 		if (rc == 1) { /* return command to CAM with success status */
2010 			mpr_free_command(sc, cm);
2011 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2014 		} else if (!rc) /* Issued NVMe Encapsulated Request Message */
	/* Otherwise (rc < 0): fall through to the normal firmware path. */
2018 	req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2019 	bzero(req, sizeof(*req));
2020 	req->DevHandle = htole16(targ->handle);
2021 	req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2023 	req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2024 	req->SenseBufferLength = MPR_SENSE_LEN;
2026 	req->ChainOffset = 0;
2027 	req->SGLOffset0 = 24;	/* 32bit word offset to the SGL */
2032 	req->DataLength = htole32(csio->dxfer_len);
2033 	req->BidirectionalDataLength = 0;
2034 	req->IoFlags = htole16(csio->cdb_len);
2037 	/* Note: BiDirectional transfers are not supported */
2038 	switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2040 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
2041 		cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2044 		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2045 		cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2049 		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
	/* 32-byte CDBs need the additional-CDB-length field set. */
2053 	if (csio->cdb_len == 32)
2054 		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2056 	 * It looks like the hardware doesn't require an explicit tag
2057 	 * number for each transaction.  SAM Task Management not supported
2060 	switch (csio->tag_action) {
2061 	case MSG_HEAD_OF_Q_TAG:
2062 		mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2064 	case MSG_ORDERED_Q_TAG:
2065 		mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2068 		mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2070 	case CAM_TAG_ACTION_NONE:
2071 	case MSG_SIMPLE_Q_TAG:
2073 		mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2076 	mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2077 	req->Control = htole32(mpi_control);
2079 	if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2080 		mpr_free_command(sc, cm);
2081 		mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
	/* CDB may arrive by pointer or inline in the CCB. */
2086 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2087 		bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2089 		KASSERT(csio->cdb_len <= IOCDBLEN,
2090 		    ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2091 		    "is not set", csio->cdb_len));
2092 		bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2094 	req->IoFlags = htole16(csio->cdb_len);
2097 	 * Check if EEDP is supported and enabled.  If it is then check if the
2098 	 * SCSI opcode could be using EEDP.  If so, make sure the LUN exists and
2099 	 * is formatted for EEDP support.  If all of this is true, set CDB up
2100 	 * for EEDP transfer.
2102 	eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2103 	if (sc->eedp_enabled && eedp_flags) {
2104 		SLIST_FOREACH(lun, &targ->luns, lun_link) {
2105 			if (lun->lun_id == csio->ccb_h.target_lun) {
2110 		if ((lun != NULL) && (lun->eedp_formatted)) {
2111 			req->EEDPBlockSize = htole16(lun->eedp_block_size);
2112 			eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2113 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2114 			    MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2115 			if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2117 				    MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2119 			req->EEDPFlags = htole16(eedp_flags);
2122 			 * If CDB less than 32, fill in Primary Ref Tag with
2123 			 * low 4 bytes of LBA.  If CDB is 32, tag stuff is
2124 			 * already there.  Also, set protection bit.  FreeBSD
2125 			 * currently does not support CDBs bigger than 16, but
2126 			 * the code doesn't hurt, and will be here for the
2129 			if (csio->cdb_len != 32) {
				/* LBA starts at byte 6 (16-byte CDB) or 2. */
2130 				lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2131 				ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2132 				    PrimaryReferenceTag;
2133 				for (i = 0; i < 4; i++) {
2135 					    req->CDB.CDB32[lba_byte + i];
2138 				req->CDB.EEDP32.PrimaryReferenceTag =
2140 				    CDB.EEDP32.PrimaryReferenceTag);
2141 				req->CDB.EEDP32.PrimaryApplicationTagMask =
2143 				req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
2147 				    MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2148 				req->EEDPFlags = htole16(eedp_flags);
2149 				req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2155 	cm->cm_length = csio->dxfer_len;
2156 	if (cm->cm_length != 0) {
2158 		cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
	/* SGL sits after the 24-dword fixed header in the request frame. */
2162 	cm->cm_sge = &req->SGL;
2163 	cm->cm_sglsize = (32 - 24) * 4;
2164 	cm->cm_complete = mprsas_scsiio_complete;
2165 	cm->cm_complete_data = ccb;
2167 	cm->cm_lun = csio->ccb_h.target_lun;
2170 	 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2171 	 * and set descriptor type.
2173 	if (targ->scsi_req_desc_type ==
2174 	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2175 		req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2176 		cm->cm_desc.FastPathSCSIIO.RequestFlags =
2177 		    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2178 		if (!sc->atomic_desc_capable) {
2179 			cm->cm_desc.FastPathSCSIIO.DevHandle =
2180 			    htole16(targ->handle);
2183 		cm->cm_desc.SCSIIO.RequestFlags =
2184 		    MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2185 		if (!sc->atomic_desc_capable)
2186 			cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
	/* Stamp submission time; the timeout handler logs elapsed from it. */
2189 	csio->ccb_h.qos.sim_data = sbinuptime();
2190 #if __FreeBSD_version >= 1000029
2191 	callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2192 	    mprsas_scsiio_timeout, cm, 0);
2193 #else //__FreeBSD_version < 1000029
2194 	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2195 	    mprsas_scsiio_timeout, cm);
2196 #endif //__FreeBSD_version >= 1000029
2199 	targ->outstanding++;
2200 	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2201 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2203 	mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2204 	    __func__, cm, ccb, targ->outstanding);
2206 	mpr_map_command(sc, cm);
/*
 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
 * into human-readable diagnostics: IOC status, SCSI status/state
 * (decoded as a bitfield), enclosure location, autosense dump when
 * valid, and the task-management response code when present.  Pure
 * logging — no state is modified.
 */
2214 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2215     Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2219 	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2220 	    MPI2_IOCSTATUS_MASK;
2221 	u8 scsi_state = mpi_reply->SCSIState;
2222 	u8 scsi_status = mpi_reply->SCSIStatus;
2223 	char *desc_ioc_state = NULL;
2224 	char *desc_scsi_status = NULL;
2225 	u32 log_info = le32toh(mpi_reply->IOCLogInfo);
	/* 0x31170000 is filtered as noise — a known chatty log-info code. */
2227 	if (log_info == 0x31170000)
2230 	desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2232 	desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2235 	mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2236 	    le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2237 	if (targ->encl_level_valid) {
2238 		mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2239 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2240 		    targ->connector_name);
2244 	 * We can add more detail about underflow data here
	/* %b decodes scsi_state bits using the inline bit-name string. */
2247 	mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2248 	    "scsi_state %b\n", desc_scsi_status, scsi_status,
2249 	    scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2250 	    "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2252 	if (sc->mpr_debug & MPR_XINFO &&
2253 	    scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2254 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2255 		scsi_sense_print(csio);
2256 		mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2259 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2260 		response_info = le32toh(mpi_reply->ResponseInfo);
2261 		response_bytes = (u8 *)&response_info;
2262 		mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2264 		    mpr_describe_table(mpr_scsi_taskmgmt_string,
2265 		    response_bytes[0]));
/*
 * Translate a native NVMe completion status (Status Code Type + Status
 * Code) into an equivalent SCSI status byte, and synthesize matching
 * fixed-format sense data (sense key / ASC / ASCQ) directly into the
 * CCB's sense buffer.  Any sct/sc pair not explicitly mapped below falls
 * through to the defaults set before the switch: CHECK CONDITION with
 * ILLEGAL REQUEST.  On return the CCB is flagged CAM_AUTOSNS_VALID so CAM
 * consumes the synthesized sense rather than requesting sense itself.
 */
2269 /** mprsas_nvme_trans_status_code
2271  * Convert Native NVMe command error status to
2272  * equivalent SCSI error status.
2274  * Returns appropriate scsi_status
2277 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
2278     struct mpr_command *cm)
2280 	u8 status = MPI2_SCSI_STATUS_GOOD;
2281 	int skey, asc, ascq;
2282 	union ccb *ccb = cm->cm_complete_data;
2283 	int returned_sense_len;
/* Defaults for any NVMe status not explicitly mapped below. */
2285 	status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2286 	skey = SSD_KEY_ILLEGAL_REQUEST;
2287 	asc = SCSI_ASC_NO_SENSE;
2288 	ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Dispatch on Status Code Type first, then the Status Code within it. */
2290 	switch (nvme_status.sct) {
2291 	case NVME_SCT_GENERIC:
2292 		switch (nvme_status.sc) {
2293 		case NVME_SC_SUCCESS:
2294 			status = MPI2_SCSI_STATUS_GOOD;
2295 			skey = SSD_KEY_NO_SENSE;
2296 			asc = SCSI_ASC_NO_SENSE;
2297 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2299 		case NVME_SC_INVALID_OPCODE:
2300 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2301 			skey = SSD_KEY_ILLEGAL_REQUEST;
2302 			asc = SCSI_ASC_ILLEGAL_COMMAND;
2303 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2305 		case NVME_SC_INVALID_FIELD:
2306 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2307 			skey = SSD_KEY_ILLEGAL_REQUEST;
2308 			asc = SCSI_ASC_INVALID_CDB;
2309 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2311 		case NVME_SC_DATA_TRANSFER_ERROR:
2312 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2313 			skey = SSD_KEY_MEDIUM_ERROR;
2314 			asc = SCSI_ASC_NO_SENSE;
2315 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2317 		case NVME_SC_ABORTED_POWER_LOSS:
2318 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2319 			skey = SSD_KEY_ABORTED_COMMAND;
2320 			asc = SCSI_ASC_WARNING;
2321 			ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2323 		case NVME_SC_INTERNAL_DEVICE_ERROR:
2324 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2325 			skey = SSD_KEY_HARDWARE_ERROR;
2326 			asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2327 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* All abort flavors collapse to the same TASK ABORTED mapping. */
2329 		case NVME_SC_ABORTED_BY_REQUEST:
2330 		case NVME_SC_ABORTED_SQ_DELETION:
2331 		case NVME_SC_ABORTED_FAILED_FUSED:
2332 		case NVME_SC_ABORTED_MISSING_FUSED:
2333 			status = MPI2_SCSI_STATUS_TASK_ABORTED;
2334 			skey = SSD_KEY_ABORTED_COMMAND;
2335 			asc = SCSI_ASC_NO_SENSE;
2336 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2338 		case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2339 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2340 			skey = SSD_KEY_ILLEGAL_REQUEST;
2341 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2342 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
2344 		case NVME_SC_LBA_OUT_OF_RANGE:
2345 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2346 			skey = SSD_KEY_ILLEGAL_REQUEST;
2347 			asc = SCSI_ASC_ILLEGAL_BLOCK;
2348 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2350 		case NVME_SC_CAPACITY_EXCEEDED:
2351 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2352 			skey = SSD_KEY_MEDIUM_ERROR;
2353 			asc = SCSI_ASC_NO_SENSE;
2354 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2356 		case NVME_SC_NAMESPACE_NOT_READY:
2357 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2358 			skey = SSD_KEY_NOT_READY;
2359 			asc = SCSI_ASC_LUN_NOT_READY;
2360 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2364 	case NVME_SCT_COMMAND_SPECIFIC:
2365 		switch (nvme_status.sc) {
2366 		case NVME_SC_INVALID_FORMAT:
2367 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2368 			skey = SSD_KEY_ILLEGAL_REQUEST;
2369 			asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2370 			ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2372 		case NVME_SC_CONFLICTING_ATTRIBUTES:
2373 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2374 			skey = SSD_KEY_ILLEGAL_REQUEST;
2375 			asc = SCSI_ASC_INVALID_CDB;
2376 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2380 	case NVME_SCT_MEDIA_ERROR:
2381 		switch (nvme_status.sc) {
2382 		case NVME_SC_WRITE_FAULTS:
2383 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2384 			skey = SSD_KEY_MEDIUM_ERROR;
2385 			asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2386 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2388 		case NVME_SC_UNRECOVERED_READ_ERROR:
2389 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2390 			skey = SSD_KEY_MEDIUM_ERROR;
2391 			asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2392 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* End-to-end protection (PI) check failures map to MEDIUM ERROR. */
2394 		case NVME_SC_GUARD_CHECK_ERROR:
2395 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2396 			skey = SSD_KEY_MEDIUM_ERROR;
2397 			asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2398 			ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2400 		case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2401 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2402 			skey = SSD_KEY_MEDIUM_ERROR;
2403 			asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2404 			ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2406 		case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2407 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2408 			skey = SSD_KEY_MEDIUM_ERROR;
2409 			asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2410 			ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2412 		case NVME_SC_COMPARE_FAILURE:
2413 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2414 			skey = SSD_KEY_MISCOMPARE;
2415 			asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2416 			ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2418 		case NVME_SC_ACCESS_DENIED:
2419 			status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2420 			skey = SSD_KEY_ILLEGAL_REQUEST;
2421 			asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2422 			ascq = SCSI_ASCQ_INVALID_LUN_ID;
/*
 * Fill in the CCB's sense fields.  The synthesized fixed-format sense is
 * always a full struct scsi_sense_data; compute the residual relative to
 * the sense length the caller asked for.
 */
2428 	returned_sense_len = sizeof(struct scsi_sense_data);
2429 	if (returned_sense_len < ccb->csio.sense_len)
2430 		ccb->csio.sense_resid = ccb->csio.sense_len -
2433 		ccb->csio.sense_resid = 0;
2435 	scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2436 	    1, skey, asc, ascq, SSD_ELEM_NONE);
2437 	ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
/*
 * Completion handler for a SCSI UNMAP that was translated into an NVMe
 * Encapsulated (DSM deallocate) request.  If the firmware reports an NVMe
 * error response, translate the NVMe completion status (stashed in the
 * command's sense buffer) into a SCSI status via
 * mprsas_nvme_trans_status_code(); otherwise the status stays GOOD.
 */
2442 /** mprsas_complete_nvme_unmap
2444  * Complete native NVMe command issued using NVMe Encapsulated
2448 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2450 	Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2451 	struct nvme_completion *nvme_completion = NULL;
2452 	u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2454 	mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
/* A non-zero ErrorResponseCount means the device returned NVMe error status. */
2455 	if (le16toh(mpi_reply->ErrorResponseCount)){
2456 		nvme_completion = (struct nvme_completion *)cm->cm_sense;
2457 		scsi_status = mprsas_nvme_trans_status_code(
2458 		    nvme_completion->status, cm);
/*
 * Main completion path for XPT_SCSI_IO commands.  Runs with the softc
 * mutex held.  Responsibilities, in order:
 *  - stop the timeout callout, sync/unload DMA, update per-target counts;
 *  - handle commands completing during timeout/TM recovery or diag reset;
 *  - handle driver-side mapping errors (MPR_CM_FLAGS_ERROR_MASK), which
 *    freeze the SIM queue until a clean completion arrives;
 *  - take a fast path when there is no firmware reply;
 *  - otherwise translate the MPI2 IOCStatus/SCSIStatus/SCSIState into a
 *    CAM ccb status, copy autosense data, and apply TLR bookkeeping;
 *  - finally unfreeze the SIM queue if needed, freeze the device queue on
 *    error, free the command, and call xpt_done() (not visible here).
 */
2464 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2466 	MPI2_SCSI_IO_REPLY *rep;
2468 	struct ccb_scsiio *csio;
2469 	struct mprsas_softc *sassc;
2470 	struct scsi_vpd_supported_page_list *vpd_list = NULL;
2471 	u8 *TLR_bits, TLR_on, *scsi_cdb;
2474 	struct mprsas_target *target;
2475 	target_id_t target_id;
2478 	mpr_dprint(sc, MPR_TRACE,
2479 	    "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2480 	    cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2481 	    cm->cm_targ->outstanding);
/* Cancel the per-command timeout; we are completing it now. */
2483 	callout_stop(&cm->cm_callout);
2484 	mtx_assert(&sc->mpr_mtx, MA_OWNED);
2487 	ccb = cm->cm_complete_data;
2489 	target_id = csio->ccb_h.target_id;
2490 	rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2492 	 * XXX KDM if the chain allocation fails, does it matter if we do
2493 	 * the sync and unload here?  It is simpler to do it in every case,
2494 	 * assuming it doesn't cause problems.
2496 	if (cm->cm_data != NULL) {
2497 		if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2498 			dir = BUS_DMASYNC_POSTREAD;
2499 		else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2500 			dir = BUS_DMASYNC_POSTWRITE;
2501 		bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2502 		bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Per-target accounting: command is no longer outstanding. */
2505 	cm->cm_targ->completed++;
2506 	cm->cm_targ->outstanding--;
2507 	TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2508 	ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
/* Log completions that race with timeout recovery, TM, or diag reset. */
2510 	if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2511 		TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2512 		if (cm->cm_reply != NULL)
2513 			mprsas_log_command(cm, MPR_RECOVERY,
2514 			    "completed timedout cm %p ccb %p during recovery "
2515 			    "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2516 			    le16toh(rep->IOCStatus), rep->SCSIStatus,
2517 			    rep->SCSIState, le32toh(rep->TransferCount));
2519 			mprsas_log_command(cm, MPR_RECOVERY,
2520 			    "completed timedout cm %p ccb %p during recovery\n",
2522 	} else if (cm->cm_targ->tm != NULL) {
2523 		if (cm->cm_reply != NULL)
2524 			mprsas_log_command(cm, MPR_RECOVERY,
2525 			    "completed cm %p ccb %p during recovery "
2526 			    "ioc %x scsi %x state %x xfer %u\n",
2527 			    cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2528 			    rep->SCSIStatus, rep->SCSIState,
2529 			    le32toh(rep->TransferCount));
2531 			mprsas_log_command(cm, MPR_RECOVERY,
2532 			    "completed cm %p ccb %p during recovery\n",
2534 	} else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2535 		mprsas_log_command(cm, MPR_RECOVERY,
2536 		    "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2539 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2541 		 * We ran into an error after we tried to map the command,
2542 		 * so we're getting a callback without queueing the command
2543 		 * to the hardware.  So we set the status here, and it will
2544 		 * be retained below.  We'll go through the "fast path",
2545 		 * because there can be no reply when we haven't actually
2546 		 * gone out to the hardware.
2548 		mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2551 		 * Currently the only error included in the mask is
2552 		 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2553 		 * chain frames.  We need to freeze the queue until we get
2554 		 * a command that completed without this error, which will
2555 		 * hopefully have some chain frames attached that we can
2556 		 * use.  If we wanted to get smarter about it, we would
2557 		 * only unfreeze the queue in this condition when we're
2558 		 * sure that we're getting some chain frames back.  That's
2559 		 * probably unnecessary.
2561 		if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2562 			xpt_freeze_simq(sassc->sim, 1);
2563 			sassc->flags |= MPRSAS_QUEUE_FROZEN;
2564 			mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2565 			    "freezing SIM queue\n");
2570 	 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2571 	 * flag, and use it in a few places in the rest of this function for
2572 	 * convenience.  Use the macro if available.
2574 #if __FreeBSD_version >= 1100103
2575 	scsi_cdb = scsiio_cdb_ptr(csio);
2577 	if (csio->ccb_h.flags & CAM_CDB_POINTER)
2578 		scsi_cdb = csio->cdb_io.cdb_ptr;
2580 		scsi_cdb = csio->cdb_io.cdb_bytes;
2584 	 * If this is a Start Stop Unit command and it was issued by the driver
2585 	 * during shutdown, decrement the refcount to account for all of the
2586 	 * commands that were sent.  All SSU commands should be completed before
2587 	 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2590 	if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2591 		mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2595 	/* Take the fast path to completion */
2596 	if (cm->cm_reply == NULL) {
2597 		if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2598 			if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2599 				mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2601 				mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2602 			csio->scsi_status = SCSI_STATUS_OK;
2604 			if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2605 				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2606 				sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2607 				mpr_dprint(sc, MPR_XINFO,
2608 				    "Unfreezing SIM queue\n");
2613 		 * There are two scenarios where the status won't be
2614 		 * CAM_REQ_CMP.  The first is if MPR_CM_FLAGS_ERROR_MASK is
2615 		 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2617 		if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2619 			 * Freeze the dev queue so that commands are
2620 			 * executed in the correct order after error
2623 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
2624 			xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2626 		mpr_free_command(sc, cm);
/* Full path: a firmware reply is present; decode it. */
2631 	target = &sassc->targets[target_id];
/* UNMAP on an NVMe device was encapsulated; complete it specially. */
2632 	if (scsi_cdb[0] == UNMAP &&
2634 	    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2635 		rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2636 		csio->scsi_status = rep->SCSIStatus;
2639 	mprsas_log_command(cm, MPR_XINFO,
2640 	    "ioc %x scsi %x state %x xfer %u\n",
2641 	    le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2642 	    le32toh(rep->TransferCount));
2644 	switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2645 	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2646 		csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2648 	case MPI2_IOCSTATUS_SUCCESS:
2649 	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2650 		if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2651 		    MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2652 			mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2654 		/* Completion failed at the transport level. */
2655 		if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2656 		    MPI2_SCSI_STATE_TERMINATED)) {
2657 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2661 		/* In a modern packetized environment, an autosense failure
2662 		 * implies that there's not much else that can be done to
2663 		 * recover the command.
2665 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2666 			mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2671 		 * CAM doesn't care about SAS Response Info data, but if this is
2672 		 * the state check if TLR should be done.  If not, clear the
2673 		 * TLR_bits for the target.
2675 		if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2676 		    ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2677 		    == MPR_SCSI_RI_INVALID_FRAME)) {
2678 			sc->mapping_table[target_id].TLR_bits =
2679 			    (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2683 		 * Intentionally override the normal SCSI status reporting
2684 		 * for these two cases.  These are likely to happen in a
2685 		 * multi-initiator environment, and we want to make sure that
2686 		 * CAM retries these commands rather than fail them.
2688 		if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2689 		    (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2690 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2694 		/* Handle normal status and sense */
2695 		csio->scsi_status = rep->SCSIStatus;
2696 		if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2697 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2699 			mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2701 		if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2702 			int sense_len, returned_sense_len;
/* Copy firmware-captured autosense into the CCB, bounded on both sides. */
2704 			returned_sense_len = min(le32toh(rep->SenseCount),
2705 			    sizeof(struct scsi_sense_data));
2706 			if (returned_sense_len < csio->sense_len)
2707 				csio->sense_resid = csio->sense_len -
2710 				csio->sense_resid = 0;
2712 			sense_len = min(returned_sense_len,
2713 			    csio->sense_len - csio->sense_resid);
2714 			bzero(&csio->sense_data, sizeof(csio->sense_data));
2715 			bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2716 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2720 		 * Check if this is an INQUIRY command.  If it's a VPD inquiry,
2721 		 * and it's page code 0 (Supported Page List), and there is
2722 		 * inquiry data, and this is for a sequential access device, and
2723 		 * the device is an SSP target, and TLR is supported by the
2724 		 * controller, turn the TLR_bits value ON if page 0x90 is
2727 		if ((scsi_cdb[0] == INQUIRY) &&
2728 		    (scsi_cdb[1] & SI_EVPD) &&
2729 		    (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2730 		    ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2731 		    (csio->data_ptr != NULL) &&
2732 		    ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2733 		    (sc->control_TLR) &&
2734 		    (sc->mapping_table[target_id].device_info &
2735 		    MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2736 			vpd_list = (struct scsi_vpd_supported_page_list *)
2738 			TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2739 			*TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2740 			TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
/* alloc_len is the CDB allocation length minus what wasn't transferred. */
2741 			alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2742 			alloc_len -= csio->resid;
2743 			for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2744 				if (vpd_list->list[i] == 0x90) {
2752 		 * If this is a SATA direct-access end device, mark it so that
2753 		 * a SCSI StartStopUnit command will be sent to it when the
2754 		 * driver is being shutdown.
2756 		if ((scsi_cdb[0] == INQUIRY) &&
2757 		    (csio->data_ptr != NULL) &&
2758 		    ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2759 		    (sc->mapping_table[target_id].device_info &
2760 		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2761 		    ((sc->mapping_table[target_id].device_info &
2762 		    MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2763 		    MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2764 			target = &sassc->targets[target_id];
2765 			target->supports_SSU = TRUE;
2766 			mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2770 	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2771 	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2773 		 * If devinfo is 0 this will be a volume.  In that case don't
2774 		 * tell CAM that the volume is not there.  We want volumes to
2775 		 * be enumerated until they are deleted/removed, not just
2778 		if (cm->cm_targ->devinfo == 0)
2779 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2781 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2783 	case MPI2_IOCSTATUS_INVALID_SGL:
2784 		mpr_print_scsiio_cmd(sc, cm);
2785 		mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2787 	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2789 		 * This is one of the responses that comes back when an I/O
2790 		 * has been aborted.  If it is because of a timeout that we
2791 		 * initiated, just set the status to CAM_CMD_TIMEOUT.
2792 		 * Otherwise set it to CAM_REQ_ABORTED.  The effect on the
2793 		 * command is the same (it gets retried, subject to the
2794 		 * retry counter), the only difference is what gets printed
2797 		if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2798 			mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2800 			mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2802 	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2803 		/* resid is ignored for this condition */
2805 		mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2807 	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2808 	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2810 		 * These can sometimes be transient transport-related
2811 		 * errors, and sometimes persistent drive-related errors.
2812 		 * We used to retry these without decrementing the retry
2813 		 * count by returning CAM_REQUEUE_REQ.  Unfortunately, if
2814 		 * we hit a persistent drive problem that returns one of
2815 		 * these error codes, we would retry indefinitely.  So,
2816 		 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2817 		 * count and avoid infinite retries.  We're taking the
2818 		 * potential risk of flagging false failures in the event
2819 		 * of a topology-related error (e.g. a SAS expander problem
2820 		 * causes a command addressed to a drive to fail), but
2821 		 * avoiding getting into an infinite retry loop.
2823 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2824 		mpr_dprint(sc, MPR_INFO,
2825 		    "Controller reported %s tgt %u SMID %u loginfo %x\n",
2826 		    mpr_describe_table(mpr_iocstatus_string,
2827 		    le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2828 		    target_id, cm->cm_desc.Default.SMID,
2829 		    le32toh(rep->IOCLogInfo))
2830 		mpr_dprint(sc, MPR_XINFO,
2831 		    "SCSIStatus %x SCSIState %x xfercount %u\n",
2832 		    rep->SCSIStatus, rep->SCSIState,
2833 		    le32toh(rep->TransferCount));
2835 	case MPI2_IOCSTATUS_INVALID_FUNCTION:
2836 	case MPI2_IOCSTATUS_INTERNAL_ERROR:
2837 	case MPI2_IOCSTATUS_INVALID_VPID:
2838 	case MPI2_IOCSTATUS_INVALID_FIELD:
2839 	case MPI2_IOCSTATUS_INVALID_STATE:
2840 	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2841 	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2842 	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2843 	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2844 	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2846 		mprsas_log_command(cm, MPR_XINFO,
2847 		    "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2848 		    le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2849 		    rep->SCSIStatus, rep->SCSIState,
2850 		    le32toh(rep->TransferCount));
2851 		csio->resid = cm->cm_length;
2853 		if (scsi_cdb[0] == UNMAP &&
2855 		    (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2856 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2858 			mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2863 		mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
2865 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2866 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2867 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2868 		mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2872 	if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2873 		ccb->ccb_h.status |= CAM_DEV_QFRZN;
2874 		xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2877 	mpr_free_command(sc, cm);
2881 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests.  Validates that the
 * command actually reached the hardware and that both IOCStatus and
 * SASStatus indicate success, then reports the SMP-level result from the
 * response frame to CAM.  Always syncs/unloads the bidirectional DMA map
 * and frees the command before finishing.
 */
2883 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2885 	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2886 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2890 	ccb = cm->cm_complete_data;
2893 	 * Currently there should be no way we can hit this case.  It only
2894 	 * happens when we have a failure to allocate chain frames, and SMP
2895 	 * commands require two S/G elements only.  That should be handled
2896 	 * in the standard request size.
2898 	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2899 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2900 		    "request!\n", __func__, cm->cm_flags);
2901 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2905 	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2907 		mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2908 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the request for logging. */
2912 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2913 	sasaddr = le32toh(req->SASAddress.Low);
2914 	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2916 	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2917 	    MPI2_IOCSTATUS_SUCCESS ||
2918 	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2919 		mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2920 		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2921 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2925 	mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2926 	    "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2928 	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2929 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2931 		mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2935 	 * We sync in both directions because we had DMAs in the S/G list
2936 	 * in both directions.
2938 	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2939 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2940 	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2941 	mpr_free_command(sc, cm);
/*
 * Build and dispatch an SMP passthrough request to the given SAS address.
 * Resolves the request/response buffer virtual addresses from the CCB
 * (rejecting physical addresses and multi-segment S/G lists, since the
 * chip takes exactly one buffer each way), fills in an
 * MPI2_SMP_PASSTHROUGH_REQUEST, and maps it via a two-element uio so a
 * single mpr_map_command() handles both data directions.  The two
 * preprocessor branches implement the same buffer-resolution logic for
 * CAM APIs before/after the CAM_DATA_MASK flag rework.
 */
2946 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2948 	struct mpr_command *cm;
2949 	uint8_t *request, *response;
2950 	MPI2_SMP_PASSTHROUGH_REQUEST *req;
2951 	struct mpr_softc *sc;
2959 #if (__FreeBSD_version >= 1000028) || \
2960     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2961 	switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2962 	case CAM_DATA_PADDR:
2963 	case CAM_DATA_SG_PADDR:
2965 		 * XXX We don't yet support physical addresses here.
2967 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2968 		    "supported\n", __func__);
2969 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2974 		 * The chip does not support more than one buffer for the
2975 		 * request or response.
2977 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
2978 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2979 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2980 			    "response buffer segments not supported for SMP\n",
2982 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2988 		 * The CAM_SCATTER_VALID flag was originally implemented
2989 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2990 		 * We have two.  So, just take that flag to mean that we
2991 		 * might have S/G lists, and look at the S/G segment count
2992 		 * to figure out whether that is the case for each individual
2995 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
2996 			bus_dma_segment_t *req_sg;
2998 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2999 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3001 			request = ccb->smpio.smp_request;
3003 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3004 			bus_dma_segment_t *rsp_sg;
3006 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3007 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3009 			response = ccb->smpio.smp_response;
3011 	case CAM_DATA_VADDR:
3012 		request = ccb->smpio.smp_request;
3013 		response = ccb->smpio.smp_response;
3016 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3020 #else /* __FreeBSD_version < 1000028 */
3022 	 * XXX We don't yet support physical addresses here.
3024 	if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3025 		mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3026 		    "supported\n", __func__);
3027 		mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3033 	 * If the user wants to send an S/G list, check to make sure they
3034 	 * have single buffers.
3036 	if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3038 		 * The chip does not support more than one buffer for the
3039 		 * request or response.
3041 		if ((ccb->smpio.smp_request_sglist_cnt > 1)
3042 		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3043 			mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3044 			    "response buffer segments not supported for SMP\n",
3046 			mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3052 		 * The CAM_SCATTER_VALID flag was originally implemented
3053 		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3054 		 * We have two.  So, just take that flag to mean that we
3055 		 * might have S/G lists, and look at the S/G segment count
3056 		 * to figure out whether that is the case for each individual
3059 		if (ccb->smpio.smp_request_sglist_cnt != 0) {
3060 			bus_dma_segment_t *req_sg;
3062 			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3063 			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3065 			request = ccb->smpio.smp_request;
3067 		if (ccb->smpio.smp_response_sglist_cnt != 0) {
3068 			bus_dma_segment_t *rsp_sg;
3070 			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3071 			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3073 			response = ccb->smpio.smp_response;
3075 		request = ccb->smpio.smp_request;
3076 		response = ccb->smpio.smp_response;
3078 #endif /* __FreeBSD_version < 1000028 */
3080 	cm = mpr_alloc_command(sc);
3082 		mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3084 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3089 	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3090 	bzero(req, sizeof(*req));
3091 	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3093 	/* Allow the chip to use any route to this SAS address. */
3094 	req->PhysicalPort = 0xff;
3096 	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3098 	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3100 	mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3101 	    "%#jx\n", __func__, (uintmax_t)sasaddr);
3103 	mpr_init_sge(cm, req, &req->SGL);
3106 	 * Set up a uio to pass into mpr_map_command().  This allows us to
3107 	 * do one map command, and one busdma call in there.
3109 	cm->cm_uio.uio_iov = cm->cm_iovec;
3110 	cm->cm_uio.uio_iovcnt = 2;
3111 	cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3114 	 * The read/write flag isn't used by busdma, but set it just in
3115 	 * case.  This isn't exactly accurate, either, since we're going in
3118 	cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] carries the SMP request, iovec[1] receives the response. */
3120 	cm->cm_iovec[0].iov_base = request;
3121 	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3122 	cm->cm_iovec[1].iov_base = response;
3123 	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3125 	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3126 	    cm->cm_iovec[1].iov_len;
3129 	 * Trigger a warning message in mpr_data_cb() for the user if we
3130 	 * wind up exceeding two S/G segments.  The chip expects one
3131 	 * segment for the request and another for the response.
3133 	cm->cm_max_segs = 2;
3135 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3136 	cm->cm_complete = mprsas_smpio_complete;
3137 	cm->cm_complete_data = ccb;
3140 	 * Tell the mapping code that we're using a uio, and that this is
3141 	 * an SMP passthrough request.  There is a little special-case
3142 	 * logic there (in mpr_data_cb()) to handle the bidirectional
3145 	cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3146 	    MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3148 	/* The chip data format is little endian. */
3149 	req->SASAddress.High = htole32(sasaddr >> 32);
3150 	req->SASAddress.Low = htole32(sasaddr);
3153 	 * XXX Note that we don't have a timeout/abort mechanism here.
3154 	 * From the manual, it looks like task management requests only
3155 	 * work for SCSI IO and SATA passthrough requests.  We may need to
3156 	 * have a mechanism to retry requests in the event of a chip reset
3157 	 * at least.  Hopefully the chip will insure that any errors short
3158 	 * of that are relayed back to the driver.
3160 	error = mpr_map_command(sc, cm);
3161 	if ((error != 0) && (error != EINPROGRESS)) {
3162 		mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3163 		    "mpr_map_command()\n", __func__, error);
3170 	mpr_free_command(sc, cm);
3171 	mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * XPT_SMP_IO action handler: determine which SAS address should receive
 * the SMP request for this target, then hand off to mprsas_send_smpcmd().
 * If the target itself embeds an SMP target, its own address is used;
 * otherwise fall back to the parent device (normally the expander),
 * validating that the parent exists, is an SMP target, and has a usable
 * SAS address.  Invalid targets complete the CCB with an error status.
 */
3177 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3179 	struct mpr_softc *sc;
3180 	struct mprsas_target *targ;
3181 	uint64_t sasaddr = 0;
3186 	 * Make sure the target exists.
3188 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3189 	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3190 	targ = &sassc->targets[ccb->ccb_h.target_id];
3191 	if (targ->handle == 0x0) {
3192 		mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3193 		    __func__, ccb->ccb_h.target_id);
3194 		mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3200 	 * If this device has an embedded SMP target, we'll talk to it
3202 	 * figure out what the expander's address is.
3204 	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3205 		sasaddr = targ->sasaddr;
3208 	 * If we don't have a SAS address for the expander yet, try
3209 	 * grabbing it from the page 0x83 information cached in the
3210 	 * transport layer for this target.  LSI expanders report the
3211 	 * expander SAS address as the port-associated SAS address in
3212 	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
3215 	 * XXX KDM disable this for now, but leave it commented out so that
3216 	 * it is obvious that this is another possible way to get the SAS
3219 	 * The parent handle method below is a little more reliable, and
3220 	 * the other benefit is that it works for devices other than SES
3221 	 * devices.  So you can send a SMP request to a da(4) device and it
3222 	 * will get routed to the expander that device is attached to.
3223 	 * (Assuming the da(4) device doesn't contain an SMP target...)
3227 	sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3231 	 * If we still don't have a SAS address for the expander, look for
3232 	 * the parent device of this device, which is probably the expander.
3235 #ifdef OLD_MPR_PROBE
3236 	struct mprsas_target *parent_target;
3239 	if (targ->parent_handle == 0x0) {
3240 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3241 		    "a valid parent handle!\n", __func__, targ->handle);
3242 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3245 #ifdef OLD_MPR_PROBE
3246 	parent_target = mprsas_find_target_by_handle(sassc, 0,
3247 	    targ->parent_handle);
3249 	if (parent_target == NULL) {
3250 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3251 		    "a valid parent target!\n", __func__, targ->handle);
3252 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3256 	if ((parent_target->devinfo &
3257 	    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3258 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3259 		    "does not have an SMP target!\n", __func__,
3260 		    targ->handle, parent_target->handle);
3261 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3265 	sasaddr = parent_target->sasaddr;
3266 #else /* OLD_MPR_PROBE */
3267 	if ((targ->parent_devinfo &
3268 	    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3269 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3270 		    "does not have an SMP target!\n", __func__,
3271 		    targ->handle, targ->parent_handle);
3272 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3276 	if (targ->parent_sasaddr == 0x0) {
3277 		mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3278 		    "%d does not have a valid SAS address!\n", __func__,
3279 		    targ->handle, targ->parent_handle);
3280 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3284 	sasaddr = targ->parent_sasaddr;
3285 #endif /* OLD_MPR_PROBE */
3290 		mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3291 		    "handle %d\n", __func__, targ->handle);
3292 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3295 	mprsas_send_smpcmd(sassc, ccb, sasaddr);
3303 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler: allocate a task-management command and
 * issue a Target Reset (with the Link Reset message flag set, i.e. SAS
 * hard reset / SATA link reset) to the target named by the CCB.  The TM
 * completes asynchronously in mprsas_resetdev_complete().  On command
 * allocation failure the CCB finishes with CAM_RESRC_UNAVAIL.
 */
3306 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3308 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3309 	struct mpr_softc *sc;
3310 	struct mpr_command *tm;
3311 	struct mprsas_target *targ;
3313 	MPR_FUNCTRACE(sassc->sc);
3314 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3316 	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3317 	    "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3319 	tm = mpr_alloc_command(sc);
3321 		mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3322 		    "mprsas_action_resetdev\n");
3323 		mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3328 	targ = &sassc->targets[ccb->ccb_h.target_id];
3329 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3330 	req->DevHandle = htole16(targ->handle);
3331 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3332 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3334 	/* SAS Hard Link Reset / SATA Link Reset */
3335 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* Task management requests go out on the high-priority queue. */
3338 	tm->cm_desc.HighPriority.RequestFlags =
3339 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3340 	tm->cm_complete = mprsas_resetdev_complete;
3341 	tm->cm_complete_data = ccb;
3343 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3344 	    __func__, targ->tid);
3346 	targ->flags |= MPRSAS_TARGET_INRESET;
3348 	mpr_map_command(sc, tm);
/*
 * mprsas_resetdev_complete:
 *
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Translates the task management reply into
 * a CAM status on the original CCB, announces the reset to CAM on
 * success, and frees the command.
 */
3352 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3354 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3358 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3360 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3361 ccb = tm->cm_complete_data;
3364 * Currently there should be no way we can hit this case. It only
3365 * happens when we have a failure to allocate chain frames, and
3366 * task management commands don't have S/G lists.
3368 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3369 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3371 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3373 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3374 "handle %#04x! This should not happen!\n", __func__,
3375 tm->cm_flags, req->DevHandle);
3376 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/*
 * NOTE(review): ResponseCode is a single byte in the MPI2 task
 * management reply, so the le32toh() conversions below look suspect --
 * confirm the field width against the mpi2 headers.
 */
3380 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3381 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3383 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3384 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
/* Tell CAM a bus device reset was delivered to this target. */
3385 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3389 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3393 mprsas_free_tm(sc, tm);
/*
 * mprsas_poll:
 *
 * CAM SIM poll routine, used when interrupts are not being serviced
 * (e.g. while dumping or after a panic).  Drops the noisy MPR_TRACE
 * debug flag so polling is not slowed by log output, then runs the
 * interrupt handler directly.
 */
3398 mprsas_poll(struct cam_sim *sim)
3400 struct mprsas_softc *sassc;
3402 sassc = cam_sim_softc(sim);
3404 if (sassc->sc->mpr_debug & MPR_TRACE) {
3405 /* frequent debug messages during a panic just slow
3406 * everything down too much.
3408 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3410 sassc->sc->mpr_debug &= ~MPR_TRACE;
/* Service pending completions as if an interrupt had fired. */
3413 mpr_intr_locked(sassc->sc);
/*
 * mprsas_async:
 *
 * CAM asynchronous event callback.  Two event codes are of interest:
 *
 *   AC_ADVINFO_CHANGED -- re-read the long READ CAPACITY data for the
 *	affected LUN and cache whether it is formatted for EEDP
 *	(protection information) along with its logical block size.
 *
 *   AC_FOUND_DEVICE -- on the older FreeBSD versions selected by the
 *	#if below, probe the newly-found device for EEDP support via
 *	mprsas_check_eedp().
 *
 * The __FreeBSD_version conditionals pick between kernels where the
 * driver registers for path-specific events and ones where it gets all
 * events and must filter out those belonging to other SIMs by path id.
 */
3417 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3420 struct mpr_softc *sc;
3422 sc = (struct mpr_softc *)callback_arg;
3425 #if (__FreeBSD_version >= 1000006) || \
3426 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3427 case AC_ADVINFO_CHANGED: {
3428 struct mprsas_target *target;
3429 struct mprsas_softc *sassc;
3430 struct scsi_read_capacity_data_long rcap_buf;
3431 struct ccb_dev_advinfo cdai;
3432 struct mprsas_lun *lun;
/* The advisory-info buffer type arrives as the opaque callback arg. */
3437 buftype = (uintptr_t)arg;
3443 * We're only interested in read capacity data changes.
3445 if (buftype != CDAI_TYPE_RCAPLONG)
3449 * See the comment in mpr_attach_sas() for a detailed
3450 * explanation. In these versions of FreeBSD we register
3451 * for all events and filter out the events that don't
3454 #if (__FreeBSD_version < 1000703) || \
3455 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3456 if (xpt_path_path_id(path) != sassc->sim->path_id)
3461 * We should have a handle for this, but check to make sure.
3463 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3464 ("Target %d out of bounds in mprsas_async\n",
3465 xpt_path_target_id(path)));
3466 target = &sassc->targets[xpt_path_target_id(path)];
/* handle == 0 means the firmware no longer knows this target. */
3467 if (target->handle == 0)
3470 lunid = xpt_path_lun_id(path);
/* Look for an existing per-LUN record on this target. */
3472 SLIST_FOREACH(lun, &target->luns, lun_link) {
3473 if (lun->lun_id == lunid) {
/* Not seen before: create a LUN record to hold the EEDP state. */
3479 if (found_lun == 0) {
3480 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3483 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3484 "LUN for EEDP support.\n");
3487 lun->lun_id = lunid;
3488 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
3491 bzero(&rcap_buf, sizeof(rcap_buf));
3492 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3493 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3494 cdai.ccb_h.flags = CAM_DIR_IN;
3495 cdai.buftype = CDAI_TYPE_RCAPLONG;
3496 #if (__FreeBSD_version >= 1100061) || \
3497 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3498 cdai.flags = CDAI_FLAG_NONE;
3502 cdai.bufsiz = sizeof(rcap_buf);
3503 cdai.buf = (uint8_t *)&rcap_buf;
3504 xpt_action((union ccb *)&cdai);
/* XPT_DEV_ADVINFO may freeze the devq; release it if so. */
3505 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3506 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* Record EEDP state: PROT_EN set means the LUN is EEDP-formatted. */
3508 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3509 && (rcap_buf.prot & SRC16_PROT_EN)) {
3510 lun->eedp_formatted = TRUE;
3511 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3513 lun->eedp_formatted = FALSE;
3514 lun->eedp_block_size = 0;
3519 case AC_FOUND_DEVICE: {
3520 struct ccb_getdev *cgd;
3523 * See the comment in mpr_attach_sas() for a detailed
3524 * explanation. In these versions of FreeBSD we register
3525 * for all events and filter out the events that don't
3528 #if (__FreeBSD_version < 1000703) || \
3529 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3530 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3535 #if (__FreeBSD_version < 901503) || \
3536 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3537 mprsas_check_eedp(sc, path, cgd);
3546 #if (__FreeBSD_version < 901503) || \
3547 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * mprsas_check_eedp:
 *
 * For older FreeBSD versions (see the surrounding #if), determine
 * whether a newly-found device supports EEDP (end-to-end data
 * protection).  If the inquiry data advertises protection support,
 * issue a READ CAPACITY(16) command whose completion
 * (mprsas_read_cap_done) records the EEDP format state and block size
 * in the target's per-LUN list.
 */
3549 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3550 struct ccb_getdev *cgd)
3552 struct mprsas_softc *sassc = sc->sassc;
3553 struct ccb_scsiio *csio;
3554 struct scsi_read_capacity_16 *scsi_cmd;
3555 struct scsi_read_capacity_eedp *rcap_buf;
3557 target_id_t targetid;
3560 struct cam_path *local_path;
3561 struct mprsas_target *target;
3562 struct mprsas_lun *lun;
3566 pathid = cam_sim_path(sassc->sim);
3567 targetid = xpt_path_target_id(path);
3568 lunid = xpt_path_lun_id(path);
3570 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3571 "mprsas_check_eedp\n", targetid));
3572 target = &sassc->targets[targetid];
/* handle == 0 means the firmware no longer knows this target. */
3573 if (target->handle == 0x0)
3577 * Determine if the device is EEDP capable.
3579 * If this flag is set in the inquiry data, the device supports
3580 * protection information, and must support the 16 byte read capacity
3581 * command, otherwise continue without sending read cap 16.
3583 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3587 * Issue a READ CAPACITY 16 command. This info is used to determine if
3588 * the LUN is formatted for EEDP support.
3590 ccb = xpt_alloc_ccb_nowait();
3592 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3597 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3599 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3606 * If LUN is already in list, don't create a new one.
3609 SLIST_FOREACH(lun, &target->luns, lun_link) {
3610 if (lun->lun_id == lunid) {
/* Not seen before: create a LUN record to hold the EEDP state. */
3616 lun = malloc(sizeof(struct mprsas_lun),  M_MPR,
3619 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3621 xpt_free_path(local_path);
3625 lun->lun_id = lunid;
3626 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3629 xpt_path_string(local_path, path_str, sizeof(path_str));
3630 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3631 path_str, target->handle);
3634 * Issue a READ CAPACITY 16 command for the LUN. The
3635 * mprsas_read_cap_done function will load the read cap info into the
3638 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3640 if (rcap_buf == NULL) {
3641 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3642 "buffer for EEDP support.\n");
/*
 * NOTE(review): at this point xpt_setup_ccb() has not yet attached
 * local_path to the CCB, so freeing ccb->ccb_h.path here looks like it
 * may leak local_path -- confirm against the un-elided source.
 */
3643 xpt_free_path(ccb->ccb_h.path);
/* Fill in the SCSI I/O CCB; completion runs mprsas_read_cap_done(). */
3647 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3649 csio->ccb_h.func_code = XPT_SCSI_IO;
3650 csio->ccb_h.flags = CAM_DIR_IN;
3651 csio->ccb_h.retry_count = 4;
3652 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3653 csio->ccb_h.timeout = 60000;
3654 csio->data_ptr = (uint8_t *)rcap_buf;
3655 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3656 csio->sense_len = MPR_SENSE_LEN;
3657 csio->cdb_len = sizeof(*scsi_cmd);
3658 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the CDB: 0x9E is SERVICE ACTION IN(16); with
 * SRC16_SERVICE_ACTION this is READ CAPACITY(16). */
3660 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3661 bzero(scsi_cmd, sizeof(*scsi_cmd));
3662 scsi_cmd->opcode = 0x9E;
3663 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* CDB byte 13 is the low byte of the allocation length. */
3664 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion handler can find its state. */
3666 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * mprsas_read_cap_done:
 *
 * Completion handler for the READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Releases the device queue if the command froze
 * it, then records in the matching LUN entry whether the LUN is
 * formatted for EEDP and its block size.  Frees the data buffer, the
 * path, and the CCB on the way out.
 */
3671 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3673 struct mprsas_softc *sassc;
3674 struct mprsas_target *target;
3675 struct mprsas_lun *lun;
3676 struct scsi_read_capacity_eedp *rcap_buf;
3678 if (done_ccb == NULL)
3681 /* Driver need to release devq, it Scsi command is
3682 * generated by driver internally.
3683 * Currently there is a single place where driver
3684 * calls scsi command internally. In future if driver
3685 * calls more scsi command internally, it needs to release
3686 * devq internally, since those command will not go back to
3689 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3690 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3691 xpt_release_devq(done_ccb->ccb_h.path,
3692 /*count*/ 1, /*run_queue*/TRUE);
3695 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3698 * Get the LUN ID for the path and look it up in the LUN list for the
3701 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3702 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3703 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3704 target = &sassc->targets[done_ccb->ccb_h.target_id];
3705 SLIST_FOREACH(lun, &target->luns, lun_link) {
3706 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3710 * Got the LUN in the target's LUN list. Fill it in with EEDP
3711 * info. If the READ CAP 16 command had some SCSI error (common
3712 * if command is not supported), mark the lun as not supporting
3713 * EEDP and set the block size to 0.
3715 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3716 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3717 lun->eedp_formatted = FALSE;
3718 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte is PROT_EN: LUN formatted with PI. */
3722 if (rcap_buf->protect & 0x01) {
3723 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3724 "%d is formatted for EEDP support.\n",
3725 done_ccb->ccb_h.target_lun,
3726 done_ccb->ccb_h.target_id);
3727 lun->eedp_formatted = TRUE;
3728 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3733 // Finished with this CCB and path.
3734 free(rcap_buf, M_MPR);
3735 xpt_free_path(done_ccb->ccb_h.path);
3736 xpt_free_ccb(done_ccb);
3738 #endif /* (__FreeBSD_version < 901503) || \
3739 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * mprsas_prepare_for_tm:
 *
 * Common setup before sending a task management request: allocate a
 * CCB with a path to the target/LUN (used later to release the frozen
 * devq when the TM completes), attach the target to the command, and
 * mark the target in-reset so no new I/O is started against it.
 */
3742 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3743 struct mprsas_target *target, lun_id_t lun_id)
3749 * Set the INRESET flag for this target so that no I/O will be sent to
3750 * the target until the reset has completed. If an I/O request does
3751 * happen, the devq will be frozen. The CCB holds the path which is
3752 * used to release the devq. The devq is released and the CCB is freed
3753 * when the TM completes.
3755 ccb = xpt_alloc_ccb_nowait();
3757 path_id = cam_sim_path(sc->sassc->sim);
3758 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3759 target->tid, lun_id) != CAM_REQ_CMP) {
3763 tm->cm_targ = target;
/* No new I/O is dispatched to the target while this flag is set. */
3764 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * mprsas_startup:
 *
 * Kick off controller discovery: set wait_for_port_enable so the CAM
 * simq stays frozen until discovery finishes, then send the firmware
 * port enable request.
 */
3770 mprsas_startup(struct mpr_softc *sc)
3773 * Send the port enable message and set the wait_for_port_enable flag.
3774 * This flag helps to keep the simq frozen until all discovery events
3777 sc->wait_for_port_enable = 1;
3778 mprsas_send_portenable(sc);
/*
 * mprsas_send_portenable:
 *
 * Allocate a command and send an MPI2 PORT ENABLE request to the
 * firmware, which starts device discovery.  Completion is handled in
 * mprsas_portenable_complete().
 */
3783 mprsas_send_portenable(struct mpr_softc *sc)
3785 MPI2_PORT_ENABLE_REQUEST *request;
3786 struct mpr_command *cm;
3790 if ((cm = mpr_alloc_command(sc)) == NULL)
3792 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3793 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3794 request->MsgFlags = 0;
/* Port enable uses the default request descriptor type. */
3796 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3797 cm->cm_complete = mprsas_portenable_complete;
3801 mpr_map_command(sc, cm);
3802 mpr_dprint(sc, MPR_XINFO,
3803 "mpr_send_portenable finished cm %p req %p complete %p\n",
3804 cm, cm->cm_req, cm->cm_complete);
/*
 * mprsas_portenable_complete:
 *
 * Completion handler for the PORT ENABLE request.  Logs failures,
 * frees the command, clears wait_for_port_enable, and wakes anyone
 * sleeping on port_enable_complete so attach/discovery can proceed.
 */
3809 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3811 MPI2_PORT_ENABLE_REPLY *reply;
3812 struct mprsas_softc *sassc;
3818 * Currently there should be no way we can hit this case. It only
3819 * happens when we have a failure to allocate chain frames, and
3820 * port enable commands don't have S/G lists.
3822 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3823 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3824 "This should not happen!\n", __func__, cm->cm_flags);
3827 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3829 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/*
 * NOTE(review): the mask is applied to IOCStatus BEFORE le16toh(), so
 * on a big-endian host the wrong byte is masked.  It should read
 * (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) -- swap first,
 * then mask.
 */
3830 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3831 MPI2_IOCSTATUS_SUCCESS)
3832 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3834 mpr_free_command(sc, cm);
3836 * Done waiting for port enable to complete. Decrement the refcount.
3837 * If refcount is 0, discovery is complete and a rescan of the bus can
3840 sc->wait_for_port_enable = 0;
3841 sc->port_enable_complete = 1;
/* Wake threads sleeping in attach waiting for discovery to finish. */
3842 wakeup(&sc->port_enable_complete);
3843 mprsas_startup_decrement(sassc);
/*
 * mprsas_check_id:
 *
 * Check whether the given target id appears in the controller's
 * comma-separated exclude_ids list (targets the driver should not
 * expose).  Empty tokens in the list are skipped.
 *
 * NOTE(review): strsep() writes NULs into and advances through the
 * string it parses; confirm sc->exclude_ids is allowed to be consumed
 * destructively here (repeated calls would otherwise see a truncated
 * list).
 */
3847 mprsas_check_id(struct mprsas_softc *sassc, int id)
3849 struct mpr_softc *sc = sassc->sc;
3853 ids = &sc->exclude_ids[0];
3854 while((name = strsep(&ids, ",")) != NULL) {
3855 if (name[0] == '\0')
/* strtol with base 0 accepts decimal, octal, and 0x-prefixed hex. */
3857 if (strtol(name, NULL, 0) == (long)id)
3865 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3867 struct mprsas_softc *sassc;
3868 struct mprsas_lun *lun, *lun_tmp;
3869 struct mprsas_target *targ;
3874 * The number of targets is based on IOC Facts, so free all of
3875 * the allocated LUNs for each target and then the target buffer
3878 for (i=0; i< maxtargets; i++) {
3879 targ = &sassc->targets[i];
3880 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3884 free(sassc->targets, M_MPR);
3886 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3887 M_MPR, M_WAITOK|M_ZERO);
3888 if (!sassc->targets) {
3889 panic("%s failed to alloc targets with error %d\n",