2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for Avago Technologies (LSI) MPT3 */
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
75 #include <dev/nvme/nvme.h>
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
/* Discovery watchdog tuning: per-timeout interval and retry budget. */
90 #define MPRSAS_DISCOVERY_TIMEOUT 20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
94 * static array to check SCSI OpCode for EEDP protection bits
/*
 * EEDP flag shorthands: PRO_R is the check/remove op for reads,
 * PRO_W/PRO_V are the insert op for writes/verifies.  Note PRO_W and
 * PRO_V expand to the same MPI2 flag value.
 */
96 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Table indexed by SCSI opcode (0x00-0xFF); a nonzero entry gives the
 * EEDP flags to apply for that opcode (e.g. 0x28 READ(10) -> PRO_R,
 * 0x2A WRITE(10) -> PRO_W).  Entries are zero for opcodes that carry
 * no protection information.
 */
99 static uint8_t op_code_prot[256] = {
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
/* malloc(9) type tag used for all allocations owned by this subsystem. */
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
/*
 * Forward declarations for the file-local (static) CAM action handlers,
 * command-completion callbacks, and task-management helpers defined
 * below.  Several are conditional on __FreeBSD_version because the CAM
 * APIs they use (SMP passthrough, EEDP advanced-info async events)
 * appeared in specific FreeBSD releases.
 */
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131 struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133 struct cam_path *path, void *arg);
/* EEDP probing via READ CAPACITY is only needed on pre-ADVINFO releases. */
134 #if (__FreeBSD_version < 901503) || \
135 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137 struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139 union ccb *done_ccb);
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143 struct mpr_command *cm);
/* SMP passthrough support requires cam/scsi/smp_all.h (9.0+). */
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
/*
 * Linear search of sassc->targets[] for the target whose firmware
 * device handle equals 'handle', beginning at index 'start' so callers
 * can resume a scan.  Returns the matching target on success.
 */
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
156 	struct mprsas_target *target;
159 	for (i = start; i < sassc->maxtargets; i++) {
160 		target = &sassc->targets[i];
161 		if (target->handle == handle)
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery. Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
/*
 * Bump the discovery refcount.  Only meaningful while MPRSAS_IN_STARTUP
 * is set; on the 0 -> 1 transition the SIM queue is frozen so CAM holds
 * off I/O until all discovery activity completes (see the refcounting
 * rationale in the comment above, and mprsas_startup_decrement()).
 */
176 mprsas_startup_increment(struct mprsas_softc *sassc)
178 	MPR_FUNCTRACE(sassc->sc);
180 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 		if (sassc->startup_refcount++ == 0) {
182 			/* just starting, freeze the simq */
183 			mpr_dprint(sassc->sc, MPR_INIT,
184 			    "%s freezing simq\n", __func__);
/* PIM_NOSCAN (manual rescans) is only available on these releases. */
185 #if (__FreeBSD_version >= 1000039) || \
186 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
189 		xpt_freeze_simq(sassc->sim, 1);
191 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 	    sassc->startup_refcount);
/*
 * If the SIM queue was frozen due to resource shortage
 * (MPRSAS_QUEUE_FROZEN), clear the flag and release the queue so CAM
 * resumes dispatching I/O.  No-op when the flag is not set.
 */
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 	if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 		sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 		xpt_release_simq(sassc->sim, 1);
202 		mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop the discovery refcount taken by mprsas_startup_increment().
 * When the count reaches zero, startup/discovery is over: clear
 * MPRSAS_IN_STARTUP, release the SIM queue, and (on releases with
 * PIM_NOSCAN) trigger a full-bus rescan to pick up the final topology.
 */
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 	MPR_FUNCTRACE(sassc->sc);
211 	if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 		if (--sassc->startup_refcount == 0) {
213 			/* finished all discovery-related actions, release
214 			 * the simq and rescan for the latest topology.
216 			mpr_dprint(sassc->sc, MPR_INIT,
217 			    "%s releasing simq\n", __func__);
218 			sassc->flags &= ~MPRSAS_IN_STARTUP;
219 			xpt_release_simq(sassc->sim, 1);
220 #if (__FreeBSD_version >= 1000039) || \
221 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
/* NULL target means rescan the whole bus (wildcard). */
224 			mprsas_rescan_target(sassc->sc, NULL);
227 		mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
228 	    sassc->startup_refcount);
232 /* The firmware requires us to stop sending commands when we're doing task
233 * management, so refcount the TMs and keep the simq frozen when any are in
/*
 * Allocate a command from the high-priority pool for use as a task
 * management (TM) request.  TM requests must use the high-priority
 * queue so they are not blocked behind normal I/O.
 */
237 mprsas_alloc_tm(struct mpr_softc *sc)
239 	struct mpr_command *tm;
242 	tm = mpr_alloc_high_priority_command(sc);
/*
 * Release a TM command allocated by mprsas_alloc_tm().  Undoes the
 * per-device freeze taken when the TM was prepared: clears the target's
 * INRESET flag (otherwise SCSI I/O to it would stay blocked), releases
 * the devq, and frees the path/CCB that held the freeze before
 * returning the command to the high-priority pool.
 */
247 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
249 	int target_id = 0xFFFFFFFF;
256 	 * For TM's the devq is frozen for the device. Unfreeze it here and
257 	 * free the resources used for freezing the devq. Must clear the
258 	 * INRESET flag as well or scsi I/O will not work.
260 	if (tm->cm_targ != NULL) {
261 		tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
262 		target_id = tm->cm_targ->tid;
265 		mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
267 		xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
268 		xpt_free_path(tm->cm_ccb->ccb_h.path);
269 		xpt_free_ccb(tm->cm_ccb);
272 	mpr_free_high_priority_command(sc, tm);
/*
 * Schedule an asynchronous CAM rescan.  With targ == NULL the whole bus
 * is rescanned (XPT_SCAN_BUS with a wildcard target); otherwise only
 * the given target is rescanned (XPT_SCAN_TGT).  The target ID is
 * derived from the target's index within sassc->targets[].  Allocation
 * is nowait-safe so this can be called from completion context; on CCB
 * or path allocation failure the rescan is simply skipped with an
 * error message.
 */
276 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
278 	struct mprsas_softc *sassc = sc->sassc;
280 	target_id_t targetid;
284 	pathid = cam_sim_path(sassc->sim);
286 		targetid = CAM_TARGET_WILDCARD;
288 		targetid = targ - sassc->targets;
291 	 * Allocate a CCB and schedule a rescan.
293 	ccb = xpt_alloc_ccb_nowait();
295 		mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
299 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
300 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
301 		mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
306 	if (targetid == CAM_TARGET_WILDCARD)
307 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
309 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
311 	mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style logging helper for a command, emitted only when 'level'
 * is enabled in sc->mpr_debug.  Builds the message in a stack sbuf:
 * prefixes the CAM path (and, for XPT_SCSI_IO, the decoded CDB and
 * transfer length) when a CCB is attached, otherwise a synthetic
 * "(noperiph:...)" sim/bus/target tuple; then appends the SMID and the
 * caller's formatted message.
 */
316 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
326 	/* No need to be in here if debugging isn't enabled */
327 	if ((cm->cm_sc->mpr_debug & level) == 0)
330 	sbuf_new(&sb, str, sizeof(str), 0);
334 	if (cm->cm_ccb != NULL) {
335 		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
337 		sbuf_cat(&sb, path_str);
338 		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
339 			scsi_command_string(&cm->cm_ccb->csio, &sb);
340 			sbuf_printf(&sb, "length %d ",
341 			    cm->cm_ccb->csio.dxfer_len);
/* 0xFFFFFFFF marks "no target associated with this command". */
344 		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
345 		    cam_sim_name(cm->cm_sc->sassc->sim),
346 		    cam_sim_unit(cm->cm_sc->sassc->sim),
347 		    cam_sim_bus(cm->cm_sc->sassc->sim),
348 		    cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
352 	sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
353 	sbuf_vprintf(&sb, fmt, ap);
355 	mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove().  The device handle was stashed in
 * cm_complete_data.  A NULL reply indicates the request was lost to a
 * diag reset; log and free the TM.  On IOC success, wipe the target's
 * location bookkeeping (enclosure, slot, connector name, link rate)
 * but deliberately keep devname/sasaddr so the same target ID can be
 * re-assigned if the volume returns.  Unlike bare-drive removal, no
 * follow-up MPI2_SAS_OP_REMOVE_DEVICE is required for a volume (see
 * comment below this function).
 */
361 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
363 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 	struct mprsas_target *targ;
369 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
374 		/* XXX retry the remove after the diag reset completes? */
375 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
376 		    "0x%04x\n", __func__, handle);
377 		mprsas_free_tm(sc, tm);
/* Reply fields are little-endian; convert before comparing/printing. */
381 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
382 	    MPI2_IOCSTATUS_SUCCESS) {
383 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
384 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
387 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
388 	    le32toh(reply->TerminationCount));
389 	mpr_free_reply(sc, tm->cm_reply_data);
390 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
392 	mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
396 	 * Don't clear target if remove fails because things will get confusing.
397 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 	 * this target id if possible, and so we can assign the same target id
399 	 * to this device if it comes back in the future.
401 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402 	    MPI2_IOCSTATUS_SUCCESS) {
405 		targ->encl_handle = 0x0;
406 		targ->encl_level_valid = 0x0;
407 		targ->encl_level = 0x0;
408 		targ->connector_name[0] = ' ';
409 		targ->connector_name[1] = ' ';
410 		targ->connector_name[2] = ' ';
411 		targ->connector_name[3] = ' ';
412 		targ->encl_slot = 0x0;
413 		targ->exp_dev_handle = 0x0;
415 		targ->linkrate = 0x0;
418 		targ->scsi_req_desc_type = 0;
421 	mprsas_free_tm(sc, tm);
426 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
427 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by firmware handle: look
 * up the target, mark it MPRSAS_TARGET_INREMOVAL, rescan so CAM sees
 * the state change, then issue a target-reset TM whose completion
 * (mprsas_remove_volume) finishes the teardown.  On lookup or TM
 * allocation failure the removal is abandoned with an error message.
 */
430 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
432 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
433 	struct mpr_softc *sc;
434 	struct mpr_command *cm;
435 	struct mprsas_target *targ = NULL;
437 	MPR_FUNCTRACE(sassc->sc);
440 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
442 		/* FIXME: what is the action? */
443 		/* We don't know about this device? */
444 		mpr_dprint(sc, MPR_ERROR,
445 		   "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
449 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
451 	cm = mprsas_alloc_tm(sc);
453 		mpr_dprint(sc, MPR_ERROR,
454 		    "%s: command alloc failure\n", __func__);
458 	mprsas_rescan_target(sc, targ);
460 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
/*
 * NOTE(review): DevHandle is assigned without htole16() here, whereas
 * the parallel mprsas_prepare_remove() uses htole16(targ->handle) --
 * on big-endian hosts this would send a byte-swapped handle; verify
 * and add the conversion.
 */
461 	req->DevHandle = targ->handle;
462 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
463 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
465 	/* SAS Hard Link Reset / SATA Link Reset */
466 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
470 	cm->cm_desc.HighPriority.RequestFlags =
471 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
472 	cm->cm_complete = mprsas_remove_volume;
473 	cm->cm_complete_data = (void *)(uintptr_t)handle;
475 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
476 	    __func__, targ->tid);
477 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
479 	mpr_map_command(sc, cm);
483 * The firmware performs debounce on the link to avoid transient link errors
484 * and false removals. When it does decide that link has been lost and a
485 * device needs to go away, it expects that the host will perform a target reset
486 * and then an op remove. The reset has the side-effect of aborting any
487 * outstanding requests for the device, which is required for the op-remove to
488 * succeed. It's not clear if the host should check for the device coming back
489 * alive after the reset.
/*
 * Begin removal of a bare device identified by firmware handle.  Same
 * flow as mprsas_prepare_volume_remove(): mark the target INREMOVAL,
 * rescan, and send a target-reset TM -- but the completion here is
 * mprsas_remove_device(), which follows up with the
 * MPI2_SAS_OP_REMOVE_DEVICE the firmware requires for non-volume
 * devices.  The reset aborts any outstanding I/O to the device, a
 * precondition for the op-remove to succeed (see comment above).
 */
492 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
494 	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
495 	struct mpr_softc *sc;
496 	struct mpr_command *cm;
497 	struct mprsas_target *targ = NULL;
499 	MPR_FUNCTRACE(sassc->sc);
503 	targ = mprsas_find_target_by_handle(sassc, 0, handle);
505 		/* FIXME: what is the action? */
506 		/* We don't know about this device? */
507 		mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
512 	targ->flags |= MPRSAS_TARGET_INREMOVAL;
514 	cm = mprsas_alloc_tm(sc);
516 		mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
521 	mprsas_rescan_target(sc, targ);
523 	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
524 	memset(req, 0, sizeof(*req));
/* Request fields are little-endian on the wire; convert the handle. */
525 	req->DevHandle = htole16(targ->handle);
526 	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
527 	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
529 	/* SAS Hard Link Reset / SATA Link Reset */
530 	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
534 	cm->cm_desc.HighPriority.RequestFlags =
535 	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
536 	cm->cm_complete = mprsas_remove_device;
537 	cm->cm_complete_data = (void *)(uintptr_t)handle;
539 	mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
540 	    __func__, targ->tid);
541 	mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
543 	mpr_map_command(sc, cm);
/*
 * Completion of the target-reset TM from mprsas_prepare_remove().
 * Validates the reply, then REUSES the same command to send the
 * MPI2_SAS_OP_REMOVE_DEVICE IO-unit-control request, whose completion
 * is mprsas_remove_complete().  Afterwards, any commands still queued
 * on the target are completed back to CAM with CAM_DEV_NOT_THERE,
 * since the device is gone.
 */
547 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
549 	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
550 	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
551 	struct mprsas_target *targ;
552 	struct mpr_command *next_cm;
557 	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
558 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
562 	 * Currently there should be no way we can hit this case.  It only
563 	 * happens when we have a failure to allocate chain frames, and
564 	 * task management commands don't have S/G lists.
566 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
567 		mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
568 		    "handle %#04x! This should not happen!\n", __func__,
569 		    tm->cm_flags, handle);
/* NULL reply: request was lost to a diag reset; give up on the TM. */
573 		/* XXX retry the remove after the diag reset completes? */
574 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
575 		    "0x%04x\n", __func__, handle);
576 		mprsas_free_tm(sc, tm);
580 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
581 	    MPI2_IOCSTATUS_SUCCESS) {
582 		mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
583 		    "device 0x%x\n", le16toh(reply->IOCStatus), handle);
586 	mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
587 	    le32toh(reply->TerminationCount));
588 	mpr_free_reply(sc, tm->cm_reply_data);
589 	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */
591 	/* Reuse the existing command */
592 	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
593 	memset(req, 0, sizeof(*req));
594 	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
595 	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
596 	req->DevHandle = htole16(handle);
598 	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
599 	tm->cm_complete = mprsas_remove_complete;
600 	tm->cm_complete_data = (void *)(uintptr_t)handle;
602 	mpr_map_command(sc, tm);
604 	mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
606 	if (targ->encl_level_valid) {
607 		mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
608 		    "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
609 		    targ->connector_name);
/* Fail back any I/O still queued to the now-removed target. */
611 	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
614 		mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
615 		ccb = tm->cm_complete_data;
616 		mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
617 		mprsas_scsiio_complete(sc, tm);
/*
 * Final stage of device removal: completion of the
 * MPI2_SAS_OP_REMOVE_DEVICE request sent by mprsas_remove_device().
 * On IOC success, wipes the target's location bookkeeping (keeping
 * devname/sasaddr for target-ID reuse, mirroring
 * mprsas_remove_volume()) and frees the per-LUN records.  Always
 * returns the TM command to the pool.
 */
622 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
624 	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
626 	struct mprsas_target *targ;
627 	struct mprsas_lun *lun;
631 	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
632 	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
635 	 * Currently there should be no way we can hit this case.  It only
636 	 * happens when we have a failure to allocate chain frames, and
637 	 * task management commands don't have S/G lists.
639 	if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
640 		mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
641 		    "handle %#04x! This should not happen!\n", __func__,
642 		    tm->cm_flags, handle);
643 		mprsas_free_tm(sc, tm);
648 		/* most likely a chip reset */
649 		mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
650 		    "0x%04x\n", __func__, handle);
651 		mprsas_free_tm(sc, tm);
655 	mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
656 	    __func__, handle, le16toh(reply->IOCStatus));
659 	 * Don't clear target if remove fails because things will get confusing.
660 	 * Leave the devname and sasaddr intact so that we know to avoid reusing
661 	 * this target id if possible, and so we can assign the same target id
662 	 * to this device if it comes back in the future.
664 	if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
665 	    MPI2_IOCSTATUS_SUCCESS) {
668 		targ->encl_handle = 0x0;
669 		targ->encl_level_valid = 0x0;
670 		targ->encl_level = 0x0;
671 		targ->connector_name[0] = ' ';
672 		targ->connector_name[1] = ' ';
673 		targ->connector_name[2] = ' ';
674 		targ->connector_name[3] = ' ';
675 		targ->encl_slot = 0x0;
676 		targ->exp_dev_handle = 0x0;
678 		targ->linkrate = 0x0;
681 		targ->scsi_req_desc_type = 0;
/* Drain and free the per-target LUN list. */
683 		while (!SLIST_EMPTY(&targ->luns)) {
684 			lun = SLIST_FIRST(&targ->luns);
685 			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
690 	mprsas_free_tm(sc, tm);
/*
 * Build the firmware event mask this driver cares about (SAS
 * device/topology/discovery, IR volume and physical-disk state,
 * temperature threshold) and register mprsas_evt_handler for them.
 * MPI 2.6+ IOCs add the active-cable exception event, and Gen3.5 IOCs
 * additionally report PCIe (NVMe) device/topology events.
 */
694 mprsas_register_events(struct mpr_softc *sc)
699 	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
700 	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
701 	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
702 	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
703 	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
704 	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
705 	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
706 	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
707 	setbit(events, MPI2_EVENT_IR_VOLUME);
708 	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
709 	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
710 	setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
711 	if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 		setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 		if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 			setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 			setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 			setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
720 	mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 	    &sc->sassc->mprsas_eh);
/*
 * Attach the SAS/CAM layer of the driver: allocate the mprsas_softc
 * and target array, create the CAM SIM and its queue, spin up the
 * firmware-event taskqueue, register the (single, faked) SCSI bus,
 * freeze the SIM queue until discovery completes, and register async
 * handlers so EEDP capabilities can be learned as devices appear.
 * Returns 0 on success or an errno on failure.
 */
727 mpr_attach_sas(struct mpr_softc *sc)
729 	struct mprsas_softc *sassc;
731 	int unit, error = 0, reqs;
734 	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
736 	sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
738 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
739 		    "Cannot allocate SAS subsystem memory\n");
744 	 * XXX MaxTargets could change during a reinit.  Since we don't
745 	 * resize the targets[] array during such an event, cache the value
746 	 * of MaxTargets here so that we don't get into trouble later.  This
747 	 * should move into the reinit logic.
749 	sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
750 	sassc->targets = malloc(sizeof(struct mprsas_target) *
751 	    sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
752 	if (!sassc->targets) {
753 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
754 		    "Cannot allocate SAS target memory\n");
/* Reserve the high-priority requests (and one request) for TMs/internal use. */
761 	reqs = sc->num_reqs - sc->num_prireqs - 1;
762 	if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
763 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
768 	unit = device_get_unit(sc->mpr_dev);
769 	sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
770 	    unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
771 	if (sassc->sim == NULL) {
772 		mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
777 	TAILQ_INIT(&sassc->ev_queue);
779 	/* Initialize taskqueue for Event Handling */
780 	TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
781 	sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
782 	    taskqueue_thread_enqueue, &sassc->ev_tq);
783 	taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
784 	    device_get_nameunit(sc->mpr_dev));
789 	 * XXX There should be a bus for every port on the adapter, but since
790 	 * we're just going to fake the topology for now, we'll pretend that
791 	 * everything is just a target on a single bus.
793 	if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
794 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
795 		    "Error %d registering SCSI bus\n", error);
801 	 * Assume that discovery events will start right away.
803 	 * Hold off boot until discovery is complete.
805 	sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
806 	sc->sassc->startup_refcount = 0;
/* Freezes the simq via the 0 -> 1 refcount transition. */
807 	mprsas_startup_increment(sassc);
809 	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
812 	 * Register for async events so we can determine the EEDP
813 	 * capabilities of devices.
815 	status = xpt_create_path(&sassc->path, /*periph*/NULL,
816 	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
818 	if (status != CAM_REQ_CMP) {
819 		mpr_dprint(sc, MPR_INIT|MPR_ERROR,
820 		    "Error %#x creating sim path\n", status);
/* AC_ADVINFO_CHANGED is only available on releases with the ADVINFO API. */
825 #if (__FreeBSD_version >= 1000006) || \
826 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
827 	event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
829 	event = AC_FOUND_DEVICE;
833 	 * Prior to the CAM locking improvements, we can't call
834 	 * xpt_register_async() with a particular path specified.
836 	 * If a path isn't specified, xpt_register_async() will
837 	 * generate a wildcard path and acquire the XPT lock while
838 	 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
839 	 * It will then drop the XPT lock once that is done.
841 	 * If a path is specified for xpt_register_async(), it will
842 	 * not acquire and drop the XPT lock around the call to
843 	 * xpt_action().  xpt_action() asserts that the caller
844 	 * holds the SIM lock, so the SIM lock has to be held when
845 	 * calling xpt_register_async() when the path is specified.
847 	 * But xpt_register_async calls xpt_for_all_devices(),
848 	 * which calls xptbustraverse(), which will acquire each
849 	 * SIM lock.  When it traverses our particular bus, it will
850 	 * necessarily acquire the SIM lock, which will lead to a
851 	 * recursive lock acquisition.
853 	 * The CAM locking changes fix this problem by acquiring
854 	 * the XPT topology lock around bus traversal in
855 	 * xptbustraverse(), so the caller can hold the SIM lock
856 	 * and it does not cause a recursive lock acquisition.
858 	 * These __FreeBSD_version values are approximate, especially
859 	 * for stable/10, which is two months later than the actual
863 #if (__FreeBSD_version < 1000703) || \
864 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
866 	status = xpt_register_async(event, mprsas_async, sc,
870 	status = xpt_register_async(event, mprsas_async, sc,
874 	if (status != CAM_REQ_CMP) {
875 		mpr_dprint(sc, MPR_ERROR,
876 		    "Error %#x registering async handler for "
877 		    "AC_ADVINFO_CHANGED events\n", status);
878 		xpt_free_path(sassc->path);
882 	if (status != CAM_REQ_CMP) {
884 		 * EEDP use is the exception, not the rule.
885 		 * Warn the user, but do not fail to attach.
887 		mpr_printf(sc, "EEDP capabilities disabled.\n");
892 	mprsas_register_events(sc);
897 	mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
/*
 * Tear down the SAS/CAM layer in reverse of mpr_attach_sas():
 * deregister firmware events, drain/free the event taskqueue (with the
 * driver lock dropped to avoid deadlocking against running tasks),
 * unwind any outstanding startup freezes, deregister the async
 * handler, release the simq if still frozen, deregister the bus, free
 * SIM and SIMQ, and finally free per-target LUN records and the
 * target array.  Safe to call after a partial attach (NULL checks
 * throughout).
 */
902 mpr_detach_sas(struct mpr_softc *sc)
904 	struct mprsas_softc *sassc;
905 	struct mprsas_lun *lun, *lun_tmp;
906 	struct mprsas_target *targ;
911 	if (sc->sassc == NULL)
915 	mpr_deregister_events(sc, sassc->mprsas_eh);
918 	 * Drain and free the event handling taskqueue with the lock
919 	 * unheld so that any parallel processing tasks drain properly
920 	 * without deadlocking.
922 	if (sassc->ev_tq != NULL)
923 		taskqueue_free(sassc->ev_tq);
925 	/* Make sure CAM doesn't wedge if we had to bail out early. */
928 	while (sassc->startup_refcount != 0)
929 		mprsas_startup_decrement(sassc);
931 	/* Deregister our async handler */
932 	if (sassc->path != NULL) {
/* Registering with an empty event mask removes the handler. */
933 		xpt_register_async(0, mprsas_async, sc, sassc->path);
934 		xpt_free_path(sassc->path);
938 	if (sassc->flags & MPRSAS_IN_STARTUP)
939 		xpt_release_simq(sassc->sim, 1);
941 	if (sassc->sim != NULL) {
942 		xpt_bus_deregister(cam_sim_path(sassc->sim));
943 		cam_sim_free(sassc->sim, FALSE);
948 	if (sassc->devq != NULL)
949 		cam_simq_free(sassc->devq);
951 	for (i = 0; i < sassc->maxtargets; i++) {
952 		targ = &sassc->targets[i];
953 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
957 	free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery finishes: cancel the pending
 * discovery-timeout callout if armed, then (once per driver init, when
 * track_mapping_events is set) arm the device-check callout that
 * updates missing counts in the mapping table after events settle.
 */
965 mprsas_discovery_end(struct mprsas_softc *sassc)
967 	struct mpr_softc *sc = sassc->sc;
971 	if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
972 		callout_stop(&sassc->discovery_callout);
975 	 * After discovery has completed, check the mapping table for any
976 	 * missing devices and update their missing counts. Only do this once
977 	 * whenever the driver is initialized so that missing counts aren't
978 	 * updated unnecessarily. Note that just because discovery has
979 	 * completed doesn't mean that events have been processed yet. The
980 	 * check_devices function is a callout timer that checks if ALL devices
981 	 * are missing. If so, it will wait a little longer for events to
982 	 * complete and keep resetting itself until some device in the mapping
983 	 * table is not missing, meaning that event processing has started.
985 	if (sc->track_mapping_events) {
986 		mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
987 		    "completed. Check for missing devices in the mapping "
989 		callout_reset(&sc->device_check_callout,
990 		    MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
/*
 * Main CAM action entry point for the SIM.  Dispatches on the CCB
 * function code: answers XPT_PATH_INQ inline (capabilities, maxio
 * derived from SGEs-per-frame * chain depth, optionally clamped by the
 * user's max_io_pages tunable), reports per-target transfer settings,
 * computes geometry, and hands SCSI I/O, device reset, and SMP
 * passthrough to their dedicated handlers.  Must be called with the
 * driver mutex held (asserted below).
 */
996 mprsas_action(struct cam_sim *sim, union ccb *ccb)
998 	struct mprsas_softc *sassc;
1000 	sassc = cam_sim_softc(sim);
1002 	MPR_FUNCTRACE(sassc->sc);
1003 	mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1004 	    ccb->ccb_h.func_code);
1005 	mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
1007 	switch (ccb->ccb_h.func_code) {
1010 		struct ccb_pathinq *cpi = &ccb->cpi;
1011 		struct mpr_softc *sc = sassc->sc;
1012 		uint8_t sges_per_frame;
1014 		cpi->version_num = 1;
1015 		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1016 		cpi->target_sprt = 0;
1017 #if (__FreeBSD_version >= 1000039) || \
1018 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1019 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1021 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1023 		cpi->hba_eng_cnt = 0;
1024 		cpi->max_target = sassc->maxtargets - 1;
1028 		 * initiator_id is set here to an ID outside the set of valid
1029 		 * target IDs (including volumes).
1031 		cpi->initiator_id = sassc->maxtargets;
1032 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1033 		strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1034 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1035 		cpi->unit_number = cam_sim_unit(sim);
1036 		cpi->bus_id = cam_sim_bus(sim);
1038 		 * XXXSLM-I think this needs to change based on config page or
1039 		 * something instead of hardcoded to 150000.
1041 		cpi->base_transfer_speed = 150000;
1042 		cpi->transport = XPORT_SAS;
1043 		cpi->transport_version = 0;
1044 		cpi->protocol = PROTO_SCSI;
1045 		cpi->protocol_version = SCSI_REV_SPC;
1048 		 * Max IO Size is Page Size * the following:
1049 		 * ((SGEs per frame - 1 for chain element) *
1050 		 * Max Chain Depth) + 1 for no chain needed in last frame
1052 		 * If user suggests a Max IO size to use, use the smaller of the
1053 		 * user's value and the calculated value as long as the user's
1054 		 * value is larger than 0. The user's value is in pages.
1056 		sges_per_frame = (sc->chain_frame_size /
1057 		    sizeof(MPI2_IEEE_SGE_SIMPLE64)) - 1;
1058 		cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
1059 		cpi->maxio *= PAGE_SIZE;
1060 		if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
1062 			cpi->maxio = sc->max_io_pages * PAGE_SIZE;
1063 		sc->maxio = cpi->maxio;
1064 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1067 	case XPT_GET_TRAN_SETTINGS:
1069 		struct ccb_trans_settings	*cts;
1070 		struct ccb_trans_settings_sas	*sas;
1071 		struct ccb_trans_settings_scsi	*scsi;
1072 		struct mprsas_target *targ;
1075 		sas = &cts->xport_specific.sas;
1076 		scsi = &cts->proto_specific.scsi;
1078 		KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1079 		    ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1080 		    cts->ccb_h.target_id));
1081 		targ = &sassc->targets[cts->ccb_h.target_id];
/* handle == 0 means no device ever occupied this target slot. */
1082 		if (targ->handle == 0x0) {
1083 			mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1087 		cts->protocol_version = SCSI_REV_SPC2;
1088 		cts->transport = XPORT_SAS;
1089 		cts->transport_version = 0;
/* Map the negotiated link rate code to a bitrate in kb/s. */
1091 		sas->valid = CTS_SAS_VALID_SPEED;
1092 		switch (targ->linkrate) {
1094 			sas->bitrate = 150000;
1097 			sas->bitrate = 300000;
1100 			sas->bitrate = 600000;
1103 			sas->bitrate = 1200000;
1109 		cts->protocol = PROTO_SCSI;
1110 		scsi->valid = CTS_SCSI_VALID_TQ;
1111 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1113 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1116 	case XPT_CALC_GEOMETRY:
1117 		cam_calc_geometry(&ccb->ccg, /*extended*/1);
1118 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1121 		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1123 		mprsas_action_resetdev(sassc, ccb);
/* Aborts/bus resets are acknowledged as success without real action. */
1128 		mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1129 		    "for abort or reset\n");
1130 		mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1133 		mprsas_action_scsiio(sassc, ccb);
1135 #if __FreeBSD_version >= 900026
1137 		mprsas_action_smpio(sassc, ccb);
/* Unsupported function codes are rejected, not silently dropped. */
1141 		mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Broadcast an async reset notification (e.g. AC_BUS_RESET or
 * AC_SENT_BDR) to CAM for the given target/LUN on this SIM's path.
 * Builds a temporary path, fires xpt_async(), and frees the path; on
 * path-creation failure the announcement is skipped with an error log.
 */
1149 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1150     target_id_t target_id, lun_id_t lun_id)
1152 	path_id_t path_id = cam_sim_path(sc->sassc->sim);
1153 	struct cam_path *path;
1155 	mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1156 	    ac_code, target_id, (uintmax_t)lun_id);
1158 	if (xpt_create_path(&path, NULL,
1159 		path_id, target_id, lun_id) != CAM_REQ_CMP) {
1160 		mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1165 	xpt_async(ac_code, path, NULL);
1166 	xpt_free_path(path);
/*
 * Complete every outstanding command with a NULL reply, used during a
 * diag reset when the firmware will never answer them.  Requires the
 * softc mutex to be held.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1170 mprsas_complete_all_commands(struct mpr_softc *sc)
1172 struct mpr_command *cm;
1177 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1179 /* complete all commands with a NULL reply */
/* Slot 0 is skipped — command 0 is reserved by the driver. */
1180 for (i = 1; i < sc->num_reqs; i++) {
1181 cm = &sc->commands[i];
1182 cm->cm_reply = NULL;
/* Polled commands are marked complete so the poller stops waiting. */
1185 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1186 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
/* Async commands: run their completion callback with the NULL reply. */
1188 if (cm->cm_complete != NULL) {
1189 mprsas_log_command(cm, MPR_RECOVERY,
1190 "completing cm %p state %x ccb %p for diag reset\n",
1191 cm, cm->cm_state, cm->cm_ccb);
1192 cm->cm_complete(sc, cm);
/* Sleeping waiters get woken instead of called back. */
1196 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1197 mprsas_log_command(cm, MPR_RECOVERY,
1198 "waking up cm %p state %x ccb %p for diag reset\n",
1199 cm, cm->cm_state, cm->cm_ccb);
/* Keep the active-I/O accounting from underflowing. */
1204 if (cm->cm_sc->io_cmds_active != 0)
1205 cm->cm_sc->io_cmds_active--;
/* A busy command we could not complete by any path above: log it. */
1207 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1208 /* this should never happen, but if it does, log */
1209 mprsas_log_command(cm, MPR_RECOVERY,
1210 "cm %p state %x flags 0x%x ccb %p during diag "
1211 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Post-diag-reset recovery: put the driver back into startup/discovery
 * mode, announce a bus reset to CAM, flush all outstanding commands,
 * and invalidate every target's device handle so rediscovery assigns
 * fresh ones.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1218 mprsas_handle_reinit(struct mpr_softc *sc)
1222 /* Go back into startup mode and freeze the simq, so that CAM
1223 * doesn't send any commands until after we've rediscovered all
1224 * targets and found the proper device handles for them.
1226 * After the reset, portenable will trigger discovery, and after all
1227 * discovery-related activities have finished, the simq will be
1230 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1231 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1232 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1233 mprsas_startup_increment(sc->sassc);
1235 /* notify CAM of a bus reset */
1236 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1239 /* complete and cleanup after all outstanding commands */
1240 mprsas_complete_all_commands(sc);
1242 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1243 __func__, sc->sassc->startup_refcount);
1245 /* zero all the target handles, since they may change after the
1246 * reset, and we have to rediscover all the targets and use the new
1249 for (i = 0; i < sc->sassc->maxtargets; i++) {
/* Anything still outstanding here is a leak — log it for diagnosis. */
1250 if (sc->sassc->targets[i].outstanding != 0)
1251 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1252 i, sc->sassc->targets[i].outstanding);
1253 sc->sassc->targets[i].handle = 0x0;
1254 sc->sassc->targets[i].exp_dev_handle = 0x0;
1255 sc->sassc->targets[i].outstanding = 0;
1256 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management request itself times
 * out.  Runs with the softc mutex held (callout initialized with the
 * mutex — presumed; TODO confirm at the callout_init site).
 * NOTE(review): the body is truncated in this view; the escalation
 * that follows the log message is not visible here.
 */
1260 mprsas_tm_timeout(void *data)
1262 struct mpr_command *tm = data;
1263 struct mpr_softc *sc = tm->cm_sc;
1265 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1267 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/*
 * Completion handler for a LUN-reset task-management request.  Decides
 * whether recovery for this LU is finished, whether more timed-out
 * commands must be aborted, or whether to escalate to a target reset.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1273 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1275 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1276 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1277 unsigned int cm_count = 0;
1278 struct mpr_command *cm;
1279 struct mprsas_target *targ;
/* The TM completed — cancel its timeout callout first. */
1281 callout_stop(&tm->cm_callout);
1283 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1284 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1288 * Currently there should be no way we can hit this case. It only
1289 * happens when we have a failure to allocate chain frames, and
1290 * task management commands don't have S/G lists.
1292 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1293 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1294 "%s: cm_flags = %#x for LUN reset! "
1295 "This should not happen!\n", __func__, tm->cm_flags);
1296 mprsas_free_tm(sc, tm);
/* No reply frame: either a diag reset swallowed it (cleanup) or the
 * firmware is wedged (escalate to controller reset). */
1300 if (reply == NULL) {
1301 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1303 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1304 /* this completion was due to a reset, just cleanup */
1305 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1306 "reset, ignoring NULL LUN reset reply\n");
1308 mprsas_free_tm(sc, tm);
1311 /* we should have gotten a reply. */
1312 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1313 "LUN reset attempt, resetting controller\n");
/* Reply fields are little-endian on the wire; byte-swap for logging. */
1319 mpr_dprint(sc, MPR_RECOVERY,
1320 "logical unit reset status 0x%x code 0x%x count %u\n",
1321 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1322 le32toh(reply->TerminationCount));
1325 * See if there are any outstanding commands for this LUN.
1326 * This could be made more efficient by using a per-LU data
1327 * structure of some sort.
1329 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1330 if (cm->cm_lun == tm->cm_lun)
/* Nothing left on this LUN: recovery for it is complete. */
1334 if (cm_count == 0) {
1335 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1336 "Finished recovery after LUN reset for target %u\n",
1339 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1343 * We've finished recovery for this logical unit. check and
1344 * see if some other logical unit has a timedout command
1345 * that needs to be processed.
1347 cm = TAILQ_FIRST(&targ->timedout_commands);
/* Reuse this TM to abort the next timed-out command, if any. */
1349 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1350 "More commands to abort for target %u\n", targ->tid);
1351 mprsas_send_abort(sc, tm, cm);
1354 mprsas_free_tm(sc, tm);
1357 /* if we still have commands for this LUN, the reset
1358 * effectively failed, regardless of the status reported.
1359 * Escalate to a target reset.
1361 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1362 "logical unit reset complete for target %u, but still "
1363 "have %u command(s), sending target reset\n", targ->tid,
1365 mprsas_send_reset(sc, tm,
1366 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a target-reset task-management request.  If
 * the target has no outstanding commands, recovery is done; otherwise
 * the reset effectively failed and recovery escalates (controller
 * reset, per the log message — escalation code not visible here).
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1371 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1373 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1374 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1375 struct mprsas_target *targ;
/* TM completed — stop its timeout callout. */
1377 callout_stop(&tm->cm_callout);
1379 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1380 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1384 * Currently there should be no way we can hit this case. It only
1385 * happens when we have a failure to allocate chain frames, and
1386 * task management commands don't have S/G lists.
1388 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1389 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1390 "reset! This should not happen!\n", __func__, tm->cm_flags);
1391 mprsas_free_tm(sc, tm);
/* No reply frame: cleanup if a diag reset ate it, else escalate. */
1395 if (reply == NULL) {
1396 mpr_dprint(sc, MPR_RECOVERY,
1397 "NULL target reset reply for tm %p TaskMID %u\n",
1398 tm, le16toh(req->TaskMID));
1399 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1400 /* this completion was due to a reset, just cleanup */
1401 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1402 "reset, ignoring NULL target reset reply\n");
1404 mprsas_free_tm(sc, tm);
1407 /* we should have gotten a reply. */
1408 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1409 "target reset attempt, resetting controller\n");
/* Byte-swap the little-endian reply fields for logging. */
1415 mpr_dprint(sc, MPR_RECOVERY,
1416 "target reset status 0x%x code 0x%x count %u\n",
1417 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1418 le32toh(reply->TerminationCount));
1420 if (targ->outstanding == 0) {
1422 * We've finished recovery for this target and all
1423 * of its logical units.
1425 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1426 "Finished reset recovery for target %u\n", targ->tid);
/* Tell CAM a device reset (BDR) was delivered to this target. */
1428 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1432 mprsas_free_tm(sc, tm);
1435 * After a target reset, if this target still has
1436 * outstanding commands, the reset effectively failed,
1437 * regardless of the status reported. escalate.
1439 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1440 "Target reset complete for target %u, but still have %u "
1441 "command(s), resetting controller\n", targ->tid,
/* Seconds to wait for a reset TM before the TM-timeout handler fires. */
1447 #define MPR_RESET_TIMEOUT 30
/*
 * Build and send a SCSI task-management reset (LUN reset or target
 * reset, selected by 'type') for tm's target.  Sets the appropriate
 * completion handler, arms the MPR_RESET_TIMEOUT callout, and maps the
 * command to the hardware.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1450 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1452 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1453 struct mprsas_target *target;
1456 target = tm->cm_targ;
/* Handle 0 means the device is gone/undiscovered — nothing to reset. */
1457 if (target->handle == 0) {
1458 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1459 "%d\n", __func__, target->tid);
/* Fill in the MPI task-management request frame. */
1463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1464 req->DevHandle = htole16(target->handle);
1465 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1466 req->TaskType = type;
1468 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1469 /* XXX Need to handle invalid LUNs */
1470 MPR_SET_LUN(req->LUN, tm->cm_lun);
1471 tm->cm_targ->logical_unit_resets++;
1472 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1473 "Sending logical unit reset to target %u lun %d\n",
1474 target->tid, tm->cm_lun);
1475 tm->cm_complete = mprsas_logical_unit_reset_complete;
1476 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1477 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1479 * Target reset method =
1480 * SAS Hard Link Reset / SATA Link Reset
1482 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1483 tm->cm_targ->target_resets++;
1484 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1485 "Sending target reset to target %u\n", target->tid);
1486 tm->cm_complete = mprsas_target_reset_complete;
/* Target reset applies to all LUNs — use the wildcard. */
1487 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1490 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
/* Extra locator info when the enclosure topology is known. */
1494 if (target->encl_level_valid) {
1495 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1496 "At enclosure level %d, slot %d, connector name (%4s)\n",
1497 target->encl_level, target->encl_slot,
1498 target->connector_name);
/* TMs go out on the high-priority request queue. */
1502 tm->cm_desc.HighPriority.RequestFlags =
1503 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1504 tm->cm_complete_data = (void *)tm;
/* Arm the TM timeout before handing the command to the hardware. */
1506 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1507 mprsas_tm_timeout, tm);
1509 err = mpr_map_command(sc, tm);
1511 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1512 "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT_TASK task-management request.  If no
 * timed-out commands remain, recovery for the target is done; if a
 * different command is now at the head of the timed-out list, abort it
 * next; if the same command is still there, the abort failed —
 * escalate to a LUN reset.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1519 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1521 struct mpr_command *cm;
1522 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1523 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1524 struct mprsas_target *targ;
/* TM completed — stop its timeout callout. */
1526 callout_stop(&tm->cm_callout);
1528 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1529 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1533 * Currently there should be no way we can hit this case. It only
1534 * happens when we have a failure to allocate chain frames, and
1535 * task management commands don't have S/G lists.
1537 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1538 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1539 "cm_flags = %#x for abort %p TaskMID %u!\n",
1540 tm->cm_flags, tm, le16toh(req->TaskMID));
1541 mprsas_free_tm(sc, tm);
/* No reply frame: cleanup if a diag reset ate it, else escalate. */
1545 if (reply == NULL) {
1546 mpr_dprint(sc, MPR_RECOVERY,
1547 "NULL abort reply for tm %p TaskMID %u\n",
1548 tm, le16toh(req->TaskMID));
1549 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1550 /* this completion was due to a reset, just cleanup */
1551 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1552 "reset, ignoring NULL abort reply\n");
1554 mprsas_free_tm(sc, tm);
1556 /* we should have gotten a reply. */
1557 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1558 "abort attempt, resetting controller\n");
/* Byte-swap the little-endian reply fields for logging. */
1564 mpr_dprint(sc, MPR_RECOVERY,
1565 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1566 le16toh(req->TaskMID),
1567 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1568 le32toh(reply->TerminationCount));
/* Inspect the head of the timed-out list to decide the next step. */
1570 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1573 * if there are no more timedout commands, we're done with
1574 * error recovery for this target.
1576 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1577 "Finished abort recovery for target %u\n", targ->tid);
1579 mprsas_free_tm(sc, tm);
1580 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1581 /* abort success, but we have more timedout commands to abort */
1582 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1583 "Continuing abort recovery for target %u\n", targ->tid);
1584 mprsas_send_abort(sc, tm, cm);
1587 * we didn't get a command completion, so the abort
1588 * failed as far as we're concerned. escalate.
1590 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1591 "Abort failed for target %u, sending logical unit reset\n",
1594 mprsas_send_reset(sc, tm,
1595 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to wait for an abort TM before the TM-timeout handler fires. */
1599 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and send an ABORT_TASK task-management request targeting the
 * timed-out command 'cm', using TM command 'tm'.  Arms the
 * MPR_ABORT_TIMEOUT callout and maps the TM to the hardware.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1602 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1603 struct mpr_command *cm)
1605 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1606 struct mprsas_target *targ;
/* Handle 0 means the device is gone — nothing to send the abort to. */
1610 if (targ->handle == 0) {
1611 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1612 "%s null devhandle for target_id %d\n",
1613 __func__, cm->cm_ccb->ccb_h.target_id);
1617 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1618 "Aborting command %p\n", cm);
/* Fill in the MPI abort-task request frame. */
1620 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1621 req->DevHandle = htole16(targ->handle);
1622 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1623 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1625 /* XXX Need to handle invalid LUNs */
1626 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the exact request (by SMID) to be aborted. */
1628 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
/* TMs go out on the high-priority request queue. */
1631 tm->cm_desc.HighPriority.RequestFlags =
1632 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1633 tm->cm_complete = mprsas_abort_complete;
1634 tm->cm_complete_data = (void *)tm;
1635 tm->cm_targ = cm->cm_targ;
1636 tm->cm_lun = cm->cm_lun;
/* Arm the TM timeout before handing the command to the hardware. */
1638 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1639 mprsas_tm_timeout, tm);
1643 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1645 err = mpr_map_command(sc, tm);
1647 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1648 "error %d sending abort for cm %p SMID %u\n",
1649 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O command.  Marks the command
 * CAM_CMD_TIMEOUT, queues it on the target's timed-out list, and kicks
 * off recovery by sending an abort TM (or queues behind an existing
 * TM if the target is already in recovery).
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1654 mprsas_scsiio_timeout(void *data)
1656 sbintime_t elapsed, now;
1658 struct mpr_softc *sc;
1659 struct mpr_command *cm;
1660 struct mprsas_target *targ;
1662 cm = (struct mpr_command *)data;
1668 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1670 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1673 * Run the interrupt handler to make sure it's not pending. This
1674 * isn't perfect because the command could have already completed
1675 * and been re-used, though this is unlikely.
1677 mpr_intr_locked(sc);
/* Completion raced the timeout — the command already finished. */
1678 if (cm->cm_state == MPR_CM_STATE_FREE) {
1679 mprsas_log_command(cm, MPR_XINFO,
1680 "SCSI command %p almost timed out\n", cm);
1684 if (cm->cm_ccb == NULL) {
1685 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
/* sim_data was stamped with sbinuptime() at submit; compute elapsed. */
1692 elapsed = now - ccb->ccb_h.qos.sim_data;
1693 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1694 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1695 targ->tid, targ->handle, ccb->ccb_h.timeout,
1696 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1697 if (targ->encl_level_valid) {
1698 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1699 "At enclosure level %d, slot %d, connector name (%4s)\n",
1700 targ->encl_level, targ->encl_slot, targ->connector_name);
1703 /* XXX first, check the firmware state, to see if it's still
1704 * operational. if not, do a diag reset.
1706 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1707 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1708 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1710 if (targ->tm != NULL) {
1711 /* target already in recovery, just queue up another
1712 * timedout command to be processed later.
1714 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1715 "processing by tm %p\n", cm, targ->tm);
1717 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1719 /* start recovery by aborting the first timedout command */
1720 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1721 "Sending abort to target %u for SMID %d\n", targ->tid,
1722 cm->cm_desc.Default.SMID);
1723 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1725 mprsas_send_abort(sc, targ->tm, cm);
1728 /* XXX queue this target up for recovery once a TM becomes
1729 * available. The firmware only has a limited number of
1730 * HighPriority credits for the high priority requests used
1731 * for task management, and we ran out.
1733 * Isilon: don't worry about this for now, since we have
1734 * more credits than disks in an enclosure, and limit
1735 * ourselves to one TM per target for recovery.
1737 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1738 "timedout cm %p failed to allocate a tm\n", cm);
1743 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1745 * Return 0 - for success,
1746 * 1 - to immediately return back the command with success status to CAM
1747 * negative value - to fallback to firmware path i.e. issue scsi unmap
1748 * to FW without any translation.
/*
 * Translates a SCSI UNMAP CCB into an MPI2.6 NVMe Encapsulated request
 * carrying a native NVMe Dataset Management (deallocate) command, and
 * submits it.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1751 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1752 union ccb *ccb, struct mprsas_target *targ)
1754 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1755 struct ccb_scsiio *csio;
1756 struct unmap_parm_list *plist;
1757 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1758 struct nvme_command *c;
1760 uint16_t ndesc, list_len, data_length;
1761 struct mpr_prp_page *prp_page_info;
1762 uint64_t nvme_dsm_ranges_dma_handle;
/* Extract the UNMAP parameter-list length from CDB bytes 7-8 (big
 * endian); the accessor differs across FreeBSD versions. */
1765 #if __FreeBSD_version >= 1100103
1766 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1768 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1769 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1770 ccb->csio.cdb_io.cdb_ptr[8]);
1772 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1773 ccb->csio.cdb_io.cdb_bytes[8]);
1777 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
/* Stage the UNMAP payload in a private buffer before translation. */
1781 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1783 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1784 "save UNMAP data\n");
1788 /* Copy SCSI unmap data to a local buffer */
1789 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1791 /* return back the unmap command to CAM with success status,
1792 * if number of descripts is zero.
/* Each UNMAP block descriptor is 16 bytes, hence the >> 4. */
1794 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1796 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1797 "UNMAP cmd is Zero\n");
/* Refuse transfers larger than the drive's reported MDTS. */
1802 data_length = ndesc * sizeof(struct nvme_dsm_range);
1803 if (data_length > targ->MDTS) {
1804 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1805 "Device's MDTS: %d\n", data_length, targ->MDTS);
/* The DSM range list lives in a driver-owned PRP page. */
1810 prp_page_info = mpr_alloc_prp_page(sc);
1811 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1812 "UNMAP command.\n", __func__));
1815 * Insert the allocated PRP page into the command's PRP page list. This
1816 * will be freed when the command is freed.
1818 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1820 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1821 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1823 bzero(nvme_dsm_ranges, data_length);
1825 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1826 * for each descriptors contained in SCSI UNMAP data.
/* SCSI UNMAP descriptors are big-endian; NVMe wants little-endian. */
1828 for (i = 0; i < ndesc; i++) {
1829 nvme_dsm_ranges[i].length =
1830 htole32(be32toh(plist->desc[i].nlb));
1831 nvme_dsm_ranges[i].starting_lba =
1832 htole64(be64toh(plist->desc[i].slba));
1833 nvme_dsm_ranges[i].attributes = 0;
1836 /* Build MPI2.6's NVMe Encapsulated Request Message */
1837 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1838 bzero(req, sizeof(*req));
1839 req->DevHandle = htole16(targ->handle);
1840 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1841 req->Flags = MPI26_NVME_FLAGS_WRITE;
/* The command's sense buffer doubles as the NVMe error-response area. */
1842 req->ErrorResponseBaseAddress.High =
1843 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1844 req->ErrorResponseBaseAddress.Low =
1845 htole32(cm->cm_sense_busaddr);
1846 req->ErrorResponseAllocationLength =
1847 htole16(sizeof(struct nvme_completion));
1848 req->EncapsulatedCommandLength =
1849 htole16(sizeof(struct nvme_command));
1850 req->DataLength = htole32(data_length);
1852 /* Build NVMe DSM command */
1853 c = (struct nvme_command *) req->NVMe_Command;
1854 c->opc = NVME_OPC_DATASET_MANAGEMENT;
/* NVMe namespaces are 1-based; CAM LUNs are 0-based. */
1855 c->nsid = htole32(csio->ccb_h.target_lun + 1);
/* cdw10 holds the 0-based count of ranges. */
1856 c->cdw10 = htole32(ndesc - 1);
1857 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1859 cm->cm_length = data_length;
1862 cm->cm_complete = mprsas_scsiio_complete;
1863 cm->cm_complete_data = ccb;
1865 cm->cm_lun = csio->ccb_h.target_lun;
1868 cm->cm_desc.Default.RequestFlags =
1869 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
/* Stamp submit time and arm the per-command timeout callout. */
1871 csio->ccb_h.qos.sim_data = sbinuptime();
1872 #if __FreeBSD_version >= 1000029
1873 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1874 mprsas_scsiio_timeout, cm, 0);
1875 #else //__FreeBSD_version < 1000029
1876 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1877 mprsas_scsiio_timeout, cm);
1878 #endif //__FreeBSD_version >= 1000029
1881 targ->outstanding++;
1882 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1883 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1885 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1886 __func__, cm, ccb, targ->outstanding);
/* Build the PRP entries for the range list, then submit. */
1888 mpr_build_nvme_prp(sc, cm, req,
1889 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1890 mpr_map_command(sc, cm);
/*
 * XPT_SCSI_IO action handler: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI IO request (with
 * NVMe-UNMAP and EEDP/protection-information special cases), arm the
 * timeout, and submit to the hardware.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
1898 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1900 MPI2_SCSI_IO_REQUEST *req;
1901 struct ccb_scsiio *csio;
1902 struct mpr_softc *sc;
1903 struct mprsas_target *targ;
1904 struct mprsas_lun *lun;
1905 struct mpr_command *cm;
1906 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1907 uint16_t eedp_flags;
1908 uint32_t mpi_control;
1913 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1916 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1917 ("Target %d out of bounds in XPT_SCSI_IO\n",
1918 csio->ccb_h.target_id));
1919 targ = &sassc->targets[csio->ccb_h.target_id];
1920 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0 means the device is gone/undiscovered. */
1921 if (targ->handle == 0x0) {
1922 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1923 __func__, csio->ccb_h.target_id);
1924 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID members are only accessible through their volume. */
1928 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1929 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1930 "supported %u\n", __func__, csio->ccb_h.target_id);
1931 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1936 * Sometimes, it is possible to get a command that is not "In
1937 * Progress" and was actually aborted by the upper layer. Check for
1938 * this here and complete the command without error.
1940 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1941 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1942 "target %u\n", __func__, csio->ccb_h.target_id);
1947 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1948 * that the volume has timed out. We want volumes to be enumerated
1949 * until they are deleted/removed, not just failed.
1951 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1952 if (targ->devinfo == 0)
1953 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1955 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1960 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1961 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1962 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1968 * If target has a reset in progress, freeze the devq and return. The
1969 * devq will be released when the TM reset is finished.
1971 if (targ->flags & MPRSAS_TARGET_INRESET) {
1972 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1973 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1974 __func__, targ->tid);
1975 xpt_freeze_devq(ccb->ccb_h.path, 1);
/* Out of commands (or mid diag reset): freeze the SIM queue and ask
 * CAM to requeue the CCB later. */
1980 cm = mpr_alloc_command(sc);
1981 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1983 mpr_free_command(sc, cm);
1985 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1986 xpt_freeze_simq(sassc->sim, 1);
1987 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1989 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1990 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1995 /* For NVME device's issue UNMAP command directly to NVME drives by
1996 * constructing equivalent native NVMe DataSetManagement command.
1998 #if __FreeBSD_version >= 1100103
1999 scsi_opcode = scsiio_cdb_ptr(csio)[0];
2001 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2002 scsi_opcode = csio->cdb_io.cdb_ptr[0];
2004 scsi_opcode = csio->cdb_io.cdb_bytes[0];
2006 if (scsi_opcode == UNMAP &&
2008 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2009 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2010 if (rc == 1) { /* return command to CAM with success status */
2011 mpr_free_command(sc, cm);
2012 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2015 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
/* Normal path: build a plain MPI2 SCSI IO request. */
2019 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2020 bzero(req, sizeof(*req));
2021 req->DevHandle = htole16(targ->handle);
2022 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2024 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2025 req->SenseBufferLength = MPR_SENSE_LEN;
2027 req->ChainOffset = 0;
2028 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2033 req->DataLength = htole32(csio->dxfer_len);
2034 req->BidirectionalDataLength = 0;
2035 req->IoFlags = htole16(csio->cdb_len);
2038 /* Note: BiDirectional transfers are not supported */
/* Map CAM data direction onto the MPI control-field direction bits
 * and remember it in cm_flags for DMA mapping. */
2039 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2041 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2042 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2045 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2046 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2050 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field set. */
2054 if (csio->cdb_len == 32)
2055 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2057 * It looks like the hardware doesn't require an explicit tag
2058 * number for each transaction. SAM Task Management not supported
/* Translate the CAM tag action into MPI queueing control bits. */
2061 switch (csio->tag_action) {
2062 case MSG_HEAD_OF_Q_TAG:
2063 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2065 case MSG_ORDERED_Q_TAG:
2066 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2069 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2071 case CAM_TAG_ACTION_NONE:
2072 case MSG_SIMPLE_Q_TAG:
2074 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2077 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2078 req->Control = htole32(mpi_control);
2080 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2081 mpr_free_command(sc, cm);
2082 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from whichever representation the CCB carries. */
2087 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2088 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2090 KASSERT(csio->cdb_len <= IOCDBLEN,
2091 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2092 "is not set", csio->cdb_len));
2093 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2095 req->IoFlags = htole16(csio->cdb_len);
2098 * Check if EEDP is supported and enabled. If it is then check if the
2099 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2100 * is formatted for EEDP support. If all of this is true, set CDB up
2101 * for EEDP transfer.
2103 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2104 if (sc->eedp_enabled && eedp_flags) {
2105 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2106 if (lun->lun_id == csio->ccb_h.target_lun) {
2111 if ((lun != NULL) && (lun->eedp_formatted)) {
2112 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2113 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2114 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2115 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2116 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2118 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2120 req->EEDPFlags = htole16(eedp_flags);
2123 * If CDB less than 32, fill in Primary Ref Tag with
2124 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2125 * already there. Also, set protection bit. FreeBSD
2126 * currently does not support CDBs bigger than 16, but
2127 * the code doesn't hurt, and will be here for the
2130 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 for 16-byte CDBs, byte 2 otherwise. */
2131 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2132 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2133 PrimaryReferenceTag;
2134 for (i = 0; i < 4; i++) {
2136 req->CDB.CDB32[lba_byte + i];
2139 req->CDB.EEDP32.PrimaryReferenceTag =
2141 CDB.EEDP32.PrimaryReferenceTag);
2142 req->CDB.EEDP32.PrimaryApplicationTagMask =
2144 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
2148 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2149 req->EEDPFlags = htole16(eedp_flags);
2150 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2156 cm->cm_length = csio->dxfer_len;
2157 if (cm->cm_length != 0) {
2159 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
/* SGL starts at 32-bit word 24 of the 32-word request frame. */
2163 cm->cm_sge = &req->SGL;
2164 cm->cm_sglsize = (32 - 24) * 4;
2165 cm->cm_complete = mprsas_scsiio_complete;
2166 cm->cm_complete_data = ccb;
2168 cm->cm_lun = csio->ccb_h.target_lun;
2171 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2172 * and set descriptor type.
2174 if (targ->scsi_req_desc_type ==
2175 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2176 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2177 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2178 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2179 if (!sc->atomic_desc_capable) {
2180 cm->cm_desc.FastPathSCSIIO.DevHandle =
2181 htole16(targ->handle);
2184 cm->cm_desc.SCSIIO.RequestFlags =
2185 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2186 if (!sc->atomic_desc_capable)
2187 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Stamp submit time and arm the per-command timeout callout. */
2190 csio->ccb_h.qos.sim_data = sbinuptime();
2191 #if __FreeBSD_version >= 1000029
2192 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2193 mprsas_scsiio_timeout, cm, 0);
2194 #else //__FreeBSD_version < 1000029
2195 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2196 mprsas_scsiio_timeout, cm);
2197 #endif //__FreeBSD_version >= 1000029
2200 targ->outstanding++;
2201 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2202 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2204 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2205 __func__, cm, ccb, targ->outstanding);
2207 mpr_map_command(sc, cm);
2212 * mpr_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Log diagnostic detail (IOC status, SCSI status/state, sense data,
 * response info) for a SCSI IO request that did not complete
 * successfully.  Purely informational — no state is modified.
 * NOTE(review): interior lines are missing from this view; comments
 * describe only the visible statements.
 */
2215 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2216 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2220 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2221 MPI2_IOCSTATUS_MASK;
2222 u8 scsi_state = mpi_reply->SCSIState;
2223 u8 scsi_status = mpi_reply->SCSIStatus;
2224 char *desc_ioc_state = NULL;
2225 char *desc_scsi_status = NULL;
2226 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is filtered out — presumably a benign/noisy log-info
 * code; TODO confirm against the firmware log-info documentation. */
2228 if (log_info == 0x31170000)
/* Convert raw status codes to human-readable strings for the log. */
2231 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2233 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2236 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2237 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2238 if (targ->encl_level_valid) {
2239 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2240 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2241 targ->connector_name);
2245 * We can add more detail about underflow data here
/* %b format decodes the scsi_state bit-field symbolically. */
2248 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2249 "scsi_state %b\n", desc_scsi_status, scsi_status,
2250 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2251 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
/* Dump the sense buffer when autosense data is valid. */
2253 if (sc->mpr_debug & MPR_XINFO &&
2254 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2255 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2256 scsi_sense_print(csio);
2257 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2260 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2261 response_info = le32toh(mpi_reply->ResponseInfo);
2262 response_bytes = (u8 *)&response_info;
2263 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2265 mpr_describe_table(mpr_scsi_taskmgmt_string,
2266 response_bytes[0]));
/*
 * NOTE(review): this extract omits some original lines (e.g. per-case
 * `break;` statements and the trailing `return status;`) — the embedded
 * numbers at the left are the original file's line numbers.
 */
2270 /** mprsas_nvme_trans_status_code
2272  * Convert Native NVMe command error status to
2273  * equivalent SCSI error status.
2275  * Returns appropriate scsi_status
/*
 * Translate an NVMe completion status (status code type + status code)
 * into a SCSI status byte, and synthesize fixed-format sense data
 * (key/ASC/ASCQ) into the CCB so CAM sees an equivalent SCSI error.
 */
2278 mprsas_nvme_trans_status_code(struct nvme_status nvme_status,
2279     struct mpr_command *cm)
2281 u8 status = MPI2_SCSI_STATUS_GOOD;
2282 int skey, asc, ascq;
2283 union ccb *ccb = cm->cm_complete_data;
2284 int returned_sense_len;
/*
 * Default mapping: CHECK CONDITION / ILLEGAL REQUEST with no specific
 * cause; the switch below overrides these for recognized codes.
 */
2286 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2287 skey = SSD_KEY_ILLEGAL_REQUEST;
2288 asc = SCSI_ASC_NO_SENSE;
2289 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Dispatch on the NVMe Status Code Type (SCT) first, then the code. */
2291 switch (nvme_status.sct) {
2292 case NVME_SCT_GENERIC:
2293 switch (nvme_status.sc) {
2294 case NVME_SC_SUCCESS:
2295 status = MPI2_SCSI_STATUS_GOOD;
2296 skey = SSD_KEY_NO_SENSE;
2297 asc = SCSI_ASC_NO_SENSE;
2298 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2300 case NVME_SC_INVALID_OPCODE:
2301 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2302 skey = SSD_KEY_ILLEGAL_REQUEST;
2303 asc = SCSI_ASC_ILLEGAL_COMMAND;
2304 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2306 case NVME_SC_INVALID_FIELD:
2307 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2308 skey = SSD_KEY_ILLEGAL_REQUEST;
2309 asc = SCSI_ASC_INVALID_CDB;
2310 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2312 case NVME_SC_DATA_TRANSFER_ERROR:
2313 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2314 skey = SSD_KEY_MEDIUM_ERROR;
2315 asc = SCSI_ASC_NO_SENSE;
2316 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2318 case NVME_SC_ABORTED_POWER_LOSS:
2319 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2320 skey = SSD_KEY_ABORTED_COMMAND;
2321 asc = SCSI_ASC_WARNING;
2322 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2324 case NVME_SC_INTERNAL_DEVICE_ERROR:
2325 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2326 skey = SSD_KEY_HARDWARE_ERROR;
2327 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2328 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* All abort flavors collapse to TASK ABORTED / ABORTED COMMAND. */
2330 case NVME_SC_ABORTED_BY_REQUEST:
2331 case NVME_SC_ABORTED_SQ_DELETION:
2332 case NVME_SC_ABORTED_FAILED_FUSED:
2333 case NVME_SC_ABORTED_MISSING_FUSED:
2334 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2335 skey = SSD_KEY_ABORTED_COMMAND;
2336 asc = SCSI_ASC_NO_SENSE;
2337 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2339 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2340 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2341 skey = SSD_KEY_ILLEGAL_REQUEST;
2342 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2343 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2345 case NVME_SC_LBA_OUT_OF_RANGE:
2346 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2347 skey = SSD_KEY_ILLEGAL_REQUEST;
2348 asc = SCSI_ASC_ILLEGAL_BLOCK;
2349 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2351 case NVME_SC_CAPACITY_EXCEEDED:
2352 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2353 skey = SSD_KEY_MEDIUM_ERROR;
2354 asc = SCSI_ASC_NO_SENSE;
2355 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2357 case NVME_SC_NAMESPACE_NOT_READY:
2358 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2359 skey = SSD_KEY_NOT_READY;
2360 asc = SCSI_ASC_LUN_NOT_READY;
2361 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Command-specific status codes (SCT 1). */
2365 case NVME_SCT_COMMAND_SPECIFIC:
2366 switch (nvme_status.sc) {
2367 case NVME_SC_INVALID_FORMAT:
2368 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2369 skey = SSD_KEY_ILLEGAL_REQUEST;
2370 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2371 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2373 case NVME_SC_CONFLICTING_ATTRIBUTES:
2374 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2375 skey = SSD_KEY_ILLEGAL_REQUEST;
2376 asc = SCSI_ASC_INVALID_CDB;
2377 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Media and data-integrity errors (SCT 2). */
2381 case NVME_SCT_MEDIA_ERROR:
2382 switch (nvme_status.sc) {
2383 case NVME_SC_WRITE_FAULTS:
2384 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2385 skey = SSD_KEY_MEDIUM_ERROR;
2386 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2387 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2389 case NVME_SC_UNRECOVERED_READ_ERROR:
2390 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2391 skey = SSD_KEY_MEDIUM_ERROR;
2392 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2393 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* T10 protection-information check failures map 1:1 to SCSI ASC/ASCQ. */
2395 case NVME_SC_GUARD_CHECK_ERROR:
2396 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2397 skey = SSD_KEY_MEDIUM_ERROR;
2398 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2399 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2401 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2402 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2403 skey = SSD_KEY_MEDIUM_ERROR;
2404 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2405 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2407 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2408 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2409 skey = SSD_KEY_MEDIUM_ERROR;
2410 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2411 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2413 case NVME_SC_COMPARE_FAILURE:
2414 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2415 skey = SSD_KEY_MISCOMPARE;
2416 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2417 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2419 case NVME_SC_ACCESS_DENIED:
2420 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2421 skey = SSD_KEY_ILLEGAL_REQUEST;
2422 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2423 ascq = SCSI_ASCQ_INVALID_LUN_ID;
/*
 * Build fixed-format autosense into the CCB from the selected
 * key/ASC/ASCQ and compute the sense residual, then flag the CCB as
 * carrying valid autosense data.
 */
2429 returned_sense_len = sizeof(struct scsi_sense_data);
2430 if (returned_sense_len < ccb->csio.sense_len)
2431 ccb->csio.sense_resid = ccb->csio.sense_len -
2434 ccb->csio.sense_resid = 0;
2436 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2437 1, skey, asc, ascq, SSD_ELEM_NONE);
2438 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2443 /** mprsas_complete_nvme_unmap
2445  * Complete native NVMe command issued using NVMe Encapsulated
/*
 * Returns the SCSI status byte derived from the encapsulated NVMe
 * error response: GOOD unless the reply reports an error response,
 * in which case the NVMe completion (stashed in cm->cm_sense) is
 * translated via mprsas_nvme_trans_status_code().
 */
2449 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2451 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2452 struct nvme_completion *nvme_completion = NULL;
2453 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2455 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
/* Non-zero ErrorResponseCount means the device returned NVMe error data. */
2456 if (le16toh(mpi_reply->ErrorResponseCount)){
2457 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2458 scsi_status = mprsas_nvme_trans_status_code(
2459 nvme_completion->status, cm);
/*
 * Completion handler for XPT_SCSI_IO commands: syncs/unloads DMA maps,
 * updates per-target accounting, maps the MPI IOCStatus/SCSIStatus from
 * the firmware reply onto a CAM CCB status, copies autosense data, and
 * finally frees the command and returns the CCB to CAM.
 *
 * NOTE(review): this extract omits some original lines (break statements,
 * closing braces, xpt_done calls) — the left-hand numbers are the original
 * file's line numbers.
 */
2465 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2467 MPI2_SCSI_IO_REPLY *rep;
2469 struct ccb_scsiio *csio;
2470 struct mprsas_softc *sassc;
2471 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2472 u8 *TLR_bits, TLR_on, *scsi_cdb;
2475 struct mprsas_target *target;
2476 target_id_t target_id;
2479 mpr_dprint(sc, MPR_TRACE,
2480 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2481 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2482 cm->cm_targ->outstanding);
/* Cancel the per-command timeout; completion must hold the softc lock. */
2484 callout_stop(&cm->cm_callout);
2485 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2488 ccb = cm->cm_complete_data;
2490 target_id = csio->ccb_h.target_id;
2491 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2493 * XXX KDM if the chain allocation fails, does it matter if we do
2494 * the sync and unload here? It is simpler to do it in every case,
2495 * assuming it doesn't cause problems.
/* Post-DMA sync in the direction the command moved data, then unload. */
2497 if (cm->cm_data != NULL) {
2498 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2499 dir = BUS_DMASYNC_POSTREAD;
2500 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2501 dir = BUS_DMASYNC_POSTWRITE;
2502 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2503 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Per-target bookkeeping: command is no longer outstanding. */
2506 cm->cm_targ->completed++;
2507 cm->cm_targ->outstanding--;
2508 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2509 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
/* Recovery-path logging: timed-out command, active TM, or diag reset. */
2511 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2512 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2513 if (cm->cm_reply != NULL)
2514 mprsas_log_command(cm, MPR_RECOVERY,
2515 "completed timedout cm %p ccb %p during recovery "
2516 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2517 le16toh(rep->IOCStatus), rep->SCSIStatus,
2518 rep->SCSIState, le32toh(rep->TransferCount));
2520 mprsas_log_command(cm, MPR_RECOVERY,
2521 "completed timedout cm %p ccb %p during recovery\n",
2523 } else if (cm->cm_targ->tm != NULL) {
2524 if (cm->cm_reply != NULL)
2525 mprsas_log_command(cm, MPR_RECOVERY,
2526 "completed cm %p ccb %p during recovery "
2527 "ioc %x scsi %x state %x xfer %u\n",
2528 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2529 rep->SCSIStatus, rep->SCSIState,
2530 le32toh(rep->TransferCount));
2532 mprsas_log_command(cm, MPR_RECOVERY,
2533 "completed cm %p ccb %p during recovery\n",
2535 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2536 mprsas_log_command(cm, MPR_RECOVERY,
2537 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2540 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2542 * We ran into an error after we tried to map the command,
2543 * so we're getting a callback without queueing the command
2544 * to the hardware. So we set the status here, and it will
2545 * be retained below. We'll go through the "fast path",
2546 * because there can be no reply when we haven't actually
2547 * gone out to the hardware.
2549 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2552 * Currently the only error included in the mask is
2553 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2554 * chain frames. We need to freeze the queue until we get
2555 * a command that completed without this error, which will
2556 * hopefully have some chain frames attached that we can
2557 * use. If we wanted to get smarter about it, we would
2558 * only unfreeze the queue in this condition when we're
2559 * sure that we're getting some chain frames back. That's
2560 * probably unnecessary.
2562 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2563 xpt_freeze_simq(sassc->sim, 1);
2564 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2565 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2566 "freezing SIM queue\n");
2571 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2572 * flag, and use it in a few places in the rest of this function for
2573 * convenience. Use the macro if available.
2575 #if __FreeBSD_version >= 1100103
2576 scsi_cdb = scsiio_cdb_ptr(csio);
2578 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2579 scsi_cdb = csio->cdb_io.cdb_ptr;
2581 scsi_cdb = csio->cdb_io.cdb_bytes;
2585 * If this is a Start Stop Unit command and it was issued by the driver
2586 * during shutdown, decrement the refcount to account for all of the
2587 * commands that were sent. All SSU commands should be completed before
2588 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2591 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2592 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2596 /* Take the fast path to completion */
/* No firmware reply frame: succeed unless an error was set above. */
2597 if (cm->cm_reply == NULL) {
2598 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2599 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2600 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2602 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2603 csio->scsi_status = SCSI_STATUS_OK;
2605 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2606 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2607 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2608 mpr_dprint(sc, MPR_XINFO,
2609 "Unfreezing SIM queue\n");
2614 * There are two scenarios where the status won't be
2615 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2616 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2618 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2620 * Freeze the dev queue so that commands are
2621 * executed in the correct order after error
2624 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2625 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2627 mpr_free_command(sc, cm);
2632 target = &sassc->targets[target_id];
/*
 * Translated NVMe UNMAP (issued as NVMe Encapsulated DSM): derive the
 * SCSI status from the encapsulated NVMe error response.
 */
2633 if (scsi_cdb[0] == UNMAP &&
2635 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2636 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2637 csio->scsi_status = rep->SCSIStatus;
2640 mprsas_log_command(cm, MPR_XINFO,
2641 "ioc %x scsi %x state %x xfer %u\n",
2642 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2643 le32toh(rep->TransferCount));
/* Map the firmware's IOCStatus onto a CAM CCB status. */
2645 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2646 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2647 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2649 case MPI2_IOCSTATUS_SUCCESS:
2650 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2651 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2652 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2653 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2655 /* Completion failed at the transport level. */
2656 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2657 MPI2_SCSI_STATE_TERMINATED)) {
2658 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2662 /* In a modern packetized environment, an autosense failure
2663 * implies that there's not much else that can be done to
2664 * recover the command.
2666 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2667 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2672 * CAM doesn't care about SAS Response Info data, but if this is
2673 * the state check if TLR should be done. If not, clear the
2674 * TLR_bits for the target.
2676 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2677 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2678 == MPR_SCSI_RI_INVALID_FRAME)) {
2679 sc->mapping_table[target_id].TLR_bits =
2680 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2684 * Intentionally override the normal SCSI status reporting
2685 * for these two cases. These are likely to happen in a
2686 * multi-initiator environment, and we want to make sure that
2687 * CAM retries these commands rather than fail them.
2689 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2690 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2691 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2695 /* Handle normal status and sense */
2696 csio->scsi_status = rep->SCSIStatus;
2697 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2698 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2700 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
/* Copy device autosense into the CCB and compute the sense residual. */
2702 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2703 int sense_len, returned_sense_len;
2705 returned_sense_len = min(le32toh(rep->SenseCount),
2706 sizeof(struct scsi_sense_data));
2707 if (returned_sense_len < csio->sense_len)
2708 csio->sense_resid = csio->sense_len -
2711 csio->sense_resid = 0;
2713 sense_len = min(returned_sense_len,
2714 csio->sense_len - csio->sense_resid);
2715 bzero(&csio->sense_data, sizeof(csio->sense_data));
2716 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2717 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2721 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2722 * and it's page code 0 (Supported Page List), and there is
2723 * inquiry data, and this is for a sequential access device, and
2724 * the device is an SSP target, and TLR is supported by the
2725 * controller, turn the TLR_bits value ON if page 0x90 is
2728 if ((scsi_cdb[0] == INQUIRY) &&
2729 (scsi_cdb[1] & SI_EVPD) &&
2730 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2731 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2732 (csio->data_ptr != NULL) &&
2733 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2734 (sc->control_TLR) &&
2735 (sc->mapping_table[target_id].device_info &
2736 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2737 vpd_list = (struct scsi_vpd_supported_page_list *)
2739 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2740 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2741 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
/* Allocation length comes from CDB bytes 3-4, minus the residual. */
2742 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2743 alloc_len -= csio->resid;
2744 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2745 if (vpd_list->list[i] == 0x90) {
2753 * If this is a SATA direct-access end device, mark it so that
2754 * a SCSI StartStopUnit command will be sent to it when the
2755 * driver is being shutdown.
2757 if ((scsi_cdb[0] == INQUIRY) &&
2758 (csio->data_ptr != NULL) &&
2759 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2760 (sc->mapping_table[target_id].device_info &
2761 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2762 ((sc->mapping_table[target_id].device_info &
2763 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2764 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2765 target = &sassc->targets[target_id];
2766 target->supports_SSU = TRUE;
2767 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2771 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2772 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2774 * If devinfo is 0 this will be a volume. In that case don't
2775 * tell CAM that the volume is not there. We want volumes to
2776 * be enumerated until they are deleted/removed, not just
2779 if (cm->cm_targ->devinfo == 0)
2780 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
2782 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2784 case MPI2_IOCSTATUS_INVALID_SGL:
2785 mpr_print_scsiio_cmd(sc, cm);
2786 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2788 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2790 * This is one of the responses that comes back when an I/O
2791 * has been aborted. If it is because of a timeout that we
2792 * initiated, just set the status to CAM_CMD_TIMEOUT.
2793 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2794 * command is the same (it gets retried, subject to the
2795 * retry counter), the only difference is what gets printed
2798 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2799 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2801 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2803 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2804 /* resid is ignored for this condition */
2806 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2808 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2809 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2811 * These can sometimes be transient transport-related
2812 * errors, and sometimes persistent drive-related errors.
2813 * We used to retry these without decrementing the retry
2814 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2815 * we hit a persistent drive problem that returns one of
2816 * these error codes, we would retry indefinitely. So,
2817 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2818 * count and avoid infinite retries. We're taking the
2819 * potential risk of flagging false failures in the event
2820 * of a topology-related error (e.g. a SAS expander problem
2821 * causes a command addressed to a drive to fail), but
2822 * avoiding getting into an infinite retry loop.
2824 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2825 mpr_dprint(sc, MPR_INFO,
2826 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2827 mpr_describe_table(mpr_iocstatus_string,
2828 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2829 target_id, cm->cm_desc.Default.SMID,
2830 le32toh(rep->IOCLogInfo));
2831 mpr_dprint(sc, MPR_XINFO,
2832 "SCSIStatus %x SCSIState %x xfercount %u\n",
2833 rep->SCSIStatus, rep->SCSIState,
2834 le32toh(rep->TransferCount));
2836 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2837 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2838 case MPI2_IOCSTATUS_INVALID_VPID:
2839 case MPI2_IOCSTATUS_INVALID_FIELD:
2840 case MPI2_IOCSTATUS_INVALID_STATE:
2841 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2842 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2843 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2844 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2845 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2847 mprsas_log_command(cm, MPR_XINFO,
2848 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2849 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2850 rep->SCSIStatus, rep->SCSIState,
2851 le32toh(rep->TransferCount));
2852 csio->resid = cm->cm_length;
2854 if (scsi_cdb[0] == UNMAP &&
2856 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2857 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2859 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2864 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
/* Common tail: release a frozen SIM queue and freeze the devq on error. */
2866 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2867 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2868 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2869 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
2873 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2874 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2875 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2878 mpr_free_command(sc, cm);
2882 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough commands: validates the reply,
 * checks IOCStatus/SASStatus and the SMP function result byte, sets the
 * CCB status accordingly, then syncs/unloads the DMA map and frees the
 * command.
 */
2884 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2886 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2887 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2891 ccb = cm->cm_complete_data;
2894 * Currently there should be no way we can hit this case. It only
2895 * happens when we have a failure to allocate chain frames, and SMP
2896 * commands require two S/G elements only. That should be handled
2897 * in the standard request size.
2899 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2900 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2901 "request!\n", __func__, cm->cm_flags);
2902 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2906 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2908 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2909 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the request for logging. */
2913 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2914 sasaddr = le32toh(req->SASAddress.Low);
2915 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2917 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2918 MPI2_IOCSTATUS_SUCCESS ||
2919 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2920 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2921 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2922 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2926 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2927 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2929 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2930 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2932 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2936 * We sync in both directions because we had DMAs in the S/G list
2937 * in both directions.
2939 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2940 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2941 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2942 mpr_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request to the given SAS address.
 * Resolves the request/response buffers from the CCB (virtual addresses
 * only — physical addresses are rejected), sets up a two-element uio for
 * the bidirectional transfer, and hands the command to mpr_map_command().
 * Completion is routed to mprsas_smpio_complete().
 */
2947 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2949 struct mpr_command *cm;
2950 uint8_t *request, *response;
2951 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2952 struct mpr_softc *sc;
/* Newer CAM: data-address kind is encoded in CAM_DATA_MASK flags. */
2960 #if (__FreeBSD_version >= 1000028) || \
2961 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2962 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2963 case CAM_DATA_PADDR:
2964 case CAM_DATA_SG_PADDR:
2966 * XXX We don't yet support physical addresses here.
2968 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2969 "supported\n", __func__);
2970 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2975 * The chip does not support more than one buffer for the
2976 * request or response.
2978 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2979 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2980 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2981 "response buffer segments not supported for SMP\n",
2983 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2989 * The CAM_SCATTER_VALID flag was originally implemented
2990 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2991 * We have two. So, just take that flag to mean that we
2992 * might have S/G lists, and look at the S/G segment count
2993 * to figure out whether that is the case for each individual
2996 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2997 bus_dma_segment_t *req_sg;
2999 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3000 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3002 request = ccb->smpio.smp_request;
3004 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3005 bus_dma_segment_t *rsp_sg;
3007 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3008 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3010 response = ccb->smpio.smp_response;
3012 case CAM_DATA_VADDR:
3013 request = ccb->smpio.smp_request;
3014 response = ccb->smpio.smp_response;
3017 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
/* Older CAM: same logic expressed with the legacy flag bits. */
3021 #else /* __FreeBSD_version < 1000028 */
3023 * XXX We don't yet support physical addresses here.
3025 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3026 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3027 "supported\n", __func__);
3028 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3034 * If the user wants to send an S/G list, check to make sure they
3035 * have single buffers.
3037 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3039 * The chip does not support more than one buffer for the
3040 * request or response.
3042 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3043 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3044 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3045 "response buffer segments not supported for SMP\n",
3047 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3053 * The CAM_SCATTER_VALID flag was originally implemented
3054 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3055 * We have two. So, just take that flag to mean that we
3056 * might have S/G lists, and look at the S/G segment count
3057 * to figure out whether that is the case for each individual
3060 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3061 bus_dma_segment_t *req_sg;
3063 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3064 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3066 request = ccb->smpio.smp_request;
3068 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3069 bus_dma_segment_t *rsp_sg;
3071 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3072 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3074 response = ccb->smpio.smp_response;
3076 request = ccb->smpio.smp_request;
3077 response = ccb->smpio.smp_response;
3079 #endif /* __FreeBSD_version < 1000028 */
3081 cm = mpr_alloc_command(sc);
3083 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3085 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the MPI SMP passthrough request frame. */
3090 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3091 bzero(req, sizeof(*req));
3092 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3094 /* Allow the chip to use any route to this SAS address. */
3095 req->PhysicalPort = 0xff;
3097 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3099 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3101 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3102 "%#jx\n", __func__, (uintmax_t)sasaddr);
3104 mpr_init_sge(cm, req, &req->SGL);
3107 * Set up a uio to pass into mpr_map_command(). This allows us to
3108 * do one map command, and one busdma call in there.
3110 cm->cm_uio.uio_iov = cm->cm_iovec;
3111 cm->cm_uio.uio_iovcnt = 2;
3112 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3115 * The read/write flag isn't used by busdma, but set it just in
3116 * case. This isn't exactly accurate, either, since we're going in
3119 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] carries the SMP request, iovec[1] receives the response. */
3121 cm->cm_iovec[0].iov_base = request;
3122 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3123 cm->cm_iovec[1].iov_base = response;
3124 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3126 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3127 cm->cm_iovec[1].iov_len;
3130 * Trigger a warning message in mpr_data_cb() for the user if we
3131 * wind up exceeding two S/G segments. The chip expects one
3132 * segment for the request and another for the response.
3134 cm->cm_max_segs = 2;
3136 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3137 cm->cm_complete = mprsas_smpio_complete;
3138 cm->cm_complete_data = ccb;
3141 * Tell the mapping code that we're using a uio, and that this is
3142 * an SMP passthrough request. There is a little special-case
3143 * logic there (in mpr_data_cb()) to handle the bidirectional
3146 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3147 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3149 /* The chip data format is little endian. */
3150 req->SASAddress.High = htole32(sasaddr >> 32);
3151 req->SASAddress.Low = htole32(sasaddr);
3154 * XXX Note that we don't have a timeout/abort mechanism here.
3155 * From the manual, it looks like task management requests only
3156 * work for SCSI IO and SATA passthrough requests. We may need to
3157 * have a mechanism to retry requests in the event of a chip reset
3158 * at least. Hopefully the chip will insure that any errors short
3159 * of that are relayed back to the driver.
3161 error = mpr_map_command(sc, cm);
/* EINPROGRESS from mpr_map_command() is normal (deferred mapping). */
3162 if ((error != 0) && (error != EINPROGRESS)) {
3163 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3164 "mpr_map_command()\n", __func__, error);
/* Failure path: release the command and fail the CCB. */
3171 mpr_free_command(sc, cm);
3172 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: determine which SAS address should receive
 * the SMP request — the target itself if it embeds an SMP target,
 * otherwise the target's parent (typically the expander) — then hand
 * off to mprsas_send_smpcmd().
 */
3178 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3180 struct mpr_softc *sc;
3181 struct mprsas_target *targ;
3182 uint64_t sasaddr = 0;
3187 * Make sure the target exists.
3189 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3190 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3191 targ = &sassc->targets[ccb->ccb_h.target_id];
3192 if (targ->handle == 0x0) {
3193 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3194 __func__, ccb->ccb_h.target_id);
3195 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3201 * If this device has an embedded SMP target, we'll talk to it
3203 * figure out what the expander's address is.
3205 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3206 sasaddr = targ->sasaddr;
3209 * If we don't have a SAS address for the expander yet, try
3210 * grabbing it from the page 0x83 information cached in the
3211 * transport layer for this target. LSI expanders report the
3212 * expander SAS address as the port-associated SAS address in
3213 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3216 * XXX KDM disable this for now, but leave it commented out so that
3217 * it is obvious that this is another possible way to get the SAS
3220 * The parent handle method below is a little more reliable, and
3221 * the other benefit is that it works for devices other than SES
3222 * devices. So you can send a SMP request to a da(4) device and it
3223 * will get routed to the expander that device is attached to.
3224 * (Assuming the da(4) device doesn't contain an SMP target...)
3228 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3232 * If we still don't have a SAS address for the expander, look for
3233 * the parent device of this device, which is probably the expander.
3236 #ifdef OLD_MPR_PROBE
3237 struct mprsas_target *parent_target;
3240 if (targ->parent_handle == 0x0) {
3241 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3242 "a valid parent handle!\n", __func__, targ->handle);
3243 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* Legacy probe path: look the parent up by handle. */
3246 #ifdef OLD_MPR_PROBE
3247 parent_target = mprsas_find_target_by_handle(sassc, 0,
3248 targ->parent_handle);
3250 if (parent_target == NULL) {
3251 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3252 "a valid parent target!\n", __func__, targ->handle);
3253 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3257 if ((parent_target->devinfo &
3258 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3259 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3260 "does not have an SMP target!\n", __func__,
3261 targ->handle, parent_target->handle);
3262 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3266 sasaddr = parent_target->sasaddr;
/* Current probe path: parent info is cached on the target itself. */
3267 #else /* OLD_MPR_PROBE */
3268 if ((targ->parent_devinfo &
3269 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3270 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3271 "does not have an SMP target!\n", __func__,
3272 targ->handle, targ->parent_handle);
3273 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3277 if (targ->parent_sasaddr == 0x0) {
3278 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3279 "%d does not have a valid SAS address!\n", __func__,
3280 targ->handle, targ->parent_handle);
3281 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3285 sasaddr = targ->parent_sasaddr;
3286 #endif /* OLD_MPR_PROBE */
/* Still no address — nowhere to route the SMP request. */
3291 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3292 "handle %d\n", __func__, targ->handle);
3293 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3296 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3304 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a high-priority task management
 * command, build a Target Reset TM request (hard link reset / SATA link
 * reset) for the target's device handle, mark the target as in-reset,
 * and submit it. Completion is routed to mprsas_resetdev_complete().
 */
3307 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3309 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3310 struct mpr_softc *sc;
3311 struct mpr_command *tm;
3312 struct mprsas_target *targ;
3314 MPR_FUNCTRACE(sassc->sc);
3315 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3317 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3318 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3320 tm = mpr_alloc_command(sc);
3322 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3323 "mprsas_action_resetdev\n");
3324 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Build the SCSI task management request frame for a target reset. */
3329 targ = &sassc->targets[ccb->ccb_h.target_id];
3330 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3331 req->DevHandle = htole16(targ->handle);
3332 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3333 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3335 /* SAS Hard Link Reset / SATA Link Reset */
3336 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3339 tm->cm_desc.HighPriority.RequestFlags =
3340 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3341 tm->cm_complete = mprsas_resetdev_complete;
3342 tm->cm_complete_data = ccb;
3344 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3345 __func__, targ->tid);
3347 targ->flags |= MPRSAS_TARGET_INRESET;
3349 mpr_map_command(sc, tm);
/*
 * Completion handler for the target reset issued by
 * mprsas_action_resetdev().  Translates the task-management reply into
 * a CCB status and releases the TM command.
 */
3353 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3355 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3359 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3361 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3362 ccb = tm->cm_complete_data;
3365 * Currently there should be no way we can hit this case. It only
3366 * happens when we have a failure to allocate chain frames, and
3367 * task management commands don't have S/G lists.
/* Driver-side error on the command itself: fail the CCB outright. */
3369 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3370 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3372 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3374 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3375 "handle %#04x! This should not happen!\n", __func__,
3376 tm->cm_flags, req->DevHandle);
3377 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* NOTE(review): resp is dereferenced below with no visible NULL check
 * on cm_reply in this excerpt -- confirm the full source guards it. */
3381 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3382 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
/* TM completed: report success and announce a bus device reset. */
3384 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3385 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
3386 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3390 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3394 mprsas_free_tm(sc, tm);
/*
 * CAM polling entry point, used when interrupts are not available
 * (e.g. while dumping after a panic).  Clears MPR_TRACE so frequent
 * debug output does not slow the poll loop, then services the
 * hardware by calling the interrupt handler directly.
 */
3399 mprsas_poll(struct cam_sim *sim)
3401 struct mprsas_softc *sassc;
3403 sassc = cam_sim_softc(sim);
3405 if (sassc->sc->mpr_debug & MPR_TRACE) {
3406 /* frequent debug messages during a panic just slow
3407 * everything down too much.
3409 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3411 sassc->sc->mpr_debug &= ~MPR_TRACE;
/* Run the interrupt service routine with the lock already held. */
3414 mpr_intr_locked(sassc->sc);
/*
 * CAM asynchronous event callback.  Handles:
 *  - AC_ADVINFO_CHANGED: re-reads the long read-capacity data via
 *    XPT_DEV_ADVINFO to track per-LUN EEDP formatting;
 *  - AC_FOUND_DEVICE: on older FreeBSD versions, checks newly found
 *    devices for EEDP support via mprsas_check_eedp().
 * NOTE(review): 'sassc', 'buftype', 'lunid', 'found_lun' and 'cgd' are
 * used below without visible declarations/assignments in this excerpt
 * (lines elided) -- confirm against the full source.
 */
3418 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3421 struct mpr_softc *sc;
3423 sc = (struct mpr_softc *)callback_arg;
/* Advanced-info change events only exist on these FreeBSD versions. */
3426 #if (__FreeBSD_version >= 1000006) || \
3427 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3428 case AC_ADVINFO_CHANGED: {
3429 struct mprsas_target *target;
3430 struct mprsas_softc *sassc;
3431 struct scsi_read_capacity_data_long rcap_buf;
3432 struct ccb_dev_advinfo cdai;
3433 struct mprsas_lun *lun;
3438 buftype = (uintptr_t)arg;
3444 * We're only interested in read capacity data changes.
3446 if (buftype != CDAI_TYPE_RCAPLONG)
3450 * See the comment in mpr_attach_sas() for a detailed
3451 * explanation. In these versions of FreeBSD we register
3452 * for all events and filter out the events that don't
/* Ignore events for paths belonging to some other SIM. */
3455 #if (__FreeBSD_version < 1000703) || \
3456 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3457 if (xpt_path_path_id(path) != sassc->sim->path_id)
3462 * We should have a handle for this, but check to make sure.
3464 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3465 ("Target %d out of bounds in mprsas_async\n",
3466 xpt_path_target_id(path)));
3467 target = &sassc->targets[xpt_path_target_id(path)];
3468 if (target->handle == 0)
/* Find (or create) the per-LUN record for this path's LUN. */
3471 lunid = xpt_path_lun_id(path);
3473 SLIST_FOREACH(lun, &target->luns, lun_link) {
3474 if (lun->lun_id == lunid) {
3480 if (found_lun == 0) {
3481 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3484 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3485 "LUN for EEDP support.\n");
3488 lun->lun_id = lunid;
3489 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data via XPT_DEV_ADVINFO. */
3492 bzero(&rcap_buf, sizeof(rcap_buf));
3493 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3494 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3495 cdai.ccb_h.flags = CAM_DIR_IN;
3496 cdai.buftype = CDAI_TYPE_RCAPLONG;
3497 #if (__FreeBSD_version >= 1100061) || \
3498 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3499 cdai.flags = CDAI_FLAG_NONE;
3503 cdai.bufsiz = sizeof(rcap_buf);
3504 cdai.buf = (uint8_t *)&rcap_buf;
3505 xpt_action((union ccb *)&cdai);
3506 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3507 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* Record the LUN's EEDP state based on the protection bit. */
3509 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3510 && (rcap_buf.prot & SRC16_PROT_EN)) {
3511 lun->eedp_formatted = TRUE;
3512 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3514 lun->eedp_formatted = FALSE;
3515 lun->eedp_block_size = 0;
3520 case AC_FOUND_DEVICE: {
3521 struct ccb_getdev *cgd;
3524 * See the comment in mpr_attach_sas() for a detailed
3525 * explanation. In these versions of FreeBSD we register
3526 * for all events and filter out the events that don't
3529 #if (__FreeBSD_version < 1000703) || \
3530 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3531 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
/* Older FreeBSD: probe the new device for EEDP support ourselves. */
3536 #if (__FreeBSD_version < 901503) || \
3537 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3538 mprsas_check_eedp(sc, path, cgd);
3547 #if (__FreeBSD_version < 901503) || \
3548 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Determine whether a newly found device/LUN is EEDP (end-to-end data
 * protection) capable.  If the inquiry data advertises protection
 * support, issue a READ CAPACITY 16 whose completion handler
 * (mprsas_read_cap_done) records the LUN's EEDP state.
 * NOTE(review): several error/early-return lines are elided in this
 * excerpt (e.g. after the ccb == NULL check and the malloc) -- confirm
 * cleanup paths against the full source.
 */
3550 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3551 struct ccb_getdev *cgd)
3553 struct mprsas_softc *sassc = sc->sassc;
3554 struct ccb_scsiio *csio;
3555 struct scsi_read_capacity_16 *scsi_cmd;
3556 struct scsi_read_capacity_eedp *rcap_buf;
3558 target_id_t targetid;
3561 struct cam_path *local_path;
3562 struct mprsas_target *target;
3563 struct mprsas_lun *lun;
3567 pathid = cam_sim_path(sassc->sim);
3568 targetid = xpt_path_target_id(path);
3569 lunid = xpt_path_lun_id(path);
3571 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3572 "mprsas_check_eedp\n", targetid));
3573 target = &sassc->targets[targetid];
3574 if (target->handle == 0x0)
3578 * Determine if the device is EEDP capable.
3580 * If this flag is set in the inquiry data, the device supports
3581 * protection information, and must support the 16 byte read capacity
3582 * command, otherwise continue without sending read cap 16.
3584 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3588 * Issue a READ CAPACITY 16 command. This info is used to determine if
3589 * the LUN is formatted for EEDP support.
3591 ccb = xpt_alloc_ccb_nowait();
3593 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
/* Build a path to this specific target/LUN for the internal command. */
3598 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3600 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3607 * If LUN is already in list, don't create a new one.
3610 SLIST_FOREACH(lun, &target->luns, lun_link) {
3611 if (lun->lun_id == lunid) {
3617 lun = malloc(sizeof(struct mprsas_lun),  M_MPR,
3620 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3622 xpt_free_path(local_path);
3626 lun->lun_id = lunid;
3627 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3630 xpt_path_string(local_path, path_str, sizeof(path_str));
3631 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3632 path_str, target->handle);
3635 * Issue a READ CAPACITY 16 command for the LUN. The
3636 * mprsas_read_cap_done function will load the read cap info into the
/* Buffer is freed by mprsas_read_cap_done() when the command finishes. */
3639 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3641 if (rcap_buf == NULL) {
3642 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3643 "buffer for EEDP support.\n");
3644 xpt_free_path(ccb->ccb_h.path);
/* Set up the SCSI I/O CCB: direction, retries, timeout, callback. */
3648 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3650 csio->ccb_h.func_code = XPT_SCSI_IO;
3651 csio->ccb_h.flags = CAM_DIR_IN;
3652 csio->ccb_h.retry_count = 4;
3653 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3654 csio->ccb_h.timeout = 60000;
3655 csio->data_ptr = (uint8_t *)rcap_buf;
3656 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3657 csio->sense_len = MPR_SENSE_LEN;
3658 csio->cdb_len = sizeof(*scsi_cmd);
3659 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* Build the CDB: SERVICE ACTION IN(16) / READ CAPACITY 16 (0x9E). */
3661 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3662 bzero(scsi_cmd, sizeof(*scsi_cmd));
3663 scsi_cmd->opcode = 0x9E;
3664 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3665 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc for the completion handler to retrieve. */
3667 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion handler for the internal READ CAPACITY 16 issued by
 * mprsas_check_eedp().  Records the matching LUN's EEDP state and
 * block size, then frees the data buffer, the path, and the CCB.
 */
3672 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3674 struct mprsas_softc *sassc;
3675 struct mprsas_target *target;
3676 struct mprsas_lun *lun;
3677 struct scsi_read_capacity_eedp *rcap_buf;
3679 if (done_ccb == NULL)
3682 /* Driver need to release devq, it Scsi command is
3683 * generated by driver internally.
3684 * Currently there is a single place where driver
3685 * calls scsi command internally. In future if driver
3686 * calls more scsi command internally, it needs to release
3687 * devq internally, since those command will not go back to
3690 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3691 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3692 xpt_release_devq(done_ccb->ccb_h.path,
3693 /*count*/ 1, /*run_queue*/TRUE);
3696 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3699 * Get the LUN ID for the path and look it up in the LUN list for the
/* The softc was stashed in ppriv_ptr1 by mprsas_check_eedp(). */
3702 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3703 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3704 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3705 target = &sassc->targets[done_ccb->ccb_h.target_id];
3706 SLIST_FOREACH(lun, &target->luns, lun_link) {
3707 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3711 * Got the LUN in the target's LUN list. Fill it in with EEDP
3712 * info. If the READ CAP 16 command had some SCSI error (common
3713 * if command is not supported), mark the lun as not supporting
3714 * EEDP and set the block size to 0.
3716 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3717 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3718 lun->eedp_formatted = FALSE;
3719 lun->eedp_block_size = 0;
/* Protection-enable bit set: LUN is formatted for EEDP. */
3723 if (rcap_buf->protect & 0x01) {
3724 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3725 "%d is formatted for EEDP support.\n",
3726 done_ccb->ccb_h.target_lun,
3727 done_ccb->ccb_h.target_id);
3728 lun->eedp_formatted = TRUE;
3729 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3734 // Finished with this CCB and path.
3735 free(rcap_buf, M_MPR);
3736 xpt_free_path(done_ccb->ccb_h.path);
3737 xpt_free_ccb(done_ccb);
3739 #endif /* (__FreeBSD_version < 901503) || \
3740 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Prepare a task-management command: allocate a CCB whose path will be
 * used to release the devq once the TM completes, and mark the target
 * in-reset so no new I/O is dispatched to it meanwhile.
 * NOTE(review): no visible NULL check on the xpt_alloc_ccb_nowait()
 * result in this excerpt (lines elided) -- confirm the full source
 * guards it.
 */
3743 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3744 struct mprsas_target *target, lun_id_t lun_id)
3750 * Set the INRESET flag for this target so that no I/O will be sent to
3751 * the target until the reset has completed. If an I/O request does
3752 * happen, the devq will be frozen. The CCB holds the path which is
3753 * used to release the devq. The devq is released and the CCB is freed
3754 * when the TM completes.
3756 ccb = xpt_alloc_ccb_nowait();
3758 path_id = cam_sim_path(sc->sassc->sim);
3759 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3760 target->tid, lun_id) != CAM_REQ_CMP) {
3764 tm->cm_targ = target;
3765 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off SAS discovery: flag that we are waiting for port enable
 * (keeps the simq frozen until discovery finishes) and send the
 * Port Enable request to the IOC.
 */
3771 mprsas_startup(struct mpr_softc *sc)
3774 * Send the port enable message and set the wait_for_port_enable flag.
3775 * This flag helps to keep the simq frozen until all discovery events
3778 sc->wait_for_port_enable = 1;
3779 mprsas_send_portenable(sc);
/*
 * Build and send a MPI2 Port Enable request to the IOC.  Completion is
 * handled by mprsas_portenable_complete().
 * NOTE(review): the error-return path when mpr_alloc_command() yields
 * NULL is elided in this excerpt -- confirm against the full source.
 */
3784 mprsas_send_portenable(struct mpr_softc *sc)
3786 MPI2_PORT_ENABLE_REQUEST *request;
3787 struct mpr_command *cm;
3791 if ((cm = mpr_alloc_command(sc)) == NULL)
3793 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3794 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3795 request->MsgFlags = 0;
/* Default descriptor queue; completion routed to our handler. */
3797 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3798 cm->cm_complete = mprsas_portenable_complete;
3802 mpr_map_command(sc, cm);
3803 mpr_dprint(sc, MPR_XINFO,
3804 "mpr_send_portenable finished cm %p req %p complete %p\n",
3805 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the Port Enable request: log failures, free
 * the command, then clear wait_for_port_enable and wake any waiter so
 * the bus rescan can proceed.
 * NOTE(review): 'sassc' is used below without a visible assignment in
 * this excerpt (line elided) -- confirm against the full source.
 */
3810 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3812 MPI2_PORT_ENABLE_REPLY *reply;
3813 struct mprsas_softc *sassc;
3819 * Currently there should be no way we can hit this case. It only
3820 * happens when we have a failure to allocate chain frames, and
3821 * port enable commands don't have S/G lists.
3823 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3824 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3825 "This should not happen!\n", __func__, cm->cm_flags);
3828 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3830 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/*
 * Fix: byte-swap IOCStatus first, then apply the host-order mask.
 * The original masked the little-endian value with the host-order
 * MPI2_IOCSTATUS_MASK before swapping, which is wrong on big-endian
 * machines.
 */
3831 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3832 MPI2_IOCSTATUS_SUCCESS)
3833 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3835 mpr_free_command(sc, cm);
3837 * Done waiting for port enable to complete. Decrement the refcount.
3838 * If refcount is 0, discovery is complete and a rescan of the bus can
3841 sc->wait_for_port_enable = 0;
3842 sc->port_enable_complete = 1;
3843 wakeup(&sc->port_enable_complete);
3844 mprsas_startup_decrement(sassc);
/*
 * Scan the driver's comma-separated exclude_ids string for a matching
 * target id, tokenizing with strsep() and parsing each token with
 * strtol().  Empty tokens are skipped.
 * NOTE(review): the return statements are elided in this excerpt --
 * confirm the match/no-match return values against the full source.
 */
3848 mprsas_check_id(struct mprsas_softc *sassc, int id)
3850 struct mpr_softc *sc = sassc->sc;
3854 ids = &sc->exclude_ids[0];
3855 while((name = strsep(&ids, ",")) != NULL) {
3856 if (name[0] == '\0')
3858 if (strtol(name, NULL, 0) == (long)id)
3866 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3868 struct mprsas_softc *sassc;
3869 struct mprsas_lun *lun, *lun_tmp;
3870 struct mprsas_target *targ;
3875 * The number of targets is based on IOC Facts, so free all of
3876 * the allocated LUNs for each target and then the target buffer
3879 for (i=0; i< maxtargets; i++) {
3880 targ = &sassc->targets[i];
3881 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3885 free(sassc->targets, M_MPR);
3887 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3888 M_MPR, M_WAITOK|M_ZERO);
3889 if (!sassc->targets) {
3890 panic("%s failed to alloc targets with error %d\n",