2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 /* Communications core for Avago Technologies (LSI) MPT3 */
38 /* TODO Move headers to mprvar */
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/selinfo.h>
44 #include <sys/module.h>
48 #include <sys/malloc.h>
50 #include <sys/sysctl.h>
51 #include <sys/endian.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
57 #include <machine/bus.h>
58 #include <machine/resource.h>
61 #include <machine/stdarg.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_debug.h>
66 #include <cam/cam_sim.h>
67 #include <cam/cam_xpt_sim.h>
68 #include <cam/cam_xpt_periph.h>
69 #include <cam/cam_periph.h>
70 #include <cam/scsi/scsi_all.h>
71 #include <cam/scsi/scsi_message.h>
72 #if __FreeBSD_version >= 900026
73 #include <cam/scsi/smp_all.h>
76 #include <dev/nvme/nvme.h>
78 #include <dev/mpr/mpi/mpi2_type.h>
79 #include <dev/mpr/mpi/mpi2.h>
80 #include <dev/mpr/mpi/mpi2_ioc.h>
81 #include <dev/mpr/mpi/mpi2_sas.h>
82 #include <dev/mpr/mpi/mpi2_pci.h>
83 #include <dev/mpr/mpi/mpi2_cnfg.h>
84 #include <dev/mpr/mpi/mpi2_init.h>
85 #include <dev/mpr/mpi/mpi2_tool.h>
86 #include <dev/mpr/mpr_ioctl.h>
87 #include <dev/mpr/mprvar.h>
88 #include <dev/mpr/mpr_table.h>
89 #include <dev/mpr/mpr_sas.h>
91 #define MPRSAS_DISCOVERY_TIMEOUT 20
92 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
95 * static array to check SCSI OpCode for EEDP protection bits
/*
 * EEDP flag shorthands: PRO_R = check/remove protection info on the way in,
 * PRO_W and PRO_V = insert protection info (both expand to the same
 * MPI2 INSERT_OP flag per the macros below).
 */
97 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Lookup table indexed by SCSI CDB opcode byte (0x00-0xFF); a non-zero
 * entry gives the EEDP flags to apply for that opcode (READ/WRITE/VERIFY
 * family opcodes). NOTE(review): the closing "};" is elided in this chunk
 * (embedded original line numbers are non-contiguous).
 */
100 static uint8_t op_code_prot[256] = {
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
116 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
119 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
121 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
123 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mprsas_poll(struct cam_sim *sim);
125 static void mprsas_scsiio_timeout(void *data);
126 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
127 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
128 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
129 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
130 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
131 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
132 struct mpr_command *cm);
133 static void mprsas_async(void *callback_arg, uint32_t code,
134 struct cam_path *path, void *arg);
135 #if (__FreeBSD_version < 901503) || \
136 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
137 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
138 struct ccb_getdev *cgd);
139 static void mprsas_read_cap_done(struct cam_periph *periph,
140 union ccb *done_ccb);
142 static int mprsas_send_portenable(struct mpr_softc *sc);
143 static void mprsas_portenable_complete(struct mpr_softc *sc,
144 struct mpr_command *cm);
146 #if __FreeBSD_version >= 900026
147 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
148 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
150 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
151 #endif //FreeBSD_version >= 900026
/*
 * Linear scan of sassc->targets[start..maxtargets) for the target whose
 * firmware device handle matches. NOTE(review): the match-return and
 * closing braces are elided from this chunk; presumably returns the
 * matching target or NULL — confirm against the full source.
 */
153 struct mprsas_target *
154 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
157 struct mprsas_target *target;
160 for (i = start; i < sassc->maxtargets; i++) {
161 target = &sassc->targets[i];
162 if (target->handle == handle)
169 /* we need to freeze the simq during attach and diag reset, to avoid failing
170 * commands before device handles have been found by discovery. Since
171 * discovery involves reading config pages and possibly sending commands,
172 * discovery actions may continue even after we receive the end of discovery
173 * event, so refcount discovery actions instead of assuming we can unfreeze
174 * the simq when we get the event.
/*
 * Bump the discovery refcount; the 0 -> 1 transition freezes the SIM
 * queue (only while MPRSAS_IN_STARTUP is set). Paired with
 * mprsas_startup_decrement().
 */
177 mprsas_startup_increment(struct mprsas_softc *sassc)
179 MPR_FUNCTRACE(sassc->sc);
181 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
182 if (sassc->startup_refcount++ == 0) {
183 /* just starting, freeze the simq */
184 mpr_dprint(sassc->sc, MPR_INIT,
185 "%s freezing simq\n", __func__);
/* Version-gated: newer CAM supports xpt_hold_boot() here (elided). */
186 #if (__FreeBSD_version >= 1000039) || \
187 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
190 xpt_freeze_simq(sassc->sim, 1);
192 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
193 sassc->startup_refcount);
/*
 * If the SIM queue was frozen for reinit (MPRSAS_QUEUE_FROZEN set),
 * clear the flag and release one freeze count.
 */
198 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
200 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
201 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
202 xpt_release_simq(sassc->sim, 1);
203 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
/*
 * Drop the discovery refcount; the 1 -> 0 transition ends startup mode,
 * releases the SIM queue, and (on supporting CAM versions) rescans the
 * whole bus so CAM sees the discovered topology.
 */
208 mprsas_startup_decrement(struct mprsas_softc *sassc)
210 MPR_FUNCTRACE(sassc->sc);
212 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
213 if (--sassc->startup_refcount == 0) {
214 /* finished all discovery-related actions, release
215 * the simq and rescan for the latest topology.
217 mpr_dprint(sassc->sc, MPR_INIT,
218 "%s releasing simq\n", __func__);
219 sassc->flags &= ~MPRSAS_IN_STARTUP;
220 xpt_release_simq(sassc->sim, 1);
/* NULL target => wildcard rescan of the whole bus. */
221 #if (__FreeBSD_version >= 1000039) || \
222 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
225 mprsas_rescan_target(sassc->sc, NULL);
228 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
229 sassc->startup_refcount);
234 * The firmware requires us to stop sending commands when we're doing task
237 * XXX The logic for serializing the device has been made lazy and moved to
238 * mprsas_prepare_for_tm().
/*
 * Allocate a high-priority command pre-initialized as a SCSI Task
 * Management request. NOTE(review): the NULL check on the allocation and
 * the return statement are elided from this chunk.
 */
241 mprsas_alloc_tm(struct mpr_softc *sc)
243 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
244 struct mpr_command *tm;
247 tm = mpr_alloc_high_priority_command(sc);
251 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
252 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
/*
 * Release a task-management command: clear the target's INRESET flag,
 * unfreeze the devq that was frozen for the TM, free the CCB/path used
 * for the freeze, then return the command to the high-priority pool.
 */
257 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
259 int target_id = 0xFFFFFFFF;
266 * For TM's the devq is frozen for the device. Unfreeze it here and
267 * free the resources used for freezing the devq. Must clear the
268 * INRESET flag as well or scsi I/O will not work.
270 if (tm->cm_targ != NULL) {
271 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
272 target_id = tm->cm_targ->tid;
275 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
/* NOTE(review): guard for tm->cm_ccb != NULL appears elided here. */
277 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
278 xpt_free_path(tm->cm_ccb->ccb_h.path);
279 xpt_free_ccb(tm->cm_ccb);
282 mpr_free_high_priority_command(sc, tm);
/*
 * Schedule a CAM rescan of one target, or of the whole bus when targ is
 * NULL (wildcard target id). Allocates a CCB, builds a path on this
 * SIM, and issues XPT_SCAN_BUS or XPT_SCAN_TGT accordingly.
 */
286 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
288 struct mprsas_softc *sassc = sc->sassc;
290 target_id_t targetid;
294 pathid = cam_sim_path(sassc->sim);
296 targetid = CAM_TARGET_WILDCARD;
/* Target id derived from the entry's index in the targets[] array. */
298 targetid = targ - sassc->targets;
301 * Allocate a CCB and schedule a rescan.
303 ccb = xpt_alloc_ccb_nowait();
305 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
309 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
310 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
311 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
316 if (targetid == CAM_TARGET_WILDCARD)
317 ccb->ccb_h.func_code = XPT_SCAN_BUS;
319 ccb->ccb_h.func_code = XPT_SCAN_TGT;
321 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * printf-style debug logger for a command: prefixes the message with the
 * CCB's CAM path (and SCSI CDB for XPT_SCSI_IO), or a synthesized
 * "(noperiph:...)" path when there is no CCB, plus the request SMID.
 * No-op unless the given debug level is enabled in sc->mpr_debug.
 */
326 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
336 /* No need to be in here if debugging isn't enabled */
337 if ((cm->cm_sc->mpr_debug & level) == 0)
340 sbuf_new(&sb, str, sizeof(str), 0);
344 if (cm->cm_ccb != NULL) {
345 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
347 sbuf_cat(&sb, path_str);
348 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
349 scsi_command_string(&cm->cm_ccb->csio, &sb);
350 sbuf_printf(&sb, "length %d ",
351 cm->cm_ccb->csio.dxfer_len);
354 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
355 cam_sim_name(cm->cm_sc->sassc->sim),
356 cam_sim_unit(cm->cm_sc->sassc->sim),
357 cam_sim_bus(cm->cm_sc->sassc->sim),
358 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
362 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
363 sbuf_vprintf(&sb, fmt, ap);
365 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_prepare_volume_remove(). On success, clears the target's
 * enclosure/link state so the slot can be re-used, while deliberately
 * keeping devname/sasaddr (see comment below). Always frees the TM.
 */
371 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
373 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
374 struct mprsas_target *targ;
379 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* Device handle was stashed in cm_complete_data by the submitter. */
380 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
384 /* XXX retry the remove after the diag reset completes? */
385 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
386 "0x%04x\n", __func__, handle);
387 mprsas_free_tm(sc, tm);
391 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
392 MPI2_IOCSTATUS_SUCCESS) {
393 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
394 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
397 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
398 le32toh(reply->TerminationCount));
399 mpr_free_reply(sc, tm->cm_reply_data);
400 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
402 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
406 * Don't clear target if remove fails because things will get confusing.
407 * Leave the devname and sasaddr intact so that we know to avoid reusing
408 * this target id if possible, and so we can assign the same target id
409 * to this device if it comes back in the future.
411 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
412 MPI2_IOCSTATUS_SUCCESS) {
415 targ->encl_handle = 0x0;
416 targ->encl_level_valid = 0x0;
417 targ->encl_level = 0x0;
418 targ->connector_name[0] = ' ';
419 targ->connector_name[1] = ' ';
420 targ->connector_name[2] = ' ';
421 targ->connector_name[3] = ' ';
422 targ->encl_slot = 0x0;
423 targ->exp_dev_handle = 0x0;
425 targ->linkrate = 0x0;
428 targ->scsi_req_desc_type = 0;
431 mprsas_free_tm(sc, tm);
436 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
437 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * Begin removal of a RAID volume identified by its device handle: mark
 * the target INREMOVAL, allocate a TM, and send a target reset whose
 * completion (mprsas_remove_volume) finishes the teardown. A rescan is
 * kicked off so CAM notices the departing device.
 */
440 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
442 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
443 struct mpr_softc *sc;
444 struct mpr_command *cm;
445 struct mprsas_target *targ = NULL;
447 MPR_FUNCTRACE(sassc->sc);
450 targ = mprsas_find_target_by_handle(sassc, 0, handle);
452 /* FIXME: what is the action? */
453 /* We don't know about this device? */
454 mpr_dprint(sc, MPR_ERROR,
455 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
459 targ->flags |= MPRSAS_TARGET_INREMOVAL;
461 cm = mprsas_alloc_tm(sc);
463 mpr_dprint(sc, MPR_ERROR,
464 "%s: command alloc failure\n", __func__);
468 mprsas_rescan_target(sc, targ);
470 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
471 req->DevHandle = targ->handle;
472 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
/* NVMe devices (without custom TM handling) get a PCIe-level reset. */
474 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
475 /* SAS Hard Link Reset / SATA Link Reset */
476 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
478 /* PCIe Protocol Level Reset*/
480 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
485 cm->cm_complete = mprsas_remove_volume;
486 cm->cm_complete_data = (void *)(uintptr_t)handle;
488 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
489 __func__, targ->tid);
490 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
492 mpr_map_command(sc, cm);
496 * The firmware performs debounce on the link to avoid transient link errors
497 * and false removals. When it does decide that link has been lost and a
498 * device needs to go away, it expects that the host will perform a target reset
499 * and then an op remove. The reset has the side-effect of aborting any
500 * outstanding requests for the device, which is required for the op-remove to
501 * succeed. It's not clear if the host should check for the device coming back
502 * alive after the reset.
/*
 * Begin removal of a bare (non-volume) device: mark the target
 * INREMOVAL and send a target-reset TM; mprsas_remove_device() then
 * issues the SAS_IO_UNIT_CONTROL REMOVE_DEVICE on completion.
 */
505 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
507 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
508 struct mpr_softc *sc;
509 struct mpr_command *tm;
510 struct mprsas_target *targ = NULL;
512 MPR_FUNCTRACE(sassc->sc);
516 targ = mprsas_find_target_by_handle(sassc, 0, handle);
518 /* FIXME: what is the action? */
519 /* We don't know about this device? */
520 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
525 targ->flags |= MPRSAS_TARGET_INREMOVAL;
527 tm = mprsas_alloc_tm(sc);
529 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
534 mprsas_rescan_target(sc, targ);
536 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
537 memset(req, 0, sizeof(*req));
538 req->DevHandle = htole16(targ->handle);
539 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
541 /* SAS Hard Link Reset / SATA Link Reset */
542 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
546 tm->cm_complete = mprsas_remove_device;
547 tm->cm_complete_data = (void *)(uintptr_t)handle;
549 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
550 __func__, targ->tid);
551 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
553 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM sent by
 * mprsas_prepare_remove(). If the reset completed, the same command is
 * reused to send a SAS IO Unit Control REMOVE_DEVICE for the handle
 * (second stage of the firmware's reset-then-remove protocol), and any
 * commands still queued on the target are completed with
 * CAM_DEV_NOT_THERE.
 */
557 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
559 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
560 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
561 struct mprsas_target *targ;
562 struct mpr_command *next_cm;
567 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
568 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
572 * Currently there should be no way we can hit this case. It only
573 * happens when we have a failure to allocate chain frames, and
574 * task management commands don't have S/G lists.
576 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
577 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
578 "handle %#04x! This should not happen!\n", __func__,
579 tm->cm_flags, handle);
/* NULL reply: most likely the controller went through a diag reset. */
583 /* XXX retry the remove after the diag reset completes? */
584 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
585 "0x%04x\n", __func__, handle);
586 mprsas_free_tm(sc, tm);
590 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
591 MPI2_IOCSTATUS_SUCCESS) {
592 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
593 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
596 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
597 le32toh(reply->TerminationCount));
598 mpr_free_reply(sc, tm->cm_reply_data);
599 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
601 /* Reuse the existing command */
602 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
603 memset(req, 0, sizeof(*req));
604 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
605 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
606 req->DevHandle = htole16(handle);
608 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
609 tm->cm_complete = mprsas_remove_complete;
610 tm->cm_complete_data = (void *)(uintptr_t)handle;
612 mpr_map_command(sc, tm);
614 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
616 if (targ->encl_level_valid) {
617 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
618 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
619 targ->connector_name);
/*
 * Fail any I/O that was still queued on the target; the reset above
 * aborted it in firmware. (tm is reused as the loop cursor here.)
 */
621 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
624 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
625 ccb = tm->cm_complete_data;
626 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
627 mprsas_scsiio_complete(sc, tm);
/*
 * Completion handler for the SAS IO Unit Control REMOVE_DEVICE request.
 * On IOC success, clears the target's enclosure/link state (keeping
 * devname/sasaddr, see comment below) and frees all per-LUN records.
 * Always frees the TM.
 */
632 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
634 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
636 struct mprsas_target *targ;
637 struct mprsas_lun *lun;
641 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
642 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
645 * Currently there should be no way we can hit this case. It only
646 * happens when we have a failure to allocate chain frames, and
647 * task management commands don't have S/G lists.
649 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
650 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
651 "handle %#04x! This should not happen!\n", __func__,
652 tm->cm_flags, handle);
653 mprsas_free_tm(sc, tm);
658 /* most likely a chip reset */
659 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
660 "0x%04x\n", __func__, handle);
661 mprsas_free_tm(sc, tm);
665 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
666 __func__, handle, le16toh(reply->IOCStatus));
669 * Don't clear target if remove fails because things will get confusing.
670 * Leave the devname and sasaddr intact so that we know to avoid reusing
671 * this target id if possible, and so we can assign the same target id
672 * to this device if it comes back in the future.
674 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
675 MPI2_IOCSTATUS_SUCCESS) {
678 targ->encl_handle = 0x0;
679 targ->encl_level_valid = 0x0;
680 targ->encl_level = 0x0;
681 targ->connector_name[0] = ' ';
682 targ->connector_name[1] = ' ';
683 targ->connector_name[2] = ' ';
684 targ->connector_name[3] = ' ';
685 targ->encl_slot = 0x0;
686 targ->exp_dev_handle = 0x0;
688 targ->linkrate = 0x0;
691 targ->scsi_req_desc_type = 0;
/* Drain and free the per-target LUN list. */
693 while (!SLIST_EMPTY(&targ->luns)) {
694 lun = SLIST_FIRST(&targ->luns);
695 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
700 mprsas_free_tm(sc, tm);
/*
 * Build the firmware event-notification mask this driver cares about
 * (SAS/IR topology, temperature, discovery errors; plus active-cable
 * and PCIe events on MPI 2.6+ / Gen3.5 IOCs) and register
 * mprsas_evt_handler for them.
 */
704 mprsas_register_events(struct mpr_softc *sc)
709 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
710 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
711 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
712 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
713 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
714 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
715 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
716 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
717 setbit(events, MPI2_EVENT_IR_VOLUME);
718 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
719 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
720 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
721 setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
722 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
723 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
724 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
725 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
726 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
727 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
731 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
732 &sc->sassc->mprsas_eh);
/*
 * Attach the CAM SAS subsystem: allocate the per-SAS softc and
 * targets[] array, create the SIM queue/SIM, register the bus,
 * enter startup mode (simq frozen until discovery completes), set up
 * the event taskqueue, and register async/firmware event handlers.
 * NOTE(review): error-unwind labels/gotos are elided in this chunk.
 */
738 mpr_attach_sas(struct mpr_softc *sc)
740 struct mprsas_softc *sassc;
742 int unit, error = 0, reqs;
745 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
747 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
749 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
750 "Cannot allocate SAS subsystem memory\n");
755 * XXX MaxTargets could change during a reinit. Since we don't
756 * resize the targets[] array during such an event, cache the value
757 * of MaxTargets here so that we don't get into trouble later. This
758 * should move into the reinit logic.
760 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
761 sassc->targets = malloc(sizeof(struct mprsas_target) *
762 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
763 if (!sassc->targets) {
764 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
765 "Cannot allocate SAS target memory\n");
/* Reserve one request and the high-priority pool for internal use. */
772 reqs = sc->num_reqs - sc->num_prireqs - 1;
773 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
774 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
779 unit = device_get_unit(sc->mpr_dev);
780 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
781 unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
782 if (sassc->sim == NULL) {
783 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
788 TAILQ_INIT(&sassc->ev_queue);
790 /* Initialize taskqueue for Event Handling */
791 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
792 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
793 taskqueue_thread_enqueue, &sassc->ev_tq);
794 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
795 device_get_nameunit(sc->mpr_dev));
800 * XXX There should be a bus for every port on the adapter, but since
801 * we're just going to fake the topology for now, we'll pretend that
802 * everything is just a target on a single bus.
804 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
805 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
806 "Error %d registering SCSI bus\n", error);
812 * Assume that discovery events will start right away.
814 * Hold off boot until discovery is complete.
816 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
817 sc->sassc->startup_refcount = 0;
818 mprsas_startup_increment(sassc);
820 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
823 * Register for async events so we can determine the EEDP
824 * capabilities of devices.
826 status = xpt_create_path(&sassc->path, /*periph*/NULL,
827 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
829 if (status != CAM_REQ_CMP) {
830 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
831 "Error %#x creating sim path\n", status);
/* Older CAM lacks AC_ADVINFO_CHANGED; fall back to AC_FOUND_DEVICE. */
836 #if (__FreeBSD_version >= 1000006) || \
837 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
838 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
840 event = AC_FOUND_DEVICE;
844 * Prior to the CAM locking improvements, we can't call
845 * xpt_register_async() with a particular path specified.
847 * If a path isn't specified, xpt_register_async() will
848 * generate a wildcard path and acquire the XPT lock while
849 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
850 * It will then drop the XPT lock once that is done.
852 * If a path is specified for xpt_register_async(), it will
853 * not acquire and drop the XPT lock around the call to
854 * xpt_action(). xpt_action() asserts that the caller
855 * holds the SIM lock, so the SIM lock has to be held when
856 * calling xpt_register_async() when the path is specified.
858 * But xpt_register_async calls xpt_for_all_devices(),
859 * which calls xptbustraverse(), which will acquire each
860 * SIM lock. When it traverses our particular bus, it will
861 * necessarily acquire the SIM lock, which will lead to a
862 * recursive lock acquisition.
864 * The CAM locking changes fix this problem by acquiring
865 * the XPT topology lock around bus traversal in
866 * xptbustraverse(), so the caller can hold the SIM lock
867 * and it does not cause a recursive lock acquisition.
869 * These __FreeBSD_version values are approximate, especially
870 * for stable/10, which is two months later than the actual
874 #if (__FreeBSD_version < 1000703) || \
875 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
877 status = xpt_register_async(event, mprsas_async, sc,
881 status = xpt_register_async(event, mprsas_async, sc,
885 if (status != CAM_REQ_CMP) {
886 mpr_dprint(sc, MPR_ERROR,
887 "Error %#x registering async handler for "
888 "AC_ADVINFO_CHANGED events\n", status);
889 xpt_free_path(sassc->path);
893 if (status != CAM_REQ_CMP) {
895 * EEDP use is the exception, not the rule.
896 * Warn the user, but do not fail to attach.
898 mpr_printf(sc, "EEDP capabilities disabled.\n");
903 mprsas_register_events(sc);
908 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
/*
 * Tear down the CAM SAS subsystem in reverse order of attach:
 * deregister events, drain/free the event taskqueue (lock not held,
 * per comment below), unwind any startup refcounts, remove the async
 * handler, deregister the bus, free the SIM/SIMQ, and release per-LUN
 * and target memory.
 */
913 mpr_detach_sas(struct mpr_softc *sc)
915 struct mprsas_softc *sassc;
916 struct mprsas_lun *lun, *lun_tmp;
917 struct mprsas_target *targ;
922 if (sc->sassc == NULL)
926 mpr_deregister_events(sc, sassc->mprsas_eh);
929 * Drain and free the event handling taskqueue with the lock
930 * unheld so that any parallel processing tasks drain properly
931 * without deadlocking.
933 if (sassc->ev_tq != NULL)
934 taskqueue_free(sassc->ev_tq);
936 /* Make sure CAM doesn't wedge if we had to bail out early. */
939 while (sassc->startup_refcount != 0)
940 mprsas_startup_decrement(sassc);
942 /* Deregister our async handler */
943 if (sassc->path != NULL) {
/* Event mask 0 removes the registration made during attach. */
944 xpt_register_async(0, mprsas_async, sc, sassc->path);
945 xpt_free_path(sassc->path);
949 if (sassc->flags & MPRSAS_IN_STARTUP)
950 xpt_release_simq(sassc->sim, 1);
952 if (sassc->sim != NULL) {
953 xpt_bus_deregister(cam_sim_path(sassc->sim));
954 cam_sim_free(sassc->sim, FALSE);
959 if (sassc->devq != NULL)
960 cam_simq_free(sassc->devq);
962 for (i = 0; i < sassc->maxtargets; i++) {
963 targ = &sassc->targets[i];
964 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
968 free(sassc->targets, M_MPR);
/*
 * Called when firmware discovery finishes: cancel any pending
 * discovery timeout and, if mapping-event tracking is enabled,
 * schedule the missing-device check callout.
 */
976 mprsas_discovery_end(struct mprsas_softc *sassc)
978 struct mpr_softc *sc = sassc->sc;
982 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
983 callout_stop(&sassc->discovery_callout);
986 * After discovery has completed, check the mapping table for any
987 * missing devices and update their missing counts. Only do this once
988 * whenever the driver is initialized so that missing counts aren't
989 * updated unnecessarily. Note that just because discovery has
990 * completed doesn't mean that events have been processed yet. The
991 * check_devices function is a callout timer that checks if ALL devices
992 * are missing. If so, it will wait a little longer for events to
993 * complete and keep resetting itself until some device in the mapping
994 * table is not missing, meaning that event processing has started.
996 if (sc->track_mapping_events) {
997 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
998 "completed. Check for missing devices in the mapping "
1000 callout_reset(&sc->device_check_callout,
1001 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
/*
 * CAM action entry point for the SIM: dispatch on the CCB function
 * code. Handles path inquiry, transport settings, geometry, device
 * reset, SCSI I/O and (on supporting versions) SMP I/O; abort/bus-reset
 * style requests are faked as success. Must be called with the driver
 * mutex held (asserted below).
 */
1007 mprsas_action(struct cam_sim *sim, union ccb *ccb)
1009 struct mprsas_softc *sassc;
1011 sassc = cam_sim_softc(sim);
1013 MPR_FUNCTRACE(sassc->sc);
1014 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1015 ccb->ccb_h.func_code);
1016 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
1018 switch (ccb->ccb_h.func_code) {
1021 struct ccb_pathinq *cpi = &ccb->cpi;
1022 struct mpr_softc *sc = sassc->sc;
1024 cpi->version_num = 1;
1025 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1026 cpi->target_sprt = 0;
/* PIM_NOSCAN only exists on newer CAM; driver drives its own scans. */
1027 #if (__FreeBSD_version >= 1000039) || \
1028 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1029 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1031 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1033 cpi->hba_eng_cnt = 0;
1034 cpi->max_target = sassc->maxtargets - 1;
1038 * initiator_id is set here to an ID outside the set of valid
1039 * target IDs (including volumes).
1041 cpi->initiator_id = sassc->maxtargets;
1042 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1043 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1044 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1045 cpi->unit_number = cam_sim_unit(sim);
1046 cpi->bus_id = cam_sim_bus(sim);
1048 * XXXSLM-I think this needs to change based on config page or
1049 * something instead of hardcoded to 150000.
1051 cpi->base_transfer_speed = 150000;
1052 cpi->transport = XPORT_SAS;
1053 cpi->transport_version = 0;
1054 cpi->protocol = PROTO_SCSI;
1055 cpi->protocol_version = SCSI_REV_SPC;
1056 cpi->maxio = sc->maxio;
1057 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1060 case XPT_GET_TRAN_SETTINGS:
1062 struct ccb_trans_settings *cts;
1063 struct ccb_trans_settings_sas *sas;
1064 struct ccb_trans_settings_scsi *scsi;
1065 struct mprsas_target *targ;
1068 sas = &cts->xport_specific.sas;
1069 scsi = &cts->proto_specific.scsi;
1071 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1072 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1073 cts->ccb_h.target_id));
1074 targ = &sassc->targets[cts->ccb_h.target_id];
/* handle == 0 means no device currently occupies this target id. */
1075 if (targ->handle == 0x0) {
1076 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1080 cts->protocol_version = SCSI_REV_SPC2;
1081 cts->transport = XPORT_SAS;
1082 cts->transport_version = 0;
1084 sas->valid = CTS_SAS_VALID_SPEED;
/* Map MPI link-rate code to bitrate in units of 100 Kb/s. */
1085 switch (targ->linkrate) {
1087 sas->bitrate = 150000;
1090 sas->bitrate = 300000;
1093 sas->bitrate = 600000;
1096 sas->bitrate = 1200000;
1102 cts->protocol = PROTO_SCSI;
1103 scsi->valid = CTS_SCSI_VALID_TQ;
1104 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1106 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1109 case XPT_CALC_GEOMETRY:
1110 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1111 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1114 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1116 mprsas_action_resetdev(sassc, ccb);
1121 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1122 "for abort or reset\n");
1123 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1126 mprsas_action_scsiio(sassc, ccb);
1128 #if __FreeBSD_version >= 900026
1130 mprsas_action_smpio(sassc, ccb);
1134 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
/*
 * Broadcast an async event (e.g. AC_BUS_RESET or AC_SENT_BDR) to CAM
 * for the given target/LUN on this SIM's bus. Builds a temporary path,
 * sends the async notification, and frees the path.
 */
1142 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1143 target_id_t target_id, lun_id_t lun_id)
1145 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1146 struct cam_path *path;
1148 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1149 ac_code, target_id, (uintmax_t)lun_id);
1151 if (xpt_create_path(&path, NULL,
1152 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1153 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1158 xpt_async(ac_code, path, NULL);
1159 xpt_free_path(path);
/*
 * Diag-reset helper: walk every outstanding (non-free) command and
 * force-complete it with a NULL reply — via its completion callback,
 * or by waking a sleeping waiter, or (for polled commands) by setting
 * the COMPLETE flag. Logs any command that could not be completed.
 * Must be called with the driver mutex held.
 */
1163 mprsas_complete_all_commands(struct mpr_softc *sc)
1165 struct mpr_command *cm;
1170 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1172 /* complete all commands with a NULL reply */
1173 for (i = 1; i < sc->num_reqs; i++) {
1174 cm = &sc->commands[i];
1175 if (cm->cm_state == MPR_CM_STATE_FREE)
1178 cm->cm_state = MPR_CM_STATE_BUSY;
1179 cm->cm_reply = NULL;
/* SATA ID commands own a private data buffer; free it here. */
1182 if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
1184 free(cm->cm_data, M_MPR);
1188 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1189 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1191 if (cm->cm_complete != NULL) {
1192 mprsas_log_command(cm, MPR_RECOVERY,
1193 "completing cm %p state %x ccb %p for diag reset\n",
1194 cm, cm->cm_state, cm->cm_ccb);
1195 cm->cm_complete(sc, cm);
1197 } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1198 mprsas_log_command(cm, MPR_RECOVERY,
1199 "waking up cm %p state %x ccb %p for diag reset\n",
1200 cm, cm->cm_state, cm->cm_ccb);
1205 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1206 /* this should never happen, but if it does, log */
1207 mprsas_log_command(cm, MPR_RECOVERY,
1208 "cm %p state %x flags 0x%x ccb %p during diag "
1209 "reset\n", cm, cm->cm_state, cm->cm_flags,
1214 sc->io_cmds_active = 0;
/*
 * Re-initialize driver/CAM state after a diag reset: re-enter startup and
 * discovery mode, announce a bus reset to CAM, flush all outstanding
 * commands, and invalidate every cached target handle so rediscovery
 * assigns fresh ones.
 */
1218 mprsas_handle_reinit(struct mpr_softc *sc)
1222 /* Go back into startup mode and freeze the simq, so that CAM
1223 * doesn't send any commands until after we've rediscovered all
1224 * targets and found the proper device handles for them.
1226 * After the reset, portenable will trigger discovery, and after all
1227 * discovery-related activities have finished, the simq will be
1230 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1231 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1232 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1233 mprsas_startup_increment(sc->sassc);
1235 /* notify CAM of a bus reset */
1236 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1239 /* complete and cleanup after all outstanding commands */
1240 mprsas_complete_all_commands(sc);
1242 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1243 __func__, sc->sassc->startup_refcount);
1245 /* zero all the target handles, since they may change after the
1246 * reset, and we have to rediscover all the targets and use the new
1249 for (i = 0; i < sc->sassc->maxtargets; i++) {
1250 if (sc->sassc->targets[i].outstanding != 0)
1251 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1252 i, sc->sassc->targets[i].outstanding);
1253 sc->sassc->targets[i].handle = 0x0;
1254 sc->sassc->targets[i].exp_dev_handle = 0x0;
1255 sc->sassc->targets[i].outstanding = 0;
1256 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler for a task-management command that did not complete in
 * time.  Runs with the softc mutex held (callout initialized with the
 * mutex).  The TM must still be in-queue; it is marked busy here.
 * NOTE(review): the escalation path after this point is elided in this
 * view — presumably a controller diag reset; confirm against full source.
 */
1260 mprsas_tm_timeout(void *data)
1262 struct mpr_command *tm = data;
1263 struct mpr_softc *sc = tm->cm_sc;
1265 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1267 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
/* A timed-out TM must still be queued to the firmware. */
1270 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1271 ("command not inqueue\n"));
1273 tm->cm_state = MPR_CM_STATE_BUSY;
/*
 * Completion handler for a LUN-reset task-management command.  If the LUN
 * has no remaining outstanding commands, recovery for it is done and the
 * next timed-out command (if any) on the target is aborted; otherwise the
 * reset is considered failed and recovery escalates to a target reset.
 */
1278 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1280 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1281 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1282 unsigned int cm_count = 0;
1283 struct mpr_command *cm;
1284 struct mprsas_target *targ;
/* The TM completed; cancel its timeout callout. */
1286 callout_stop(&tm->cm_callout);
1288 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1289 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1293 * Currently there should be no way we can hit this case. It only
1294 * happens when we have a failure to allocate chain frames, and
1295 * task management commands don't have S/G lists.
1297 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1298 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1299 "%s: cm_flags = %#x for LUN reset! "
1300 "This should not happen!\n", __func__, tm->cm_flags);
1301 mprsas_free_tm(sc, tm);
1305 if (reply == NULL) {
1306 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1308 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1309 /* this completion was due to a reset, just cleanup */
1310 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1311 "reset, ignoring NULL LUN reset reply\n");
1313 mprsas_free_tm(sc, tm);
1316 /* we should have gotten a reply. */
1317 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1318 "LUN reset attempt, resetting controller\n");
1324 mpr_dprint(sc, MPR_RECOVERY,
1325 "logical unit reset status 0x%x code 0x%x count %u\n",
1326 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1327 le32toh(reply->TerminationCount));
1330 * See if there are any outstanding commands for this LUN.
1331 * This could be made more efficient by using a per-LU data
1332 * structure of some sort.
1334 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1335 if (cm->cm_lun == tm->cm_lun)
1339 if (cm_count == 0) {
1340 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1341 "Finished recovery after LUN reset for target %u\n",
/* Tell CAM a BDR was delivered to this target/LUN. */
1344 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1348 * We've finished recovery for this logical unit. check and
1349 * see if some other logical unit has a timedout command
1350 * that needs to be processed.
1352 cm = TAILQ_FIRST(&targ->timedout_commands);
1354 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1355 "More commands to abort for target %u\n", targ->tid);
/* Reuse this TM to abort the next timed-out command. */
1356 mprsas_send_abort(sc, tm, cm);
1359 mprsas_free_tm(sc, tm);
1362 /* if we still have commands for this LUN, the reset
1363 * effectively failed, regardless of the status reported.
1364 * Escalate to a target reset.
1366 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1367 "logical unit reset complete for target %u, but still "
1368 "have %u command(s), sending target reset\n", targ->tid,
1370 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1371 mprsas_send_reset(sc, tm,
1372 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a target-reset task-management command.  If the
 * target has no remaining outstanding commands, recovery is finished and
 * CAM is told a BDR was sent; otherwise the target reset failed and
 * recovery escalates further (controller reset).
 */
1379 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1381 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1382 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1383 struct mprsas_target *targ;
/* The TM completed; cancel its timeout callout. */
1385 callout_stop(&tm->cm_callout);
1387 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1388 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1392 * Currently there should be no way we can hit this case. It only
1393 * happens when we have a failure to allocate chain frames, and
1394 * task management commands don't have S/G lists.
1396 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1397 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1398 "reset! This should not happen!\n", __func__, tm->cm_flags);
1399 mprsas_free_tm(sc, tm);
1403 if (reply == NULL) {
1404 mpr_dprint(sc, MPR_RECOVERY,
1405 "NULL target reset reply for tm %p TaskMID %u\n",
1406 tm, le16toh(req->TaskMID));
1407 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1408 /* this completion was due to a reset, just cleanup */
1409 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1410 "reset, ignoring NULL target reset reply\n");
1412 mprsas_free_tm(sc, tm);
1415 /* we should have gotten a reply. */
1416 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1417 "target reset attempt, resetting controller\n");
1423 mpr_dprint(sc, MPR_RECOVERY,
1424 "target reset status 0x%x code 0x%x count %u\n",
1425 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1426 le32toh(reply->TerminationCount));
1428 if (targ->outstanding == 0) {
1430 * We've finished recovery for this target and all
1431 * of its logical units.
1433 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1434 "Finished reset recovery for target %u\n", targ->tid);
/* Tell CAM a BDR was delivered to this target. */
1436 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1440 mprsas_free_tm(sc, tm);
1443 * After a target reset, if this target still has
1444 * outstanding commands, the reset effectively failed,
1445 * regardless of the status reported. escalate.
1447 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1448 "Target reset complete for target %u, but still have %u "
1449 "command(s), resetting controller\n", targ->tid,
/* Default reset TM timeout, in seconds. */
1455 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a reset task-management request (LUN reset or target
 * reset, selected by 'type') for the TM's target.  Sets the appropriate
 * completion handler, arms the TM timeout callout, and maps the command
 * to the hardware.  Returns an error if the target has no device handle
 * or the type is unrecognized.
 */
1458 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1460 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1461 struct mprsas_target *target;
1464 target = tm->cm_targ;
1465 if (target->handle == 0) {
1466 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1467 "%d\n", __func__, target->tid);
1471 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1472 req->DevHandle = htole16(target->handle);
1473 req->TaskType = type;
/* NVMe devices with custom TM handling take a per-target timeout. */
1475 if (!target->is_nvme || sc->custom_nvme_tm_handling) {
1476 timeout = MPR_RESET_TIMEOUT;
1478 * Target reset method =
1479 * SAS Hard Link Reset / SATA Link Reset
1481 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1483 timeout = (target->controller_reset_timeout) ? (
1484 target->controller_reset_timeout) : (MPR_RESET_TIMEOUT);
1485 /* PCIe Protocol Level Reset*/
1487 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
1490 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1491 /* XXX Need to handle invalid LUNs */
1492 MPR_SET_LUN(req->LUN, tm->cm_lun);
1493 tm->cm_targ->logical_unit_resets++;
1494 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1495 "Sending logical unit reset to target %u lun %d\n",
1496 target->tid, tm->cm_lun);
1497 tm->cm_complete = mprsas_logical_unit_reset_complete;
1498 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1499 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1500 tm->cm_targ->target_resets++;
1501 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1502 "Sending target reset to target %u\n", target->tid);
1503 tm->cm_complete = mprsas_target_reset_complete;
1504 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1507 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1511 if (target->encl_level_valid) {
1512 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1513 "At enclosure level %d, slot %d, connector name (%4s)\n",
1514 target->encl_level, target->encl_slot,
1515 target->connector_name);
1519 tm->cm_complete_data = (void *)tm;
/* Arm the TM timeout before handing the request to the hardware. */
1521 callout_reset(&tm->cm_callout, timeout * hz,
1522 mprsas_tm_timeout, tm);
1524 err = mpr_map_command(sc, tm);
1526 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1527 "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT_TASK task-management command.  Continues
 * abort recovery with the next timed-out command on the target, finishes
 * recovery when none remain, or escalates to a LUN reset when the aborted
 * command is still at the head of the timed-out queue (abort failed).
 */
1534 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1536 struct mpr_command *cm;
1537 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1538 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1539 struct mprsas_target *targ;
/* The TM completed; cancel its timeout callout. */
1541 callout_stop(&tm->cm_callout);
1543 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1544 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1548 * Currently there should be no way we can hit this case. It only
1549 * happens when we have a failure to allocate chain frames, and
1550 * task management commands don't have S/G lists.
1552 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1553 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1554 "cm_flags = %#x for abort %p TaskMID %u!\n",
1555 tm->cm_flags, tm, le16toh(req->TaskMID));
1556 mprsas_free_tm(sc, tm);
1560 if (reply == NULL) {
1561 mpr_dprint(sc, MPR_RECOVERY,
1562 "NULL abort reply for tm %p TaskMID %u\n",
1563 tm, le16toh(req->TaskMID));
1564 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1565 /* this completion was due to a reset, just cleanup */
1566 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1567 "reset, ignoring NULL abort reply\n");
1569 mprsas_free_tm(sc, tm);
1571 /* we should have gotten a reply. */
1572 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1573 "abort attempt, resetting controller\n");
1579 mpr_dprint(sc, MPR_RECOVERY,
1580 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1581 le16toh(req->TaskMID),
1582 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1583 le32toh(reply->TerminationCount));
/* Inspect the head of the target's timed-out command queue. */
1585 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1588 * if there are no more timedout commands, we're done with
1589 * error recovery for this target.
1591 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1592 "Finished abort recovery for target %u\n", targ->tid);
1594 mprsas_free_tm(sc, tm);
1595 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1596 /* abort success, but we have more timedout commands to abort */
1597 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1598 "Continuing abort recovery for target %u\n", targ->tid);
1599 mprsas_send_abort(sc, tm, cm);
1602 * we didn't get a command completion, so the abort
1603 * failed as far as we're concerned. escalate.
1605 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1606 "Abort failed for target %u, sending logical unit reset\n",
1609 mprsas_send_reset(sc, tm,
1610 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Default abort TM timeout, in seconds. */
1614 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT_TASK task-management request for the timed-out
 * command 'cm', reusing the TM command 'tm'.  The TaskMID identifies the
 * specific firmware request (SMID) to abort.  Arms the TM timeout callout
 * and maps the command to the hardware.
 */
1617 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1618 struct mpr_command *cm)
1620 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1621 struct mprsas_target *targ;
1625 if (targ->handle == 0) {
1626 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1627 "%s null devhandle for target_id %d\n",
1628 __func__, cm->cm_ccb->ccb_h.target_id);
1632 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1633 "Aborting command %p\n", cm);
1635 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1636 req->DevHandle = htole16(targ->handle);
1637 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1639 /* XXX Need to handle invalid LUNs */
1640 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID selects the firmware request (by SMID) to abort. */
1642 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1645 tm->cm_complete = mprsas_abort_complete;
1646 tm->cm_complete_data = (void *)tm;
1647 tm->cm_targ = cm->cm_targ;
1648 tm->cm_lun = cm->cm_lun;
/* NVMe devices with firmware TM handling use the NVMe abort timeout. */
1650 if (!targ->is_nvme || sc->custom_nvme_tm_handling)
1651 timeout = MPR_ABORT_TIMEOUT;
1653 timeout = sc->nvme_abort_timeout;
/* Arm the TM timeout before handing the request to the hardware. */
1655 callout_reset(&tm->cm_callout, timeout * hz,
1656 mprsas_tm_timeout, tm);
1660 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1662 err = mpr_map_command(sc, tm);
1664 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1665 "error %d sending abort for cm %p SMID %u\n",
1666 err, cm, req->TaskMID);
/*
 * Callout handler for a SCSI I/O command that exceeded its CAM timeout.
 * First re-runs the interrupt handler in case the completion is merely
 * pending; a genuinely stuck command is marked timed-out, queued on the
 * target's recovery list, and recovery is started (or continued) by
 * sending an abort via a task-management command.
 */
1671 mprsas_scsiio_timeout(void *data)
1673 sbintime_t elapsed, now;
1675 struct mpr_softc *sc;
1676 struct mpr_command *cm;
1677 struct mprsas_target *targ;
1679 cm = (struct mpr_command *)data;
1685 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1687 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1690 * Run the interrupt handler to make sure it's not pending. This
1691 * isn't perfect because the command could have already completed
1692 * and been re-used, though this is unlikely.
1694 mpr_intr_locked(sc);
1695 if (cm->cm_state != MPR_CM_STATE_INQUEUE) {
1696 mprsas_log_command(cm, MPR_XINFO,
1697 "SCSI command %p almost timed out\n", cm);
1701 if (cm->cm_ccb == NULL) {
1702 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
/* qos.sim_data holds the sbinuptime() recorded at submission. */
1709 elapsed = now - ccb->ccb_h.qos.sim_data;
1710 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1711 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1712 targ->tid, targ->handle, ccb->ccb_h.timeout,
1713 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1714 if (targ->encl_level_valid) {
1715 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1716 "At enclosure level %d, slot %d, connector name (%4s)\n",
1717 targ->encl_level, targ->encl_slot, targ->connector_name);
1720 /* XXX first, check the firmware state, to see if it's still
1721 * operational. if not, do a diag reset.
1723 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1724 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1725 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1727 if (targ->tm != NULL) {
1728 /* target already in recovery, just queue up another
1729 * timedout command to be processed later.
1731 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1732 "processing by tm %p\n", cm, targ->tm);
1734 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1736 /* start recovery by aborting the first timedout command */
1737 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1738 "Sending abort to target %u for SMID %d\n", targ->tid,
1739 cm->cm_desc.Default.SMID);
1740 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1742 mprsas_send_abort(sc, targ->tm, cm);
1745 /* XXX queue this target up for recovery once a TM becomes
1746 * available. The firmware only has a limited number of
1747 * HighPriority credits for the high priority requests used
1748 * for task management, and we ran out.
1750 * Isilon: don't worry about this for now, since we have
1751 * more credits than disks in an enclosure, and limit
1752 * ourselves to one TM per target for recovery.
1754 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1755 "timedout cm %p failed to allocate a tm\n", cm);
1760 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1762 * Return 0 - for success,
1763 * 1 - to immediately return back the command with success status to CAM
1764 * negative value - to fallback to firmware path i.e. issue scsi unmap
1765 * to FW without any translation.
1768 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1769 union ccb *ccb, struct mprsas_target *targ)
1771 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1772 struct ccb_scsiio *csio;
1773 struct unmap_parm_list *plist;
1774 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1775 struct nvme_command *c;
1777 uint16_t ndesc, list_len, data_length;
1778 struct mpr_prp_page *prp_page_info;
1779 uint64_t nvme_dsm_ranges_dma_handle;
/* Extract the UNMAP parameter-list length from CDB bytes 7-8. */
1782 #if __FreeBSD_version >= 1100103
1783 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1785 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1786 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1787 ccb->csio.cdb_io.cdb_ptr[8]);
1789 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1790 ccb->csio.cdb_io.cdb_bytes[8]);
1794 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1798 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1800 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1801 "save UNMAP data\n");
1805 /* Copy SCSI unmap data to a local buffer */
1806 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1808 /* return back the unmap command to CAM with success status,
1809 * if number of descriptors is zero.
/* Each SCSI UNMAP block descriptor is 16 bytes, hence the >> 4. */
1811 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1813 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1814 "UNMAP cmd is Zero\n");
1819 data_length = ndesc * sizeof(struct nvme_dsm_range);
1820 if (data_length > targ->MDTS) {
1821 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1822 "Device's MDTS: %d\n", data_length, targ->MDTS);
1827 prp_page_info = mpr_alloc_prp_page(sc);
1828 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1829 "UNMAP command.\n", __func__));
1832 * Insert the allocated PRP page into the command's PRP page list. This
1833 * will be freed when the command is freed.
1835 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1837 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1838 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1840 bzero(nvme_dsm_ranges, data_length);
1842 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1843 * for each descriptor contained in SCSI UNMAP data.
/* SCSI fields are big-endian; NVMe fields are little-endian. */
1845 for (i = 0; i < ndesc; i++) {
1846 nvme_dsm_ranges[i].length =
1847 htole32(be32toh(plist->desc[i].nlb));
1848 nvme_dsm_ranges[i].starting_lba =
1849 htole64(be64toh(plist->desc[i].slba));
1850 nvme_dsm_ranges[i].attributes = 0;
1853 /* Build MPI2.6's NVMe Encapsulated Request Message */
1854 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1855 bzero(req, sizeof(*req));
1856 req->DevHandle = htole16(targ->handle);
1857 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1858 req->Flags = MPI26_NVME_FLAGS_WRITE;
/* NVMe error responses land in the command's sense buffer. */
1859 req->ErrorResponseBaseAddress.High =
1860 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1861 req->ErrorResponseBaseAddress.Low =
1862 htole32(cm->cm_sense_busaddr);
1863 req->ErrorResponseAllocationLength =
1864 htole16(sizeof(struct nvme_completion));
1865 req->EncapsulatedCommandLength =
1866 htole16(sizeof(struct nvme_command));
1867 req->DataLength = htole32(data_length);
1869 /* Build NVMe DSM command */
1870 c = (struct nvme_command *) req->NVMe_Command;
1871 c->opc = NVME_OPC_DATASET_MANAGEMENT;
/* LUNs map to NVMe namespaces; namespace IDs are 1-based. */
1872 c->nsid = htole32(csio->ccb_h.target_lun + 1);
/* cdw10 carries the 0-based range count per the NVMe DSM spec. */
1873 c->cdw10 = htole32(ndesc - 1);
1874 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1876 cm->cm_length = data_length;
1879 cm->cm_complete = mprsas_scsiio_complete;
1880 cm->cm_complete_data = ccb;
1882 cm->cm_lun = csio->ccb_h.target_lun;
1885 cm->cm_desc.Default.RequestFlags =
1886 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
/* Record submission time and arm the CAM command timeout. */
1888 csio->ccb_h.qos.sim_data = sbinuptime();
1889 #if __FreeBSD_version >= 1000029
1890 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1891 mprsas_scsiio_timeout, cm, 0);
1892 #else //__FreeBSD_version < 1000029
1893 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1894 mprsas_scsiio_timeout, cm);
1895 #endif //__FreeBSD_version >= 1000029
1898 targ->outstanding++;
1899 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1900 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1902 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1903 __func__, cm, ccb, targ->outstanding);
/* Build the PRP entries pointing at the DSM range page, then submit. */
1905 mpr_build_nvme_prp(sc, cm, req,
1906 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1907 mpr_map_command(sc, cm);
/*
 * Handle an XPT_SCSI_IO CCB from CAM: validate the target, allocate a
 * driver command, translate the CCB into an MPI2 SCSI_IO request (with
 * NVMe UNMAP and EEDP special-casing), arm the timeout, and submit it
 * to the controller.  Caller must hold the softc mutex.
 */
1915 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1917 MPI2_SCSI_IO_REQUEST *req;
1918 struct ccb_scsiio *csio;
1919 struct mpr_softc *sc;
1920 struct mprsas_target *targ;
1921 struct mprsas_lun *lun;
1922 struct mpr_command *cm;
1923 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1924 uint16_t eedp_flags;
1925 uint32_t mpi_control;
1930 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1933 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1934 ("Target %d out of bounds in XPT_SCSI_IO\n",
1935 csio->ccb_h.target_id));
1936 targ = &sassc->targets[csio->ccb_h.target_id];
1937 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* No device handle means the target is not (or no longer) present. */
1938 if (targ->handle == 0x0) {
1939 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1940 __func__, csio->ccb_h.target_id);
1941 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component members must not receive direct SCSI I/O. */
1945 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1946 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1947 "supported %u\n", __func__, csio->ccb_h.target_id);
1948 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1953 * Sometimes, it is possible to get a command that is not "In
1954 * Progress" and was actually aborted by the upper layer. Check for
1955 * this here and complete the command without error.
1957 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1958 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1959 "target %u\n", __func__, csio->ccb_h.target_id);
1964 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1965 * that the volume has timed out. We want volumes to be enumerated
1966 * until they are deleted/removed, not just failed.
1968 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1969 if (targ->devinfo == 0)
1970 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1972 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1977 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1978 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1979 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1985 * If target has a reset in progress, freeze the devq and return. The
1986 * devq will be released when the TM reset is finished.
1988 if (targ->flags & MPRSAS_TARGET_INRESET) {
1989 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1990 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1991 __func__, targ->tid);
1992 xpt_freeze_devq(ccb->ccb_h.path, 1);
1997 cm = mpr_alloc_command(sc);
/* Out of commands (or mid-diag-reset): freeze the simq and requeue. */
1998 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
2000 mpr_free_command(sc, cm);
2002 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2003 xpt_freeze_simq(sassc->sim, 1);
2004 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2006 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2007 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2012 /* For NVME device's issue UNMAP command directly to NVME drives by
2013 * constructing equivalent native NVMe DataSetManagement command.
2015 #if __FreeBSD_version >= 1100103
2016 scsi_opcode = scsiio_cdb_ptr(csio)[0];
2018 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2019 scsi_opcode = csio->cdb_io.cdb_ptr[0];
2021 scsi_opcode = csio->cdb_io.cdb_bytes[0];
2023 if (scsi_opcode == UNMAP &&
2025 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2026 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2027 if (rc == 1) { /* return command to CAM with success status */
2028 mpr_free_command(sc, cm);
2029 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2032 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
/* Normal path: translate the CCB into an MPI2 SCSI_IO request. */
2036 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2037 bzero(req, sizeof(*req));
2038 req->DevHandle = htole16(targ->handle);
2039 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2041 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2042 req->SenseBufferLength = MPR_SENSE_LEN;
2044 req->ChainOffset = 0;
2045 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2050 req->DataLength = htole32(csio->dxfer_len);
2051 req->BidirectionalDataLength = 0;
2052 req->IoFlags = htole16(csio->cdb_len);
2055 /* Note: BiDirectional transfers are not supported */
2056 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2058 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2059 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2062 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2063 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2067 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
2071 if (csio->cdb_len == 32)
2072 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2074 * It looks like the hardware doesn't require an explicit tag
2075 * number for each transaction. SAM Task Management not supported
2078 switch (csio->tag_action) {
2079 case MSG_HEAD_OF_Q_TAG:
2080 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2082 case MSG_ORDERED_Q_TAG:
2083 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2086 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2088 case CAM_TAG_ACTION_NONE:
2089 case MSG_SIMPLE_Q_TAG:
2091 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2094 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2095 req->Control = htole32(mpi_control);
2097 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2098 mpr_free_command(sc, cm);
2099 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* Copy the CDB from wherever CAM put it (pointer vs. inline bytes). */
2104 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2105 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2107 KASSERT(csio->cdb_len <= IOCDBLEN,
2108 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2109 "is not set", csio->cdb_len));
2110 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2112 req->IoFlags = htole16(csio->cdb_len);
2115 * Check if EEDP is supported and enabled. If it is then check if the
2116 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2117 * is formatted for EEDP support. If all of this is true, set CDB up
2118 * for EEDP transfer.
2120 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2121 if (sc->eedp_enabled && eedp_flags) {
2122 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2123 if (lun->lun_id == csio->ccb_h.target_lun) {
2128 if ((lun != NULL) && (lun->eedp_formatted)) {
2129 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2130 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2131 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2132 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2133 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2135 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2137 req->EEDPFlags = htole16(eedp_flags);
2140 * If CDB less than 32, fill in Primary Ref Tag with
2141 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2142 * already there. Also, set protection bit. FreeBSD
2143 * currently does not support CDBs bigger than 16, but
2144 * the code doesn't hurt, and will be here for the
2147 if (csio->cdb_len != 32) {
2148 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2149 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2150 PrimaryReferenceTag;
2151 for (i = 0; i < 4; i++) {
2153 req->CDB.CDB32[lba_byte + i];
2156 req->CDB.EEDP32.PrimaryReferenceTag =
2158 CDB.EEDP32.PrimaryReferenceTag);
2159 req->CDB.EEDP32.PrimaryApplicationTagMask =
2162 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2165 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2166 req->EEDPFlags = htole16(eedp_flags);
2167 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2173 cm->cm_length = csio->dxfer_len;
2174 if (cm->cm_length != 0) {
2176 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2180 cm->cm_sge = &req->SGL;
/* SGL space = request frame (32 words) minus header (24 words). */
2181 cm->cm_sglsize = (32 - 24) * 4;
2182 cm->cm_complete = mprsas_scsiio_complete;
2183 cm->cm_complete_data = ccb;
2185 cm->cm_lun = csio->ccb_h.target_lun;
2188 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2189 * and set descriptor type.
2191 if (targ->scsi_req_desc_type ==
2192 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2193 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2194 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2195 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2196 if (!sc->atomic_desc_capable) {
2197 cm->cm_desc.FastPathSCSIIO.DevHandle =
2198 htole16(targ->handle);
2201 cm->cm_desc.SCSIIO.RequestFlags =
2202 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2203 if (!sc->atomic_desc_capable)
2204 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Record submission time and arm the CAM command timeout. */
2207 csio->ccb_h.qos.sim_data = sbinuptime();
2208 #if __FreeBSD_version >= 1000029
2209 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2210 mprsas_scsiio_timeout, cm, 0);
2211 #else //__FreeBSD_version < 1000029
2212 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2213 mprsas_scsiio_timeout, cm);
2214 #endif //__FreeBSD_version >= 1000029
2217 targ->outstanding++;
2218 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2219 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2221 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2222 __func__, cm, ccb, targ->outstanding);
2224 mpr_map_command(sc, cm);
2229 * mpr_sc_failed_io_info - log details of a non-successful SCSI_IO request
2232 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2233 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2237 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2238 MPI2_IOCSTATUS_MASK;
2239 u8 scsi_state = mpi_reply->SCSIState;
2240 u8 scsi_status = mpi_reply->SCSIStatus;
2241 char *desc_ioc_state = NULL;
2242 char *desc_scsi_status = NULL;
2243 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 is filtered out; presumably a known benign loginfo code —
 * confirm against the full source. */
2245 if (log_info == 0x31170000)
/* Map the numeric status codes to human-readable strings. */
2248 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2250 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2253 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2254 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2255 if (targ->encl_level_valid) {
2256 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2257 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2258 targ->connector_name);
2262 * We can add more detail about underflow data here
2265 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2266 "scsi_state %b\n", desc_scsi_status, scsi_status,
2267 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2268 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
/* Dump autosense data when XINFO debugging is on and sense is valid. */
2270 if (sc->mpr_debug & MPR_XINFO &&
2271 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2272 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2273 scsi_sense_print(csio);
2274 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2277 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2278 response_info = le32toh(mpi_reply->ResponseInfo);
2279 response_bytes = (u8 *)&response_info;
2280 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2282 mpr_describe_table(mpr_scsi_taskmgmt_string,
2283 response_bytes[0]));
2287 /** mprsas_nvme_trans_status_code
2289  * Convert Native NVMe command error status to
2290  * equivalent SCSI error status.
2292  * Returns appropriate scsi_status
2295 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2296     struct mpr_command *cm)
/*
 * Split the 16-bit NVMe status into Status Code Type (sct) and Status
 * Code (sc), then seed a conservative default translation of
 * CHECK CONDITION / ILLEGAL REQUEST before the switch refines it.
 */
2298 u8 status = MPI2_SCSI_STATUS_GOOD;
2299 int skey, asc, ascq;
2300 union ccb *ccb = cm->cm_complete_data;
2301 int returned_sense_len;
2304 sct = NVME_STATUS_GET_SCT(nvme_status);
2305 sc = NVME_STATUS_GET_SC(nvme_status);
2307 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2308 skey = SSD_KEY_ILLEGAL_REQUEST;
2309 asc = SCSI_ASC_NO_SENSE;
2310 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Generic command set status codes (SCT 0). */
2313 case NVME_SCT_GENERIC:
2315 case NVME_SC_SUCCESS:
2316 status = MPI2_SCSI_STATUS_GOOD;
2317 skey = SSD_KEY_NO_SENSE;
2318 asc = SCSI_ASC_NO_SENSE;
2319 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2321 case NVME_SC_INVALID_OPCODE:
2322 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2323 skey = SSD_KEY_ILLEGAL_REQUEST;
2324 asc = SCSI_ASC_ILLEGAL_COMMAND;
2325 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2327 case NVME_SC_INVALID_FIELD:
2328 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2329 skey = SSD_KEY_ILLEGAL_REQUEST;
2330 asc = SCSI_ASC_INVALID_CDB;
2331 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2333 case NVME_SC_DATA_TRANSFER_ERROR:
2334 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2335 skey = SSD_KEY_MEDIUM_ERROR;
2336 asc = SCSI_ASC_NO_SENSE;
2337 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2339 case NVME_SC_ABORTED_POWER_LOSS:
2340 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2341 skey = SSD_KEY_ABORTED_COMMAND;
2342 asc = SCSI_ASC_WARNING;
2343 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2345 case NVME_SC_INTERNAL_DEVICE_ERROR:
2346 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2347 skey = SSD_KEY_HARDWARE_ERROR;
2348 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2349 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* All abort flavors collapse to TASK ABORTED / ABORTED COMMAND. */
2351 case NVME_SC_ABORTED_BY_REQUEST:
2352 case NVME_SC_ABORTED_SQ_DELETION:
2353 case NVME_SC_ABORTED_FAILED_FUSED:
2354 case NVME_SC_ABORTED_MISSING_FUSED:
2355 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2356 skey = SSD_KEY_ABORTED_COMMAND;
2357 asc = SCSI_ASC_NO_SENSE;
2358 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2360 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2361 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2362 skey = SSD_KEY_ILLEGAL_REQUEST;
2363 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2364 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2366 case NVME_SC_LBA_OUT_OF_RANGE:
2367 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2368 skey = SSD_KEY_ILLEGAL_REQUEST;
2369 asc = SCSI_ASC_ILLEGAL_BLOCK;
2370 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2372 case NVME_SC_CAPACITY_EXCEEDED:
2373 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2374 skey = SSD_KEY_MEDIUM_ERROR;
2375 asc = SCSI_ASC_NO_SENSE;
2376 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2378 case NVME_SC_NAMESPACE_NOT_READY:
2379 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2380 skey = SSD_KEY_NOT_READY;
2381 asc = SCSI_ASC_LUN_NOT_READY;
2382 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Command-specific status codes (SCT 1). */
2386 case NVME_SCT_COMMAND_SPECIFIC:
2388 case NVME_SC_INVALID_FORMAT:
2389 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2390 skey = SSD_KEY_ILLEGAL_REQUEST;
2391 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2392 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2394 case NVME_SC_CONFLICTING_ATTRIBUTES:
2395 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2396 skey = SSD_KEY_ILLEGAL_REQUEST;
2397 asc = SCSI_ASC_INVALID_CDB;
2398 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Media and data-integrity status codes (SCT 2), incl. PI tag checks. */
2402 case NVME_SCT_MEDIA_ERROR:
2404 case NVME_SC_WRITE_FAULTS:
2405 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2406 skey = SSD_KEY_MEDIUM_ERROR;
2407 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2408 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2410 case NVME_SC_UNRECOVERED_READ_ERROR:
2411 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2412 skey = SSD_KEY_MEDIUM_ERROR;
2413 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2414 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2416 case NVME_SC_GUARD_CHECK_ERROR:
2417 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2418 skey = SSD_KEY_MEDIUM_ERROR;
2419 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2420 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2422 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2423 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2424 skey = SSD_KEY_MEDIUM_ERROR;
2425 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2426 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2428 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2429 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2430 skey = SSD_KEY_MEDIUM_ERROR;
2431 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2432 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2434 case NVME_SC_COMPARE_FAILURE:
2435 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2436 skey = SSD_KEY_MISCOMPARE;
2437 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2438 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2440 case NVME_SC_ACCESS_DENIED:
2441 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2442 skey = SSD_KEY_ILLEGAL_REQUEST;
2443 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2444 ascq = SCSI_ASCQ_INVALID_LUN_ID;
/*
 * Build fixed-format autosense data in the CCB from the translated
 * key/asc/ascq, compute the sense residual, and flag the sense data as
 * valid so CAM will consume it.
 */
2450 returned_sense_len = sizeof(struct scsi_sense_data);
2451 if (returned_sense_len < ccb->csio.sense_len)
2452 ccb->csio.sense_resid = ccb->csio.sense_len -
2455 ccb->csio.sense_resid = 0;
2457 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2458 1, skey, asc, ascq, SSD_ELEM_NONE);
2459 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2464 /** mprsas_complete_nvme_unmap
2466  * Complete native NVMe command issued using NVMe Encapsulated
/*
 * If the firmware reported an NVMe error response for the encapsulated
 * command, the raw NVMe completion was stashed in cm->cm_sense; translate
 * its status field into an equivalent SCSI status (which also fills in
 * autosense data on the CCB). Otherwise the default of
 * MPI2_SCSI_STATUS_GOOD stands.
 */
2470 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2472 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2473 struct nvme_completion *nvme_completion = NULL;
2474 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2476 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
2477 if (le16toh(mpi_reply->ErrorResponseCount)){
2478 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2479 scsi_status = mprsas_nvme_trans_status_code(
2480 nvme_completion->status, cm);
/*
 * Completion handler for XPT_SCSI_IO requests. Translates the MPI reply
 * (IOCStatus / SCSIStatus / SCSIState) into a CAM ccb status, copies
 * autosense data, maintains TLR and SSU bookkeeping, manages SIM-queue
 * freeze/unfreeze, and finally frees the command. Called with the softc
 * mutex held (asserted below).
 */
2486 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2488 MPI2_SCSI_IO_REPLY *rep;
2490 struct ccb_scsiio *csio;
2491 struct mprsas_softc *sassc;
2492 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2493 u8 *TLR_bits, TLR_on, *scsi_cdb;
2496 struct mprsas_target *target;
2497 target_id_t target_id;
2500 mpr_dprint(sc, MPR_TRACE,
2501 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2502 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2503 cm->cm_targ->outstanding);
/* The command completed; cancel its per-command timeout callout. */
2505 callout_stop(&cm->cm_callout);
2506 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2509 ccb = cm->cm_complete_data;
2511 target_id = csio->ccb_h.target_id;
2512 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2514 * XXX KDM if the chain allocation fails, does it matter if we do
2515 * the sync and unload here? It is simpler to do it in every case,
2516 * assuming it doesn't cause problems.
/* Sync and unload the data buffer DMA map before inspecting results. */
2518 if (cm->cm_data != NULL) {
2519 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2520 dir = BUS_DMASYNC_POSTREAD;
2521 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2522 dir = BUS_DMASYNC_POSTWRITE;
2523 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2524 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Per-target accounting: this command is no longer outstanding. */
2527 cm->cm_targ->completed++;
2528 cm->cm_targ->outstanding--;
2529 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2530 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
/*
 * Log completions that raced with error recovery: a command we had
 * already timed out, a completion while a task-management request for
 * this target is in flight, or a completion during a diag reset.
 */
2532 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2533 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2534 cm->cm_state = MPR_CM_STATE_BUSY;
2535 if (cm->cm_reply != NULL)
2536 mprsas_log_command(cm, MPR_RECOVERY,
2537 "completed timedout cm %p ccb %p during recovery "
2538 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2539 le16toh(rep->IOCStatus), rep->SCSIStatus,
2540 rep->SCSIState, le32toh(rep->TransferCount));
2542 mprsas_log_command(cm, MPR_RECOVERY,
2543 "completed timedout cm %p ccb %p during recovery\n",
2545 } else if (cm->cm_targ->tm != NULL) {
2546 if (cm->cm_reply != NULL)
2547 mprsas_log_command(cm, MPR_RECOVERY,
2548 "completed cm %p ccb %p during recovery "
2549 "ioc %x scsi %x state %x xfer %u\n",
2550 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2551 rep->SCSIStatus, rep->SCSIState,
2552 le32toh(rep->TransferCount));
2554 mprsas_log_command(cm, MPR_RECOVERY,
2555 "completed cm %p ccb %p during recovery\n",
2557 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2558 mprsas_log_command(cm, MPR_RECOVERY,
2559 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2562 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2564 * We ran into an error after we tried to map the command,
2565 * so we're getting a callback without queueing the command
2566 * to the hardware. So we set the status here, and it will
2567 * be retained below. We'll go through the "fast path",
2568 * because there can be no reply when we haven't actually
2569 * gone out to the hardware.
2571 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2574 * Currently the only error included in the mask is
2575 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2576 * chain frames. We need to freeze the queue until we get
2577 * a command that completed without this error, which will
2578 * hopefully have some chain frames attached that we can
2579 * use. If we wanted to get smarter about it, we would
2580 * only unfreeze the queue in this condition when we're
2581 * sure that we're getting some chain frames back. That's
2582 * probably unnecessary.
2584 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2585 xpt_freeze_simq(sassc->sim, 1);
2586 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2587 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2588 "freezing SIM queue\n");
2593 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2594 * flag, and use it in a few places in the rest of this function for
2595 * convenience. Use the macro if available.
2597 #if __FreeBSD_version >= 1100103
2598 scsi_cdb = scsiio_cdb_ptr(csio);
2600 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2601 scsi_cdb = csio->cdb_io.cdb_ptr;
2603 scsi_cdb = csio->cdb_io.cdb_bytes;
2607 * If this is a Start Stop Unit command and it was issued by the driver
2608 * during shutdown, decrement the refcount to account for all of the
2609 * commands that were sent. All SSU commands should be completed before
2610 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2613 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2614 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2618 /* Take the fast path to completion */
2619 if (cm->cm_reply == NULL) {
/* No reply frame: success unless the earlier paths set a status. */
2620 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2621 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2622 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2624 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2625 csio->scsi_status = SCSI_STATUS_OK;
2627 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2628 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2629 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2630 mpr_dprint(sc, MPR_XINFO,
2631 "Unfreezing SIM queue\n");
2636 * There are two scenarios where the status won't be
2637 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2638 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2640 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2642 * Freeze the dev queue so that commands are
2643 * executed in the correct order after error
2646 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2647 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2649 mpr_free_command(sc, cm);
/*
 * A driver-translated UNMAP sent to an NVMe device completes via the
 * NVMe Encapsulated path; translate its completion back to SCSI status.
 * NOTE(review): one condition of this if is not visible in this view.
 */
2654 target = &sassc->targets[target_id];
2655 if (scsi_cdb[0] == UNMAP &&
2657 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2658 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2659 csio->scsi_status = rep->SCSIStatus;
2662 mprsas_log_command(cm, MPR_XINFO,
2663 "ioc %x scsi %x state %x xfer %u\n",
2664 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2665 le32toh(rep->TransferCount));
/* Map the masked IOCStatus from the reply onto a CAM ccb status. */
2667 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2668 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2669 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2671 case MPI2_IOCSTATUS_SUCCESS:
2672 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2673 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2674 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2675 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2677 /* Completion failed at the transport level. */
2678 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2679 MPI2_SCSI_STATE_TERMINATED)) {
2680 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2684 /* In a modern packetized environment, an autosense failure
2685 * implies that there's not much else that can be done to
2686 * recover the command.
2688 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2689 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2694 * CAM doesn't care about SAS Response Info data, but if this is
2695 * the state check if TLR should be done. If not, clear the
2696 * TLR_bits for the target.
2698 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2699 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2700 == MPR_SCSI_RI_INVALID_FRAME)) {
2701 sc->mapping_table[target_id].TLR_bits =
2702 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2706 * Intentionally override the normal SCSI status reporting
2707 * for these two cases. These are likely to happen in a
2708 * multi-initiator environment, and we want to make sure that
2709 * CAM retries these commands rather than fail them.
2711 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2712 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2713 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2717 /* Handle normal status and sense */
2718 csio->scsi_status = rep->SCSIStatus;
2719 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2720 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
2722 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
/* Copy autosense data from the command's sense buffer into the CCB. */
2724 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2725 int sense_len, returned_sense_len;
2727 returned_sense_len = min(le32toh(rep->SenseCount),
2728 sizeof(struct scsi_sense_data));
2729 if (returned_sense_len < csio->sense_len)
2730 csio->sense_resid = csio->sense_len -
2733 csio->sense_resid = 0;
2735 sense_len = min(returned_sense_len,
2736 csio->sense_len - csio->sense_resid);
2737 bzero(&csio->sense_data, sizeof(csio->sense_data));
2738 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2739 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2743 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2744 * and it's page code 0 (Supported Page List), and there is
2745 * inquiry data, and this is for a sequential access device, and
2746 * the device is an SSP target, and TLR is supported by the
2747 * controller, turn the TLR_bits value ON if page 0x90 is
2750 if ((scsi_cdb[0] == INQUIRY) &&
2751 (scsi_cdb[1] & SI_EVPD) &&
2752 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2753 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2754 (csio->data_ptr != NULL) &&
2755 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2756 (sc->control_TLR) &&
2757 (sc->mapping_table[target_id].device_info &
2758 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2759 vpd_list = (struct scsi_vpd_supported_page_list *)
2761 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2762 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2763 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2764 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2765 alloc_len -= csio->resid;
2766 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2767 if (vpd_list->list[i] == 0x90) {
2775 * If this is a SATA direct-access end device, mark it so that
2776 * a SCSI StartStopUnit command will be sent to it when the
2777 * driver is being shutdown.
2779 if ((scsi_cdb[0] == INQUIRY) &&
2780 (csio->data_ptr != NULL) &&
2781 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2782 (sc->mapping_table[target_id].device_info &
2783 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2784 ((sc->mapping_table[target_id].device_info &
2785 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2786 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2787 target = &sassc->targets[target_id];
2788 target->supports_SSU = TRUE;
2789 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2793 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2794 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2796 * If devinfo is 0 this will be a volume. In that case don't
2797 * tell CAM that the volume is not there. We want volumes to
2798 * be enumerated until they are deleted/removed, not just
2801 if (cm->cm_targ->devinfo == 0)
2802 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2804 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2806 case MPI2_IOCSTATUS_INVALID_SGL:
2807 mpr_print_scsiio_cmd(sc, cm);
2808 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2810 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2812 * This is one of the responses that comes back when an I/O
2813 * has been aborted. If it is because of a timeout that we
2814 * initiated, just set the status to CAM_CMD_TIMEOUT.
2815 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2816 * command is the same (it gets retried, subject to the
2817 * retry counter), the only difference is what gets printed
2820 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2821 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2823 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2825 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2826 /* resid is ignored for this condition */
2828 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2830 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2831 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2833 * These can sometimes be transient transport-related
2834 * errors, and sometimes persistent drive-related errors.
2835 * We used to retry these without decrementing the retry
2836 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2837 * we hit a persistent drive problem that returns one of
2838 * these error codes, we would retry indefinitely. So,
2839 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2840 * count and avoid infinite retries. We're taking the
2841 * potential risk of flagging false failures in the event
2842 * of a topology-related error (e.g. a SAS expander problem
2843 * causes a command addressed to a drive to fail), but
2844 * avoiding getting into an infinite retry loop.
2846 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2847 mpr_dprint(sc, MPR_INFO,
2848 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2849 mpr_describe_table(mpr_iocstatus_string,
2850 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2851 target_id, cm->cm_desc.Default.SMID,
2852 le32toh(rep->IOCLogInfo));
2853 mpr_dprint(sc, MPR_XINFO,
2854 "SCSIStatus %x SCSIState %x xfercount %u\n",
2855 rep->SCSIStatus, rep->SCSIState,
2856 le32toh(rep->TransferCount));
/* Remaining IOC statuses: log, report full residual, and fail. */
2858 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2859 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2860 case MPI2_IOCSTATUS_INVALID_VPID:
2861 case MPI2_IOCSTATUS_INVALID_FIELD:
2862 case MPI2_IOCSTATUS_INVALID_STATE:
2863 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2864 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2865 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2866 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2867 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2869 mprsas_log_command(cm, MPR_XINFO,
2870 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2871 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2872 rep->SCSIStatus, rep->SCSIState,
2873 le32toh(rep->TransferCount));
2874 csio->resid = cm->cm_length;
2876 if (scsi_cdb[0] == UNMAP &&
2878 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2879 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2881 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2886 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
/* Release a previously frozen SIM queue now that a command completed. */
2888 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2889 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2890 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2891 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
/* On any non-success status, freeze the devq to preserve ordering. */
2895 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2896 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2897 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2900 mpr_free_command(sc, cm);
2904 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests. Validates the MPI
 * reply and the SMP frame result, sets the CAM status accordingly, then
 * syncs/unloads the DMA map and frees the command.
 */
2906 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2908 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2909 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2913 ccb = cm->cm_complete_data;
2916 * Currently there should be no way we can hit this case. It only
2917 * happens when we have a failure to allocate chain frames, and SMP
2918 * commands require two S/G elements only. That should be handled
2919 * in the standard request size.
2921 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2922 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2923 "request!\n", __func__, cm->cm_flags);
2924 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2928 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2930 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2931 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request. */
2935 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2936 sasaddr = le32toh(req->SASAddress.Low);
2937 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2939 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2940 MPI2_IOCSTATUS_SUCCESS ||
2941 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2942 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2943 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2944 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2948 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2949 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame carries the function result. */
2951 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2952 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2954 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2958 * We sync in both directions because we had DMAs in the S/G list
2959 * in both directions.
2961 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2962 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2963 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2964 mpr_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for the given CCB to the
 * device at 'sasaddr'. Extracts the request/response buffers (handling
 * single-entry S/G lists), fills in an MPI2_SMP_PASSTHROUGH_REQUEST,
 * sets up a two-element uio (request + response) for the mapping code,
 * and hands the command to mpr_map_command(). Completion is handled by
 * mprsas_smpio_complete().
 */
2969 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2971 struct mpr_command *cm;
2972 uint8_t *request, *response;
2973 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2974 struct mpr_softc *sc;
/* Newer CAM: data location is encoded in CAM_DATA_MASK flag bits. */
2982 #if (__FreeBSD_version >= 1000028) || \
2983     ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2984 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2985 case CAM_DATA_PADDR:
2986 case CAM_DATA_SG_PADDR:
2988 * XXX We don't yet support physical addresses here.
2990 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2991 "supported\n", __func__);
2992 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2997 * The chip does not support more than one buffer for the
2998 * request or response.
3000 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3001 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3002 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3003 "response buffer segments not supported for SMP\n",
3005 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3011 * The CAM_SCATTER_VALID flag was originally implemented
3012 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3013 * We have two. So, just take that flag to mean that we
3014 * might have S/G lists, and look at the S/G segment count
3015 * to figure out whether that is the case for each individual
3018 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3019 bus_dma_segment_t *req_sg;
3021 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3022 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3024 request = ccb->smpio.smp_request;
3026 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3027 bus_dma_segment_t *rsp_sg;
3029 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3030 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3032 response = ccb->smpio.smp_response;
3034 case CAM_DATA_VADDR:
3035 request = ccb->smpio.smp_request;
3036 response = ccb->smpio.smp_response;
3039 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3043 #else /* __FreeBSD_version < 1000028 */
3045 * XXX We don't yet support physical addresses here.
3047 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3048 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3049 "supported\n", __func__);
3050 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3056 * If the user wants to send an S/G list, check to make sure they
3057 * have single buffers.
3059 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3061 * The chip does not support more than one buffer for the
3062 * request or response.
3064 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3065 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3066 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3067 "response buffer segments not supported for SMP\n",
3069 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3075 * The CAM_SCATTER_VALID flag was originally implemented
3076 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3077 * We have two. So, just take that flag to mean that we
3078 * might have S/G lists, and look at the S/G segment count
3079 * to figure out whether that is the case for each individual
3082 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3083 bus_dma_segment_t *req_sg;
3085 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3086 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3088 request = ccb->smpio.smp_request;
3090 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3091 bus_dma_segment_t *rsp_sg;
3093 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3094 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3096 response = ccb->smpio.smp_response;
3098 request = ccb->smpio.smp_request;
3099 response = ccb->smpio.smp_response;
3101 #endif /* __FreeBSD_version < 1000028 */
3103 cm = mpr_alloc_command(sc);
3105 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3107 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the SMP passthrough request frame. */
3112 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3113 bzero(req, sizeof(*req));
3114 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3116 /* Allow the chip to use any route to this SAS address. */
3117 req->PhysicalPort = 0xff;
3119 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3121 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3123 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3124 "%#jx\n", __func__, (uintmax_t)sasaddr);
3126 mpr_init_sge(cm, req, &req->SGL);
3129 * Set up a uio to pass into mpr_map_command(). This allows us to
3130 * do one map command, and one busdma call in there.
3132 cm->cm_uio.uio_iov = cm->cm_iovec;
3133 cm->cm_uio.uio_iovcnt = 2;
3134 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3137 * The read/write flag isn't used by busdma, but set it just in
3138 * case. This isn't exactly accurate, either, since we're going in
3141 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = SMP request buffer, iovec[1] = SMP response buffer. */
3143 cm->cm_iovec[0].iov_base = request;
3144 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3145 cm->cm_iovec[1].iov_base = response;
3146 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3148 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3149 cm->cm_iovec[1].iov_len;
3152 * Trigger a warning message in mpr_data_cb() for the user if we
3153 * wind up exceeding two S/G segments. The chip expects one
3154 * segment for the request and another for the response.
3156 cm->cm_max_segs = 2;
3158 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3159 cm->cm_complete = mprsas_smpio_complete;
3160 cm->cm_complete_data = ccb;
3163 * Tell the mapping code that we're using a uio, and that this is
3164 * an SMP passthrough request. There is a little special-case
3165 * logic there (in mpr_data_cb()) to handle the bidirectional
3168 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3169 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3171 /* The chip data format is little endian. */
3172 req->SASAddress.High = htole32(sasaddr >> 32);
3173 req->SASAddress.Low = htole32(sasaddr);
3176 * XXX Note that we don't have a timeout/abort mechanism here.
3177 * From the manual, it looks like task management requests only
3178 * work for SCSI IO and SATA passthrough requests. We may need to
3179 * have a mechanism to retry requests in the event of a chip reset
3180 * at least. Hopefully the chip will insure that any errors short
3181 * of that are relayed back to the driver.
3183 error = mpr_map_command(sc, cm);
3184 if ((error != 0) && (error != EINPROGRESS)) {
3185 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3186 "mpr_map_command()\n", __func__, error);
/* Failure path: release the command and fail the CCB. */
3193 mpr_free_command(sc, cm);
3194 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address that should
 * receive the SMP request (the target itself if it has an embedded SMP
 * target, otherwise its parent expander) and dispatch the request via
 * mprsas_send_smpcmd(). Fails the CCB with CAM_SEL_TIMEOUT or
 * CAM_DEV_NOT_THERE when no usable address can be determined.
 */
3200 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3202 struct mpr_softc *sc;
3203 struct mprsas_target *targ;
3204 uint64_t sasaddr = 0;
3209 * Make sure the target exists.
3211 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3212     ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3213 targ = &sassc->targets[ccb->ccb_h.target_id];
3214 if (targ->handle == 0x0) {
3215 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3216 __func__, ccb->ccb_h.target_id);
3217 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3223 * If this device has an embedded SMP target, we'll talk to it
3225 * figure out what the expander's address is.
3227 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3228 sasaddr = targ->sasaddr;
3231 * If we don't have a SAS address for the expander yet, try
3232 * grabbing it from the page 0x83 information cached in the
3233 * transport layer for this target. LSI expanders report the
3234 * expander SAS address as the port-associated SAS address in
3235 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3238 * XXX KDM disable this for now, but leave it commented out so that
3239 * it is obvious that this is another possible way to get the SAS
3242 * The parent handle method below is a little more reliable, and
3243 * the other benefit is that it works for devices other than SES
3244 * devices. So you can send a SMP request to a da(4) device and it
3245 * will get routed to the expander that device is attached to.
3246 * (Assuming the da(4) device doesn't contain an SMP target...)
3250 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3254 * If we still don't have a SAS address for the expander, look for
3255 * the parent device of this device, which is probably the expander.
3258 #ifdef OLD_MPR_PROBE
3259 struct mprsas_target *parent_target;
3262 if (targ->parent_handle == 0x0) {
3263 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3264 "a valid parent handle!\n", __func__, targ->handle);
3265 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3268 #ifdef OLD_MPR_PROBE
/* Legacy probe path: look the parent up by handle in the target table. */
3269 parent_target = mprsas_find_target_by_handle(sassc, 0,
3270 targ->parent_handle);
3272 if (parent_target == NULL) {
3273 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3274 "a valid parent target!\n", __func__, targ->handle);
3275 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3279 if ((parent_target->devinfo &
3280 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3281 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3282 "does not have an SMP target!\n", __func__,
3283 targ->handle, parent_target->handle);
3284 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3288 sasaddr = parent_target->sasaddr;
3289 #else /* OLD_MPR_PROBE */
/* Current probe path: parent devinfo/sasaddr are cached on the target. */
3290 if ((targ->parent_devinfo &
3291 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3292 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3293 "does not have an SMP target!\n", __func__,
3294 targ->handle, targ->parent_handle);
3295 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3299 if (targ->parent_sasaddr == 0x0) {
3300 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3301 "%d does not have a valid SAS address!\n", __func__,
3302 targ->handle, targ->parent_handle);
3303 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3307 sasaddr = targ->parent_sasaddr;
3308 #endif /* OLD_MPR_PROBE */
/* Still no address: nothing we can route the SMP request to. */
3313 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3314 "handle %d\n", __func__, targ->handle);
3315 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3318 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3326 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command and
 * issue a target reset to the addressed device. For SAS/SATA targets
 * (or when custom NVMe TM handling is enabled) a link reset is
 * requested; otherwise a PCIe protocol-level reset is used for NVMe.
 * Completion is handled by mprsas_resetdev_complete().
 */
3329 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3331 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3332 struct mpr_softc *sc;
3333 struct mpr_command *tm;
3334 struct mprsas_target *targ;
3336 MPR_FUNCTRACE(sassc->sc);
3337 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3339 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3340     "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3342 tm = mprsas_alloc_tm(sc);
3344 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3345 "mprsas_action_resetdev\n");
3346 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the task-management request for a target reset. */
3351 targ = &sassc->targets[ccb->ccb_h.target_id];
3352 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3353 req->DevHandle = htole16(targ->handle);
3354 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3356 if (!targ->is_nvme || sc->custom_nvme_tm_handling) {
3357 /* SAS Hard Link Reset / SATA Link Reset */
3358 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3360 /* PCIe Protocol Level Reset*/
3362 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3366 tm->cm_complete = mprsas_resetdev_complete;
3367 tm->cm_complete_data = ccb;
3369 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3370 __func__, targ->tid);
3373 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3374 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_action_resetdev().  Translates the firmware reply into a CAM
 * status on the stashed CCB and frees the TM command.
 *
 * NOTE(review): excerpt is missing lines (return type, some braces and
 * the early-exit path) — verify against the full source.
 */
3378 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3380 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3384 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3386 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
/* The CCB to complete was stored at submit time. */
3387 ccb = tm->cm_complete_data;
3390 * Currently there should be no way we can hit this case. It only
3391 * happens when we have a failure to allocate chain frames, and
3392 * task management commands don't have S/G lists.
3394 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3395 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3397 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3399 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3400 "handle %#04x! This should not happen!\n", __func__,
3401 tm->cm_flags, req->DevHandle);
3402 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reply fields are little-endian; convert before logging/compare. */
3406 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3407 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3409 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3410 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
/* Announce a bus-device-reset to CAM so peripherals can recover. */
3411 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3415 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* TM frame is no longer needed regardless of outcome. */
3419 mprsas_free_tm(sc, tm);
/*
 * CAM polled-mode entry point (used e.g. during kernel dumps, when
 * interrupts are unavailable): run the interrupt handler by hand.
 * Per-event trace logging is disabled first because it would make
 * polled operation unusably slow.
 */
3424 mprsas_poll(struct cam_sim *sim)
3426 struct mprsas_softc *sassc;
3428 sassc = cam_sim_softc(sim);
3430 if (sassc->sc->mpr_debug & MPR_TRACE) {
3431 /* frequent debug messages during a panic just slow
3432 * everything down too much.
3434 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3436 sassc->sc->mpr_debug &= ~MPR_TRACE;
/* Process pending replies as if an interrupt had fired. */
3439 mpr_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  Two events are handled (the enclosing
 * switch(code) header falls in a gap of this excerpt):
 *
 *  AC_ADVINFO_CHANGED — a device's advanced info changed; if it is the
 *  long read-capacity data, re-query it (XPT_DEV_ADVINFO) and update the
 *  per-LUN EEDP (protection information) state.
 *
 *  AC_FOUND_DEVICE — a new device appeared; on older FreeBSD versions,
 *  probe it for EEDP support via mprsas_check_eedp().
 *
 * NOTE(review): excerpt is missing the switch header, break statements
 * and several braces — verify against the full source.
 */
3443 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3446 struct mpr_softc *sc;
3448 sc = (struct mpr_softc *)callback_arg;
3451 #if (__FreeBSD_version >= 1000006) || \
3452 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3453 case AC_ADVINFO_CHANGED: {
3454 struct mprsas_target *target;
3455 struct mprsas_softc *sassc;
3456 struct scsi_read_capacity_data_long rcap_buf;
3457 struct ccb_dev_advinfo cdai;
3458 struct mprsas_lun *lun;
/* The advinfo buffer type arrives as the opaque callback argument. */
3463 buftype = (uintptr_t)arg;
3469 * We're only interested in read capacity data changes.
3471 if (buftype != CDAI_TYPE_RCAPLONG)
3475 * See the comment in mpr_attach_sas() for a detailed
3476 * explanation. In these versions of FreeBSD we register
3477 * for all events and filter out the events that don't
3480 #if (__FreeBSD_version < 1000703) || \
3481 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3482 if (xpt_path_path_id(path) != sassc->sim->path_id)
3487 * We should have a handle for this, but check to make sure.
3489 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3490 ("Target %d out of bounds in mprsas_async\n",
3491 xpt_path_target_id(path)));
3492 target = &sassc->targets[xpt_path_target_id(path)];
/* handle == 0 means no firmware device handle: nothing to update. */
3493 if (target->handle == 0)
3496 lunid = xpt_path_lun_id(path);
/* Find an existing LUN record for this LUN id, if any. */
3498 SLIST_FOREACH(lun, &target->luns, lun_link) {
3499 if (lun->lun_id == lunid) {
/* Not seen before: allocate and link a new LUN record. */
3505 if (found_lun == 0) {
3506 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3509 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3510 "LUN for EEDP support.\n");
3513 lun->lun_id = lunid;
3514 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data from CAM. */
3517 bzero(&rcap_buf, sizeof(rcap_buf));
3518 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3519 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3520 cdai.ccb_h.flags = CAM_DIR_IN;
3521 cdai.buftype = CDAI_TYPE_RCAPLONG;
3522 #if (__FreeBSD_version >= 1100061) || \
3523 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3524 cdai.flags = CDAI_FLAG_NONE;
3528 cdai.bufsiz = sizeof(rcap_buf);
3529 cdai.buf = (uint8_t *)&rcap_buf;
3530 xpt_action((union ccb *)&cdai);
/* XPT_DEV_ADVINFO may freeze the devq; release it if so. */
3531 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3532 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* PROT_EN set means the LUN is formatted with protection info. */
3534 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3535 && (rcap_buf.prot & SRC16_PROT_EN)) {
3536 switch (rcap_buf.prot & SRC16_P_TYPE) {
3539 lun->eedp_formatted = TRUE;
3540 lun->eedp_block_size =
3541 scsi_4btoul(rcap_buf.length);
/* Unsupported protection type: treat as not EEDP-formatted. */
3545 lun->eedp_formatted = FALSE;
3546 lun->eedp_block_size = 0;
3550 lun->eedp_formatted = FALSE;
3551 lun->eedp_block_size = 0;
3556 case AC_FOUND_DEVICE: {
3557 struct ccb_getdev *cgd;
3560 * See the comment in mpr_attach_sas() for a detailed
3561 * explanation. In these versions of FreeBSD we register
3562 * for all events and filter out the events that don't
3565 #if (__FreeBSD_version < 1000703) || \
3566 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3567 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
/* Older-FreeBSD path: probe the new device for EEDP support. */
3572 #if (__FreeBSD_version < 901503) || \
3573 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3574 mprsas_check_eedp(sc, path, cgd);
3583 #if (__FreeBSD_version < 901503) || \
3584 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Determine whether a newly found device/LUN is formatted for EEDP
 * (end-to-end data protection).  If the inquiry data advertises
 * protection support, issue an internal READ CAPACITY(16); the result is
 * processed asynchronously by mprsas_read_cap_done(), which records the
 * EEDP state in the per-target LUN list.
 *
 * NOTE(review): excerpt is missing lines (locals such as 'pathid'/'ccb',
 * several braces/returns, the xpt_action() submission) — verify against
 * the full source.
 */
3586 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3587 struct ccb_getdev *cgd)
3589 struct mprsas_softc *sassc = sc->sassc;
3590 struct ccb_scsiio *csio;
3591 struct scsi_read_capacity_16 *scsi_cmd;
3592 struct scsi_read_capacity_eedp *rcap_buf;
3594 target_id_t targetid;
3597 struct cam_path *local_path;
3598 struct mprsas_target *target;
3599 struct mprsas_lun *lun;
3603 pathid = cam_sim_path(sassc->sim);
3604 targetid = xpt_path_target_id(path);
3605 lunid = xpt_path_lun_id(path);
3607 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3608 "mprsas_check_eedp\n", targetid));
3609 target = &sassc->targets[targetid];
/* No firmware handle: device is gone or not yet mapped. */
3610 if (target->handle == 0x0)
3614 * Determine if the device is EEDP capable.
3616 * If this flag is set in the inquiry data, the device supports
3617 * protection information, and must support the 16 byte read capacity
3618 * command, otherwise continue without sending read cap 16.
3620 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3624 * Issue a READ CAPACITY 16 command. This info is used to determine if
3625 * the LUN is formatted for EEDP support.
3627 ccb = xpt_alloc_ccb_nowait();
3629 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3634 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3636 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3643 * If LUN is already in list, don't create a new one.
3646 SLIST_FOREACH(lun, &target->luns, lun_link) {
3647 if (lun->lun_id == lunid) {
3653 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3656 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3658 xpt_free_path(local_path);
3662 lun->lun_id = lunid;
3663 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3666 xpt_path_string(local_path, path_str, sizeof(path_str));
3667 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3668 path_str, target->handle);
3671 * Issue a READ CAPACITY 16 command for the LUN. The
3672 * mprsas_read_cap_done function will load the read cap info into the
3675 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3677 if (rcap_buf == NULL) {
3678 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3679 "buffer for EEDP support.\n");
3680 xpt_free_path(ccb->ccb_h.path);
/* Build the SCSI I/O CCB by hand; driver-internal command. */
3684 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3686 csio->ccb_h.func_code = XPT_SCSI_IO;
3687 csio->ccb_h.flags = CAM_DIR_IN;
3688 csio->ccb_h.retry_count = 4;
3689 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3690 csio->ccb_h.timeout = 60000;
3691 csio->data_ptr = (uint8_t *)rcap_buf;
3692 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3693 csio->sense_len = MPR_SENSE_LEN;
3694 csio->cdb_len = sizeof(*scsi_cmd);
3695 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* CDB: SERVICE ACTION IN(16) / READ CAPACITY(16). 0x9E is the opcode;
 * byte 13 is the allocation length. */
3697 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3698 bzero(scsi_cmd, sizeof(*scsi_cmd));
3699 scsi_cmd->opcode = 0x9E;
3700 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3701 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion callback can find its state. */
3703 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internal READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Records the LUN's EEDP formatting state and
 * protected block size, then frees the data buffer, path, and CCB.
 *
 * NOTE(review): excerpt is missing lines (return-type line, some braces
 * and the loop-exit logic) — verify against the full source.
 */
3708 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3710 struct mprsas_softc *sassc;
3711 struct mprsas_target *target;
3712 struct mprsas_lun *lun;
3713 struct scsi_read_capacity_eedp *rcap_buf;
3715 if (done_ccb == NULL)
3718 /* Driver need to release devq, it Scsi command is
3719 * generated by driver internally.
3720 * Currently there is a single place where driver
3721 * calls scsi command internally. In future if driver
3722 * calls more scsi command internally, it needs to release
3723 * devq internally, since those command will not go back to
3726 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3727 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3728 xpt_release_devq(done_ccb->ccb_h.path,
3729 /*count*/ 1, /*run_queue*/TRUE);
3732 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3735 * Get the LUN ID for the path and look it up in the LUN list for the
3738 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3739 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3740 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3741 target = &sassc->targets[done_ccb->ccb_h.target_id];
3742 SLIST_FOREACH(lun, &target->luns, lun_link) {
3743 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3747 * Got the LUN in the target's LUN list. Fill it in with EEDP
3748 * info. If the READ CAP 16 command had some SCSI error (common
3749 * if command is not supported), mark the lun as not supporting
3750 * EEDP and set the block size to 0.
3752 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3753 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3754 lun->eedp_formatted = FALSE;
3755 lun->eedp_block_size = 0;
/* Low bit of the protect byte = PROT_EN: protection is enabled. */
3759 if (rcap_buf->protect & 0x01) {
3760 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3761 "%d is formatted for EEDP support.\n",
3762 done_ccb->ccb_h.target_lun,
3763 done_ccb->ccb_h.target_id);
3764 lun->eedp_formatted = TRUE;
3765 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3770 // Finished with this CCB and path.
3771 free(rcap_buf, M_MPR);
3772 xpt_free_path(done_ccb->ccb_h.path);
3773 xpt_free_ccb(done_ccb);
3775 #endif /* (__FreeBSD_version < 901503) || \
3776 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3779 * Set the INRESET flag for this target so that no I/O will be sent to
3780 * the target until the reset has completed. If an I/O request does
3781 * happen, the devq will be frozen. The CCB holds the path which is
3782 * used to release the devq. The devq is released and the CCB is freed
3783 * when the TM completes.
/*
 * NOTE(review): excerpt is missing lines (the NULL check after
 * xpt_alloc_ccb_nowait(), the CCB-attach to 'tm', and braces) — verify
 * against the full source.
 */
3786 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3787 struct mprsas_target *target, lun_id_t lun_id)
/* Allocate a CCB without sleeping; used only to hold the devq path. */
3792 ccb = xpt_alloc_ccb_nowait();
3794 path_id = cam_sim_path(sc->sassc->sim);
3795 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3796 target->tid, lun_id) != CAM_REQ_CMP) {
/* Associate the TM with the target and block further I/O to it. */
3800 tm->cm_targ = target;
3801 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off SAS topology discovery: flag that we are waiting for the
 * port-enable to finish (keeps the simq frozen) and send the MPI
 * PORT_ENABLE request to the IOC.
 */
3807 mprsas_startup(struct mpr_softc *sc)
3810 * Send the port enable message and set the wait_for_port_enable flag.
3811 * This flag helps to keep the simq frozen until all discovery events
3814 sc->wait_for_port_enable = 1;
3815 mprsas_send_portenable(sc);
/*
 * Build and submit an MPI2 PORT_ENABLE request to the IOC.  Completion
 * is handled by mprsas_portenable_complete().  Returns an error (in the
 * missing early-return path) if no command frame is available.
 *
 * NOTE(review): excerpt is missing lines (return statements, cm_data
 * setup, braces) — verify against the full source.
 */
3820 mprsas_send_portenable(struct mpr_softc *sc)
3822 MPI2_PORT_ENABLE_REQUEST *request;
3823 struct mpr_command *cm;
3827 if ((cm = mpr_alloc_command(sc)) == NULL)
3829 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3830 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3831 request->MsgFlags = 0;
/* Default request descriptor; port enable carries no S/G list. */
3833 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3834 cm->cm_complete = mprsas_portenable_complete;
3838 mpr_map_command(sc, cm);
3839 mpr_dprint(sc, MPR_XINFO,
3840 "mpr_send_portenable finished cm %p req %p complete %p\n",
3841 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT_ENABLE request.  Logs failures, frees
 * the command, clears wait_for_port_enable, and wakes anyone sleeping on
 * port_enable_complete; the startup refcount decrement may trigger the
 * initial bus rescan once discovery settles.
 *
 * NOTE(review): excerpt is missing lines (assignment of 'sassc', braces,
 * the reply NULL-check 'if') — verify against the full source.
 */
3846 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3848 MPI2_PORT_ENABLE_REPLY *reply;
3849 struct mprsas_softc *sassc;
3855 * Currently there should be no way we can hit this case. It only
3856 * happens when we have a failure to allocate chain frames, and
3857 * port enable commands don't have S/G lists.
3859 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3860 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3861 "This should not happen!\n", __func__, cm->cm_flags);
3864 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3866 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/* Reply IOCStatus is little-endian; mask off log-info bit first. */
3867 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3868 MPI2_IOCSTATUS_SUCCESS)
3869 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3871 mpr_free_command(sc, cm);
3873 * Done waiting for port enable to complete. Decrement the refcount.
3874 * If refcount is 0, discovery is complete and a rescan of the bus can
/* Wake threads sleeping on port_enable_complete (e.g. attach path). */
3877 sc->wait_for_port_enable = 0;
3878 sc->port_enable_complete = 1;
3879 wakeup(&sc->port_enable_complete);
3880 mprsas_startup_decrement(sassc);
/*
 * Check whether a target ID appears in the controller's comma-separated
 * exclude list (sc->exclude_ids — presumably populated from a tunable;
 * TODO confirm against mpr.c).  The missing return lines in this excerpt
 * report whether the ID is excluded.
 */
3884 mprsas_check_id(struct mprsas_softc *sassc, int id)
3886 struct mpr_softc *sc = sassc->sc;
3890 ids = &sc->exclude_ids[0];
/* strsep() walks the list in place, splitting on commas. */
3891 while((name = strsep(&ids, ",")) != NULL) {
/* Skip empty tokens produced by consecutive commas. */
3892 if (name[0] == '\0')
3894 if (strtol(name, NULL, 0) == (long)id)
3902 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3904 struct mprsas_softc *sassc;
3905 struct mprsas_lun *lun, *lun_tmp;
3906 struct mprsas_target *targ;
3911 * The number of targets is based on IOC Facts, so free all of
3912 * the allocated LUNs for each target and then the target buffer
3915 for (i=0; i< maxtargets; i++) {
3916 targ = &sassc->targets[i];
3917 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3921 free(sassc->targets, M_MPR);
3923 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3924 M_MPR, M_WAITOK|M_ZERO);
3925 if (!sassc->targets) {
3926 panic("%s failed to alloc targets with error %d\n",