2 * Copyright (c) 2009 Yahoo! Inc.
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 /* Communications core for Avago Technologies (LSI) MPT3 */
37 /* TODO Move headers to mprvar */
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/selinfo.h>
43 #include <sys/module.h>
47 #include <sys/malloc.h>
49 #include <sys/sysctl.h>
50 #include <sys/endian.h>
51 #include <sys/queue.h>
52 #include <sys/kthread.h>
53 #include <sys/taskqueue.h>
56 #include <machine/bus.h>
57 #include <machine/resource.h>
60 #include <machine/stdarg.h>
63 #include <cam/cam_ccb.h>
64 #include <cam/cam_debug.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_xpt_periph.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
71 #if __FreeBSD_version >= 900026
72 #include <cam/scsi/smp_all.h>
75 #include <dev/nvme/nvme.h>
77 #include <dev/mpr/mpi/mpi2_type.h>
78 #include <dev/mpr/mpi/mpi2.h>
79 #include <dev/mpr/mpi/mpi2_ioc.h>
80 #include <dev/mpr/mpi/mpi2_sas.h>
81 #include <dev/mpr/mpi/mpi2_pci.h>
82 #include <dev/mpr/mpi/mpi2_cnfg.h>
83 #include <dev/mpr/mpi/mpi2_init.h>
84 #include <dev/mpr/mpi/mpi2_tool.h>
85 #include <dev/mpr/mpr_ioctl.h>
86 #include <dev/mpr/mprvar.h>
87 #include <dev/mpr/mpr_table.h>
88 #include <dev/mpr/mpr_sas.h>
90 #define MPRSAS_DISCOVERY_TIMEOUT 20
91 #define MPRSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
94 * static array to check SCSI OpCode for EEDP protection bits
96 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
97 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
98 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 static uint8_t op_code_prot[256] = {
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
109 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
118 MALLOC_DEFINE(M_MPRSAS, "MPRSAS", "MPR SAS memory");
120 static void mprsas_remove_device(struct mpr_softc *, struct mpr_command *);
121 static void mprsas_remove_complete(struct mpr_softc *, struct mpr_command *);
122 static void mprsas_action(struct cam_sim *sim, union ccb *ccb);
123 static void mprsas_poll(struct cam_sim *sim);
124 static void mprsas_scsiio_timeout(void *data);
125 static void mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *cm);
126 static void mprsas_action_scsiio(struct mprsas_softc *, union ccb *);
127 static void mprsas_scsiio_complete(struct mpr_softc *, struct mpr_command *);
128 static void mprsas_action_resetdev(struct mprsas_softc *, union ccb *);
129 static void mprsas_resetdev_complete(struct mpr_softc *, struct mpr_command *);
130 static int mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
131 struct mpr_command *cm);
132 static void mprsas_async(void *callback_arg, uint32_t code,
133 struct cam_path *path, void *arg);
134 #if (__FreeBSD_version < 901503) || \
135 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
136 static void mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
137 struct ccb_getdev *cgd);
138 static void mprsas_read_cap_done(struct cam_periph *periph,
139 union ccb *done_ccb);
141 static int mprsas_send_portenable(struct mpr_softc *sc);
142 static void mprsas_portenable_complete(struct mpr_softc *sc,
143 struct mpr_command *cm);
145 #if __FreeBSD_version >= 900026
146 static void mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm);
147 static void mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb,
149 static void mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb);
150 #endif //FreeBSD_version >= 900026
152 struct mprsas_target *
153 mprsas_find_target_by_handle(struct mprsas_softc *sassc, int start,
156 struct mprsas_target *target;
159 for (i = start; i < sassc->maxtargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery. Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
176 mprsas_startup_increment(struct mprsas_softc *sassc)
178 MPR_FUNCTRACE(sassc->sc);
180 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
181 if (sassc->startup_refcount++ == 0) {
182 /* just starting, freeze the simq */
183 mpr_dprint(sassc->sc, MPR_INIT,
184 "%s freezing simq\n", __func__);
185 #if (__FreeBSD_version >= 1000039) || \
186 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
189 xpt_freeze_simq(sassc->sim, 1);
191 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
192 sassc->startup_refcount);
197 mprsas_release_simq_reinit(struct mprsas_softc *sassc)
199 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
200 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
201 xpt_release_simq(sassc->sim, 1);
202 mpr_dprint(sassc->sc, MPR_INFO, "Unfreezing SIM queue\n");
207 mprsas_startup_decrement(struct mprsas_softc *sassc)
209 MPR_FUNCTRACE(sassc->sc);
211 if ((sassc->flags & MPRSAS_IN_STARTUP) != 0) {
212 if (--sassc->startup_refcount == 0) {
213 /* finished all discovery-related actions, release
214 * the simq and rescan for the latest topology.
216 mpr_dprint(sassc->sc, MPR_INIT,
217 "%s releasing simq\n", __func__);
218 sassc->flags &= ~MPRSAS_IN_STARTUP;
219 xpt_release_simq(sassc->sim, 1);
220 #if (__FreeBSD_version >= 1000039) || \
221 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
224 mprsas_rescan_target(sassc->sc, NULL);
227 mpr_dprint(sassc->sc, MPR_INIT, "%s refcount %u\n", __func__,
228 sassc->startup_refcount);
233 * The firmware requires us to stop sending commands when we're doing task
236 * XXX The logic for serializing the device has been made lazy and moved to
237 * mprsas_prepare_for_tm().
/*
 * Allocate a high-priority command for use as a task-management request.
 * Returns NULL if the high-priority pool is exhausted; the caller is
 * responsible for handling that case.
 */
struct mpr_command *
mprsas_alloc_tm(struct mpr_softc *sc)
{
	struct mpr_command *tm;

	MPR_FUNCTRACE(sc);
	tm = mpr_alloc_high_priority_command(sc);
	return (tm);
}
250 mprsas_free_tm(struct mpr_softc *sc, struct mpr_command *tm)
252 int target_id = 0xFFFFFFFF;
259 * For TM's the devq is frozen for the device. Unfreeze it here and
260 * free the resources used for freezing the devq. Must clear the
261 * INRESET flag as well or scsi I/O will not work.
263 if (tm->cm_targ != NULL) {
264 tm->cm_targ->flags &= ~MPRSAS_TARGET_INRESET;
265 target_id = tm->cm_targ->tid;
268 mpr_dprint(sc, MPR_INFO, "Unfreezing devq for target ID %d\n",
270 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
271 xpt_free_path(tm->cm_ccb->ccb_h.path);
272 xpt_free_ccb(tm->cm_ccb);
275 mpr_free_high_priority_command(sc, tm);
279 mprsas_rescan_target(struct mpr_softc *sc, struct mprsas_target *targ)
281 struct mprsas_softc *sassc = sc->sassc;
283 target_id_t targetid;
287 pathid = cam_sim_path(sassc->sim);
289 targetid = CAM_TARGET_WILDCARD;
291 targetid = targ - sassc->targets;
294 * Allocate a CCB and schedule a rescan.
296 ccb = xpt_alloc_ccb_nowait();
298 mpr_dprint(sc, MPR_ERROR, "unable to alloc CCB for rescan\n");
302 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
303 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
304 mpr_dprint(sc, MPR_ERROR, "unable to create path for rescan\n");
309 if (targetid == CAM_TARGET_WILDCARD)
310 ccb->ccb_h.func_code = XPT_SCAN_BUS;
312 ccb->ccb_h.func_code = XPT_SCAN_TGT;
314 mpr_dprint(sc, MPR_TRACE, "%s targetid %u\n", __func__, targetid);
/*
 * mprsas_log_command: sbuf-based debug logger for one command.  Emits the
 * CAM path string (or a "noperiph" sim:bus:target:lun tuple when no CCB is
 * attached), the SCSI CDB and transfer length for XPT_SCSI_IO CCBs, the
 * request SMID, and finally the caller's printf-style message — all gated
 * on 'level' being enabled in mpr_debug.
 *
 * NOTE(review): this listing is a garbled extraction — the locals (sbuf,
 * str/path_str buffers, va_list), va_start/va_end, early return, and
 * closing braces were dropped between the surviving numbered lines;
 * restore them from the canonical mpr_sas.c before building.
 */
319 mprsas_log_command(struct mpr_command *cm, u_int level, const char *fmt, ...)
329 /* No need to be in here if debugging isn't enabled */
330 if ((cm->cm_sc->mpr_debug & level) == 0)
333 sbuf_new(&sb, str, sizeof(str), 0);
337 if (cm->cm_ccb != NULL) {
338 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
340 sbuf_cat(&sb, path_str);
341 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
342 scsi_command_string(&cm->cm_ccb->csio, &sb);
343 sbuf_printf(&sb, "length %d ",
344 cm->cm_ccb->csio.dxfer_len);
/* No CCB attached: identify the command by SIM coordinates instead. */
347 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
348 cam_sim_name(cm->cm_sc->sassc->sim),
349 cam_sim_unit(cm->cm_sc->sassc->sim),
350 cam_sim_bus(cm->cm_sc->sassc->sim),
351 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
355 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
356 sbuf_vprintf(&sb, fmt, ap);
358 mpr_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
/*
 * mprsas_remove_volume: completion handler for the target-reset TM issued
 * by mprsas_prepare_volume_remove().  On a NULL reply (diag reset in
 * flight) it just frees the TM.  Otherwise it logs the IOCStatus and
 * termination count, frees the reply frame, and — only when the reset
 * succeeded — clears the target's bookkeeping fields (handle, enclosure
 * info, link rate, flags) so the slot can be reused, then frees the TM.
 *
 * NOTE(review): garbled extraction — locals (handle, MPR_FUNCTRACE),
 * the reply NULL check, returns, several field clears (handle, phy_num,
 * devinfo, flags) and braces were dropped; restore from canonical source.
 */
364 mprsas_remove_volume(struct mpr_softc *sc, struct mpr_command *tm)
366 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
367 struct mprsas_target *targ;
372 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
373 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
377 /* XXX retry the remove after the diag reset completes? */
378 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
379 "0x%04x\n", __func__, handle);
380 mprsas_free_tm(sc, tm);
384 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
385 MPI2_IOCSTATUS_SUCCESS) {
386 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
387 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
390 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
391 le32toh(reply->TerminationCount))
392 mpr_free_reply(sc, tm->cm_reply_data);
393 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
395 mpr_dprint(sc, MPR_XINFO, "clearing target %u handle 0x%04x\n",
399 * Don't clear target if remove fails because things will get confusing.
400 * Leave the devname and sasaddr intact so that we know to avoid reusing
401 * this target id if possible, and so we can assign the same target id
402 * to this device if it comes back in the future.
404 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
405 MPI2_IOCSTATUS_SUCCESS) {
408 targ->encl_handle = 0x0;
409 targ->encl_level_valid = 0x0;
410 targ->encl_level = 0x0;
/* Connector name is blanked with spaces, not NULs (printed via %4s). */
411 targ->connector_name[0] = ' ';
412 targ->connector_name[1] = ' ';
413 targ->connector_name[2] = ' ';
414 targ->connector_name[3] = ' ';
415 targ->encl_slot = 0x0;
416 targ->exp_dev_handle = 0x0;
418 targ->linkrate = 0x0;
421 targ->scsi_req_desc_type = 0;
424 mprsas_free_tm(sc, tm);
429 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
430 * Otherwise Volume Delete is same as Bare Drive Removal.
/*
 * mprsas_prepare_volume_remove: begin removal of a RAID volume identified
 * by its firmware device handle.  Looks up the target, marks it
 * MPRSAS_TARGET_INREMOVAL, schedules a CAM rescan, and issues a
 * target-reset task management request whose completion handler
 * (mprsas_remove_volume) finishes the teardown.  Unlike bare-drive
 * removal, no SAS_OP_REMOVE_DEVICE follows for volumes.
 *
 * NOTE(review): garbled extraction — 'sc = sassc->sc', NULL checks with
 * returns, the high-priority descriptor setup (cm_targ/cm_data/
 * cm_desc.HighPriority.RequestFlags), and braces were dropped between the
 * surviving lines; restore from the canonical mpr_sas.c.
 */
433 mprsas_prepare_volume_remove(struct mprsas_softc *sassc, uint16_t handle)
435 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
436 struct mpr_softc *sc;
437 struct mpr_command *cm;
438 struct mprsas_target *targ = NULL;
440 MPR_FUNCTRACE(sassc->sc);
443 targ = mprsas_find_target_by_handle(sassc, 0, handle);
445 /* FIXME: what is the action? */
446 /* We don't know about this device? */
447 mpr_dprint(sc, MPR_ERROR,
448 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
452 targ->flags |= MPRSAS_TARGET_INREMOVAL;
454 cm = mprsas_alloc_tm(sc);
456 mpr_dprint(sc, MPR_ERROR,
457 "%s: command alloc failure\n", __func__);
461 mprsas_rescan_target(sc, targ);
463 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
/* NOTE(review): volume path stores the handle without htole16(); the
 * bare-drive path byte-swaps — presumably intentional for volumes, but
 * worth confirming against the canonical driver. */
464 req->DevHandle = targ->handle;
465 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
466 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
468 /* SAS Hard Link Reset / SATA Link Reset */
469 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
473 cm->cm_complete = mprsas_remove_volume;
474 cm->cm_complete_data = (void *)(uintptr_t)handle;
476 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
477 __func__, targ->tid);
478 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
480 mpr_map_command(sc, cm);
484 * The firmware performs debounce on the link to avoid transient link errors
485 * and false removals. When it does decide that link has been lost and a
486 * device needs to go away, it expects that the host will perform a target reset
487 * and then an op remove. The reset has the side-effect of aborting any
488 * outstanding requests for the device, which is required for the op-remove to
489 * succeed. It's not clear if the host should check for the device coming back
490 * alive after the reset.
/*
 * mprsas_prepare_remove: begin removal of a bare device identified by its
 * firmware device handle.  Marks the target INREMOVAL, schedules a CAM
 * rescan, and sends a target-reset TM; the completion handler
 * (mprsas_remove_device) then issues the SAS IO unit control
 * REMOVE_DEVICE operation the firmware expects after the reset.
 *
 * NOTE(review): garbled extraction — 'sc = sassc->sc', the targ/cm NULL
 * checks with returns, cm_targ/cm_data/descriptor setup, and braces were
 * dropped between the surviving lines; restore from canonical mpr_sas.c.
 */
493 mprsas_prepare_remove(struct mprsas_softc *sassc, uint16_t handle)
495 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
496 struct mpr_softc *sc;
497 struct mpr_command *cm;
498 struct mprsas_target *targ = NULL;
500 MPR_FUNCTRACE(sassc->sc);
504 targ = mprsas_find_target_by_handle(sassc, 0, handle);
506 /* FIXME: what is the action? */
507 /* We don't know about this device? */
508 mpr_dprint(sc, MPR_ERROR, "%s : invalid handle 0x%x \n",
513 targ->flags |= MPRSAS_TARGET_INREMOVAL;
515 cm = mprsas_alloc_tm(sc);
517 mpr_dprint(sc, MPR_ERROR, "%s: command alloc failure\n",
522 mprsas_rescan_target(sc, targ);
524 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
525 memset(req, 0, sizeof(*req));
526 req->DevHandle = htole16(targ->handle);
527 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
528 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
530 /* SAS Hard Link Reset / SATA Link Reset */
531 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
535 cm->cm_complete = mprsas_remove_device;
536 cm->cm_complete_data = (void *)(uintptr_t)handle;
538 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
539 __func__, targ->tid);
540 mprsas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
542 mpr_map_command(sc, cm);
/*
 * mprsas_remove_device: completion handler for the target-reset TM sent by
 * mprsas_prepare_remove().  After validating the reply it REUSES the same
 * command frame to send a SAS IO unit control REMOVE_DEVICE request
 * (completing in mprsas_remove_complete), then logs the enclosure info and
 * flushes any commands still queued on the target by completing them with
 * CAM_DEV_NOT_THERE.
 *
 * NOTE(review): garbled extraction — locals (handle, ccb), the reply NULL
 * test, returns, 'tm->cm_data = NULL', and closing braces were dropped
 * between the surviving lines; restore from the canonical mpr_sas.c.
 */
546 mprsas_remove_device(struct mpr_softc *sc, struct mpr_command *tm)
548 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
549 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
550 struct mprsas_target *targ;
551 struct mpr_command *next_cm;
556 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
557 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
561 * Currently there should be no way we can hit this case.  It only
562 * happens when we have a failure to allocate chain frames, and
563 * task management commands don't have S/G lists.
565 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
566 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for remove of "
567 "handle %#04x! This should not happen!\n", __func__,
568 tm->cm_flags, handle);
572 /* XXX retry the remove after the diag reset completes? */
573 mpr_dprint(sc, MPR_FAULT, "%s NULL reply resetting device "
574 "0x%04x\n", __func__, handle);
575 mprsas_free_tm(sc, tm);
579 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
580 MPI2_IOCSTATUS_SUCCESS) {
581 mpr_dprint(sc, MPR_ERROR, "IOCStatus = 0x%x while resetting "
582 "device 0x%x\n", le16toh(reply->IOCStatus), handle);
585 mpr_dprint(sc, MPR_XINFO, "Reset aborted %u commands\n",
586 le32toh(reply->TerminationCount));
587 mpr_free_reply(sc, tm->cm_reply_data);
588 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
590 /* Reuse the existing command */
591 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
592 memset(req, 0, sizeof(*req));
593 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
594 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
595 req->DevHandle = htole16(handle);
597 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
598 tm->cm_complete = mprsas_remove_complete;
599 tm->cm_complete_data = (void *)(uintptr_t)handle;
601 mpr_map_command(sc, tm);
603 mpr_dprint(sc, MPR_INFO, "clearing target %u handle 0x%04x\n",
605 if (targ->encl_level_valid) {
606 mpr_dprint(sc, MPR_INFO, "At enclosure level %d, slot %d, "
607 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
608 targ->connector_name);
/* Fail any I/O still queued on the target: the device is gone. */
610 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
613 mpr_dprint(sc, MPR_XINFO, "Completing missed command %p\n", tm);
614 ccb = tm->cm_complete_data;
615 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
616 mprsas_scsiio_complete(sc, tm);
/*
 * mprsas_remove_complete: completion handler for the SAS IO unit control
 * REMOVE_DEVICE request issued by mprsas_remove_device().  Bails out
 * (freeing the TM) on chain-frame errors or a NULL reply (chip reset).
 * On success it clears the target's bookkeeping fields and frees the
 * per-LUN records; on failure the devname/sasaddr are deliberately left
 * intact (see comment below), then the TM is freed.
 *
 * NOTE(review): garbled extraction — locals (handle, MPR_FUNCTRACE),
 * returns, several field clears (handle, phy_num, devinfo, flags), the
 * free(lun, ...) inside the SLIST drain, and braces were dropped between
 * the surviving lines; restore from the canonical mpr_sas.c.
 */
621 mprsas_remove_complete(struct mpr_softc *sc, struct mpr_command *tm)
623 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
625 struct mprsas_target *targ;
626 struct mprsas_lun *lun;
630 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
631 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
634 * Currently there should be no way we can hit this case.  It only
635 * happens when we have a failure to allocate chain frames, and
636 * task management commands don't have S/G lists.
638 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
639 mpr_dprint(sc, MPR_XINFO, "%s: cm_flags = %#x for remove of "
640 "handle %#04x! This should not happen!\n", __func__,
641 tm->cm_flags, handle);
642 mprsas_free_tm(sc, tm);
647 /* most likely a chip reset */
648 mpr_dprint(sc, MPR_FAULT, "%s NULL reply removing device "
649 "0x%04x\n", __func__, handle);
650 mprsas_free_tm(sc, tm);
654 mpr_dprint(sc, MPR_XINFO, "%s on handle 0x%04x, IOCStatus= 0x%x\n",
655 __func__, handle, le16toh(reply->IOCStatus));
658 * Don't clear target if remove fails because things will get confusing.
659 * Leave the devname and sasaddr intact so that we know to avoid reusing
660 * this target id if possible, and so we can assign the same target id
661 * to this device if it comes back in the future.
663 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
664 MPI2_IOCSTATUS_SUCCESS) {
667 targ->encl_handle = 0x0;
668 targ->encl_level_valid = 0x0;
669 targ->encl_level = 0x0;
/* Connector name is blanked with spaces, not NULs (printed via %4s). */
670 targ->connector_name[0] = ' ';
671 targ->connector_name[1] = ' ';
672 targ->connector_name[2] = ' ';
673 targ->connector_name[3] = ' ';
674 targ->encl_slot = 0x0;
675 targ->exp_dev_handle = 0x0;
677 targ->linkrate = 0x0;
680 targ->scsi_req_desc_type = 0;
/* Drain and free the per-LUN list now that the device is gone. */
682 while (!SLIST_EMPTY(&targ->luns)) {
683 lun = SLIST_FIRST(&targ->luns);
684 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
689 mprsas_free_tm(sc, tm);
693 mprsas_register_events(struct mpr_softc *sc)
698 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
699 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
700 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
701 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
702 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
703 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
704 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
705 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
706 setbit(events, MPI2_EVENT_IR_VOLUME);
707 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
708 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
709 setbit(events, MPI2_EVENT_TEMP_THRESHOLD);
710 setbit(events, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
711 if (sc->facts->MsgVersion >= MPI2_VERSION_02_06) {
712 setbit(events, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
713 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
714 setbit(events, MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
715 setbit(events, MPI2_EVENT_PCIE_ENUMERATION);
716 setbit(events, MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
720 mpr_register_events(sc, events, mprsas_evt_handler, NULL,
721 &sc->sassc->mprsas_eh);
/*
 * mpr_attach_sas: attach the CAM/SAS subsystem to an mpr controller.
 * Allocates the per-SAS softc and target array, creates the SIM queue,
 * SIM, and event-handling taskqueue, registers the SCSI bus with CAM,
 * freezes the simq until discovery completes, registers an async handler
 * for EEDP capability detection (best-effort — attach continues if it
 * fails), and finally registers for firmware events.
 *
 * NOTE(review): garbled extraction — locals (status, event), the
 * 'sc->sassc = sassc' assignment, every error-path 'goto out'/cleanup,
 * MPR_LOCK/MPR_UNLOCK pairs, '#else'/'#endif' lines, and the final
 * 'return (error)' were dropped between the surviving lines; restore from
 * the canonical mpr_sas.c before building.
 */
727 mpr_attach_sas(struct mpr_softc *sc)
729 struct mprsas_softc *sassc;
731 int unit, error = 0, reqs;
734 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
736 sassc = malloc(sizeof(struct mprsas_softc), M_MPR, M_WAITOK|M_ZERO);
738 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
739 "Cannot allocate SAS subsystem memory\n");
744 * XXX MaxTargets could change during a reinit.  Since we don't
745 * resize the targets[] array during such an event, cache the value
746 * of MaxTargets here so that we don't get into trouble later.  This
747 * should move into the reinit logic.
749 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
750 sassc->targets = malloc(sizeof(struct mprsas_target) *
751 sassc->maxtargets, M_MPR, M_WAITOK|M_ZERO);
752 if (!sassc->targets) {
753 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
754 "Cannot allocate SAS target memory\n");
/* Reserve the high-priority requests (and one extra) from the simq depth. */
761 reqs = sc->num_reqs - sc->num_prireqs - 1;
762 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
763 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIMQ\n");
768 unit = device_get_unit(sc->mpr_dev);
769 sassc->sim = cam_sim_alloc(mprsas_action, mprsas_poll, "mpr", sassc,
770 unit, &sc->mpr_mtx, reqs, reqs, sassc->devq);
771 if (sassc->sim == NULL) {
772 mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot allocate SIM\n");
777 TAILQ_INIT(&sassc->ev_queue);
779 /* Initialize taskqueue for Event Handling */
780 TASK_INIT(&sassc->ev_task, 0, mprsas_firmware_event_work, sc);
781 sassc->ev_tq = taskqueue_create("mpr_taskq", M_NOWAIT | M_ZERO,
782 taskqueue_thread_enqueue, &sassc->ev_tq);
783 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
784 device_get_nameunit(sc->mpr_dev));
789 * XXX There should be a bus for every port on the adapter, but since
790 * we're just going to fake the topology for now, we'll pretend that
791 * everything is just a target on a single bus.
793 if ((error = xpt_bus_register(sassc->sim, sc->mpr_dev, 0)) != 0) {
794 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
795 "Error %d registering SCSI bus\n", error);
801 * Assume that discovery events will start right away.
803 * Hold off boot until discovery is complete.
805 sassc->flags |= MPRSAS_IN_STARTUP | MPRSAS_IN_DISCOVERY;
806 sc->sassc->startup_refcount = 0;
807 mprsas_startup_increment(sassc);
809 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
812 * Register for async events so we can determine the EEDP
813 * capabilities of devices.
815 status = xpt_create_path(&sassc->path, /*periph*/NULL,
816 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
818 if (status != CAM_REQ_CMP) {
819 mpr_dprint(sc, MPR_INIT|MPR_ERROR,
820 "Error %#x creating sim path\n", status);
/* Older CAM lacks AC_ADVINFO_CHANGED; fall back to AC_FOUND_DEVICE only. */
825 #if (__FreeBSD_version >= 1000006) || \
826 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
827 event = AC_ADVINFO_CHANGED | AC_FOUND_DEVICE;
829 event = AC_FOUND_DEVICE;
833 * Prior to the CAM locking improvements, we can't call
834 * xpt_register_async() with a particular path specified.
836 * If a path isn't specified, xpt_register_async() will
837 * generate a wildcard path and acquire the XPT lock while
838 * it calls xpt_action() to execute the XPT_SASYNC_CB CCB.
839 * It will then drop the XPT lock once that is done.
841 * If a path is specified for xpt_register_async(), it will
842 * not acquire and drop the XPT lock around the call to
843 * xpt_action(). xpt_action() asserts that the caller
844 * holds the SIM lock, so the SIM lock has to be held when
845 * calling xpt_register_async() when the path is specified.
847 * But xpt_register_async calls xpt_for_all_devices(),
848 * which calls xptbustraverse(), which will acquire each
849 * SIM lock.  When it traverses our particular bus, it will
850 * necessarily acquire the SIM lock, which will lead to a
851 * recursive lock acquisition.
853 * The CAM locking changes fix this problem by acquiring
854 * the XPT topology lock around bus traversal in
855 * xptbustraverse(), so the caller can hold the SIM lock
856 * and it does not cause a recursive lock acquisition.
858 * These __FreeBSD_version values are approximate, especially
859 * for stable/10, which is two months later than the actual
863 #if (__FreeBSD_version < 1000703) || \
864 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
866 status = xpt_register_async(event, mprsas_async, sc,
870 status = xpt_register_async(event, mprsas_async, sc,
874 if (status != CAM_REQ_CMP) {
875 mpr_dprint(sc, MPR_ERROR,
876 "Error %#x registering async handler for "
877 "AC_ADVINFO_CHANGED events\n", status);
878 xpt_free_path(sassc->path);
882 if (status != CAM_REQ_CMP) {
884 * EEDP use is the exception, not the rule.
885 * Warn the user, but do not fail to attach.
887 mpr_printf(sc, "EEDP capabilities disabled.\n");
892 mprsas_register_events(sc);
897 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
/*
 * mpr_detach_sas: tear down the CAM/SAS subsystem.  Deregisters firmware
 * events, frees the event taskqueue (outside the lock to avoid
 * deadlock), drains any outstanding startup references so CAM isn't left
 * frozen, removes the async handler and its path, releases the simq if
 * still in startup, deregisters and frees the SIM and SIM queue, frees
 * all per-target LUN records and the target array, and finally frees the
 * SAS softc.  Safe to call when sc->sassc is NULL.
 *
 * NOTE(review): garbled extraction — MPR_FUNCTRACE, 'sassc = sc->sassc',
 * returns, MPR_LOCK/MPR_UNLOCK pairs, the free(lun, M_MPR) inside the
 * LUN loop, 'free(sassc, M_MPR)', 'sc->sassc = NULL', and braces were
 * dropped between the surviving lines; restore from canonical mpr_sas.c.
 */
902 mpr_detach_sas(struct mpr_softc *sc)
904 struct mprsas_softc *sassc;
905 struct mprsas_lun *lun, *lun_tmp;
906 struct mprsas_target *targ;
911 if (sc->sassc == NULL)
915 mpr_deregister_events(sc, sassc->mprsas_eh);
918 * Drain and free the event handling taskqueue with the lock
919 * unheld so that any parallel processing tasks drain properly
920 * without deadlocking.
922 if (sassc->ev_tq != NULL)
923 taskqueue_free(sassc->ev_tq);
925 /* Make sure CAM doesn't wedge if we had to bail out early. */
928 while (sassc->startup_refcount != 0)
929 mprsas_startup_decrement(sassc);
931 /* Deregister our async handler */
932 if (sassc->path != NULL) {
933 xpt_register_async(0, mprsas_async, sc, sassc->path);
934 xpt_free_path(sassc->path);
938 if (sassc->flags & MPRSAS_IN_STARTUP)
939 xpt_release_simq(sassc->sim, 1);
941 if (sassc->sim != NULL) {
942 xpt_bus_deregister(cam_sim_path(sassc->sim));
943 cam_sim_free(sassc->sim, FALSE);
948 if (sassc->devq != NULL)
949 cam_simq_free(sassc->devq);
951 for (i = 0; i < sassc->maxtargets; i++) {
952 targ = &sassc->targets[i];
953 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
957 free(sassc->targets, M_MPR);
965 mprsas_discovery_end(struct mprsas_softc *sassc)
967 struct mpr_softc *sc = sassc->sc;
971 if (sassc->flags & MPRSAS_DISCOVERY_TIMEOUT_PENDING)
972 callout_stop(&sassc->discovery_callout);
975 * After discovery has completed, check the mapping table for any
976 * missing devices and update their missing counts. Only do this once
977 * whenever the driver is initialized so that missing counts aren't
978 * updated unnecessarily. Note that just because discovery has
979 * completed doesn't mean that events have been processed yet. The
980 * check_devices function is a callout timer that checks if ALL devices
981 * are missing. If so, it will wait a little longer for events to
982 * complete and keep resetting itself until some device in the mapping
983 * table is not missing, meaning that event processing has started.
985 if (sc->track_mapping_events) {
986 mpr_dprint(sc, MPR_XINFO | MPR_MAPPING, "Discovery has "
987 "completed. Check for missing devices in the mapping "
989 callout_reset(&sc->device_check_callout,
990 MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices,
/*
 * mprsas_action: the CAM SIM action entry point.  Dispatches on the CCB
 * function code: XPT_PATH_INQ fills in controller capabilities;
 * XPT_GET_TRAN_SETTINGS reports per-target SAS link speed and tagged
 * queueing; XPT_CALC_GEOMETRY uses the extended CAM helper; reset-device
 * CCBs go to mprsas_action_resetdev; bus resets/aborts are faked as
 * success; SCSI I/O goes to mprsas_action_scsiio; SMP I/O (when the CAM
 * version supports it) to mprsas_action_smpio; everything else is
 * CAM_FUNC_NOTAVAIL.  Always completes the CCB via xpt_done at the end.
 * Caller must hold the softc mutex (asserted below).
 *
 * NOTE(review): garbled extraction — most 'case'/'break' lines, the
 * SET_TRAN_SETTINGS case, the linkrate case labels, closing braces, and
 * the trailing xpt_done() were dropped between the surviving lines;
 * restore from the canonical mpr_sas.c.
 */
996 mprsas_action(struct cam_sim *sim, union ccb *ccb)
998 struct mprsas_softc *sassc;
1000 sassc = cam_sim_softc(sim);
1002 MPR_FUNCTRACE(sassc->sc);
1003 mpr_dprint(sassc->sc, MPR_TRACE, "ccb func_code 0x%x\n",
1004 ccb->ccb_h.func_code);
1005 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
1007 switch (ccb->ccb_h.func_code) {
1010 struct ccb_pathinq *cpi = &ccb->cpi;
1011 struct mpr_softc *sc = sassc->sc;
1013 cpi->version_num = 1;
1014 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1015 cpi->target_sprt = 0;
/* PIM_NOSCAN (newer CAM only): skip the initial scan; discovery rescans. */
1016 #if (__FreeBSD_version >= 1000039) || \
1017 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
1018 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
1020 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
1022 cpi->hba_eng_cnt = 0;
1023 cpi->max_target = sassc->maxtargets - 1;
1027 * initiator_id is set here to an ID outside the set of valid
1028 * target IDs (including volumes).
1030 cpi->initiator_id = sassc->maxtargets;
1031 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1032 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
1033 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1034 cpi->unit_number = cam_sim_unit(sim);
1035 cpi->bus_id = cam_sim_bus(sim);
1037 * XXXSLM-I think this needs to change based on config page or
1038 * something instead of hardcoded to 150000.
1040 cpi->base_transfer_speed = 150000;
1041 cpi->transport = XPORT_SAS;
1042 cpi->transport_version = 0;
1043 cpi->protocol = PROTO_SCSI;
1044 cpi->protocol_version = SCSI_REV_SPC;
1045 cpi->maxio = sc->maxio;
1046 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1049 case XPT_GET_TRAN_SETTINGS:
1051 struct ccb_trans_settings *cts;
1052 struct ccb_trans_settings_sas *sas;
1053 struct ccb_trans_settings_scsi *scsi;
1054 struct mprsas_target *targ;
1057 sas = &cts->xport_specific.sas;
1058 scsi = &cts->proto_specific.scsi;
1060 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1061 ("Target %d out of bounds in XPT_GET_TRAN_SETTINGS\n",
1062 cts->ccb_h.target_id));
1063 targ = &sassc->targets[cts->ccb_h.target_id];
1064 if (targ->handle == 0x0) {
1065 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1069 cts->protocol_version = SCSI_REV_SPC2;
1070 cts->transport = XPORT_SAS;
1071 cts->transport_version = 0;
1073 sas->valid = CTS_SAS_VALID_SPEED;
/* Map the negotiated MPI link-rate code to a CAM bitrate in kb/s. */
1074 switch (targ->linkrate) {
1076 sas->bitrate = 150000;
1079 sas->bitrate = 300000;
1082 sas->bitrate = 600000;
1085 sas->bitrate = 1200000;
1091 cts->protocol = PROTO_SCSI;
1092 scsi->valid = CTS_SCSI_VALID_TQ;
1093 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1095 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1098 case XPT_CALC_GEOMETRY:
1099 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1100 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1103 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action "
1105 mprsas_action_resetdev(sassc, ccb);
1110 mpr_dprint(sassc->sc, MPR_XINFO, "mprsas_action faking success "
1111 "for abort or reset\n");
1112 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
1115 mprsas_action_scsiio(sassc, ccb);
1117 #if __FreeBSD_version >= 900026
1119 mprsas_action_smpio(sassc, ccb);
1123 mprsas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1131 mprsas_announce_reset(struct mpr_softc *sc, uint32_t ac_code,
1132 target_id_t target_id, lun_id_t lun_id)
1134 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1135 struct cam_path *path;
1137 mpr_dprint(sc, MPR_XINFO, "%s code %x target %d lun %jx\n", __func__,
1138 ac_code, target_id, (uintmax_t)lun_id);
1140 if (xpt_create_path(&path, NULL,
1141 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1142 mpr_dprint(sc, MPR_ERROR, "unable to create path for reset "
1147 xpt_async(ac_code, path, NULL);
1148 xpt_free_path(path);
/*
 * mprsas_complete_all_commands: forcibly complete every non-free command
 * with a NULL reply during a diag reset.  Frees SATA-identify buffers
 * that timed out, marks polled commands complete, and either runs the
 * command's completion callback or wakes a sleeping waiter.  Commands
 * that are neither completable nor wakeable are logged — that state
 * should be unreachable.  Resets the active-I/O count at the end.
 * Caller must hold the softc mutex (asserted below).
 *
 * NOTE(review): garbled extraction — locals (i, completed), 'continue'
 * after the FREE check, the 'completed = 1'/wakeup() statements, and
 * closing braces were dropped between the surviving lines; restore from
 * the canonical mpr_sas.c.
 */
1152 mprsas_complete_all_commands(struct mpr_softc *sc)
1154 struct mpr_command *cm;
1159 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1161 /* complete all commands with a NULL reply */
/* Slot 0 (SMID 0) is reserved by the firmware, so start at 1. */
1162 for (i = 1; i < sc->num_reqs; i++) {
1163 cm = &sc->commands[i];
1164 if (cm->cm_state == MPR_CM_STATE_FREE)
1167 cm->cm_state = MPR_CM_STATE_BUSY;
1168 cm->cm_reply = NULL;
1171 if (cm->cm_flags & MPR_CM_FLAGS_SATA_ID_TIMEOUT) {
1173 free(cm->cm_data, M_MPR);
1177 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1178 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1180 if (cm->cm_complete != NULL) {
1181 mprsas_log_command(cm, MPR_RECOVERY,
1182 "completing cm %p state %x ccb %p for diag reset\n",
1183 cm, cm->cm_state, cm->cm_ccb);
1184 cm->cm_complete(sc, cm);
1186 } else if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1187 mprsas_log_command(cm, MPR_RECOVERY,
1188 "waking up cm %p state %x ccb %p for diag reset\n",
1189 cm, cm->cm_state, cm->cm_ccb);
1194 if ((completed == 0) && (cm->cm_state != MPR_CM_STATE_FREE)) {
1195 /* this should never happen, but if it does, log */
1196 mprsas_log_command(cm, MPR_RECOVERY,
1197 "cm %p state %x flags 0x%x ccb %p during diag "
1198 "reset\n", cm, cm->cm_state, cm->cm_flags,
1203 sc->io_cmds_active = 0;
/*
 * Post-diag-reset reinitialization: re-enter startup/discovery mode,
 * announce a bus reset to CAM, flush all outstanding commands, and
 * invalidate every target's device handle so the post-reset discovery
 * re-populates them.
 */
1207 mprsas_handle_reinit(struct mpr_softc *sc)
1211 /* Go back into startup mode and freeze the simq, so that CAM
1212 * doesn't send any commands until after we've rediscovered all
1213 * targets and found the proper device handles for them.
1215 * After the reset, portenable will trigger discovery, and after all
1216 * discovery-related activities have finished, the simq will be
1219 mpr_dprint(sc, MPR_INIT, "%s startup\n", __func__);
1220 sc->sassc->flags |= MPRSAS_IN_STARTUP;
1221 sc->sassc->flags |= MPRSAS_IN_DISCOVERY;
1222 mprsas_startup_increment(sc->sassc);
1224 /* notify CAM of a bus reset */
1225 mprsas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1228 /* complete and cleanup after all outstanding commands */
1229 mprsas_complete_all_commands(sc);
1231 mpr_dprint(sc, MPR_INIT, "%s startup %u after command completion\n",
1232 __func__, sc->sassc->startup_refcount);
1234 /* zero all the target handles, since they may change after the
1235 * reset, and we have to rediscover all the targets and use the new
1238 for (i = 0; i < sc->sassc->maxtargets; i++) {
1239 if (sc->sassc->targets[i].outstanding != 0)
1240 mpr_dprint(sc, MPR_INIT, "target %u outstanding %u\n",
1241 i, sc->sassc->targets[i].outstanding);
1242 sc->sassc->targets[i].handle = 0x0;
1243 sc->sassc->targets[i].exp_dev_handle = 0x0;
1244 sc->sassc->targets[i].outstanding = 0;
/* INDIAGRESET replaces (not ORs into) the flags; any previous
 * per-target flags are intentionally discarded across a reset. */
1245 sc->sassc->targets[i].flags = MPRSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management request itself times out.
 * The TM is asserted to still be in the queue, then marked BUSY; the
 * escalation (elided in this view — likely a controller reinit) follows.
 * Runs with the softc mutex held (callout initialized with the mutex).
 */
1249 mprsas_tm_timeout(void *data)
1251 struct mpr_command *tm = data;
1252 struct mpr_softc *sc = tm->cm_sc;
1254 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1256 mprsas_log_command(tm, MPR_INFO|MPR_RECOVERY, "task mgmt %p timed "
1259 KASSERT(tm->cm_state == MPR_CM_STATE_INQUEUE,
1260 ("command not inqueue\n"));
1262 tm->cm_state = MPR_CM_STATE_BUSY;
/*
 * Completion handler for a LOGICAL UNIT RESET task-management request.
 * Cancels the TM timeout, validates the reply, then decides the next
 * recovery step: if no commands remain on the LUN, recovery for it is
 * done (announce AC_SENT_BDR, possibly start aborts for another LUN's
 * timed-out commands, or free the TM); if commands remain, the LU reset
 * is treated as failed and recovery escalates to a target reset.
 */
1267 mprsas_logical_unit_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1269 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1270 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1271 unsigned int cm_count = 0;
1272 struct mpr_command *cm;
1273 struct mprsas_target *targ;
1275 callout_stop(&tm->cm_callout);
1277 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1278 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1282 * Currently there should be no way we can hit this case. It only
1283 * happens when we have a failure to allocate chain frames, and
1284 * task management commands don't have S/G lists.
1286 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1287 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1288 "%s: cm_flags = %#x for LUN reset! "
1289 "This should not happen!\n", __func__, tm->cm_flags);
1290 mprsas_free_tm(sc, tm);
1294 if (reply == NULL) {
1295 mpr_dprint(sc, MPR_RECOVERY, "NULL reset reply for tm %p\n",
1297 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1298 /* this completion was due to a reset, just cleanup */
1299 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1300 "reset, ignoring NULL LUN reset reply\n");
1302 mprsas_free_tm(sc, tm);
1305 /* we should have gotten a reply. */
/* NULL reply with no diag reset in progress means the firmware is
 * wedged; the elided lines presumably trigger a controller reset. */
1306 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1307 "LUN reset attempt, resetting controller\n");
1313 mpr_dprint(sc, MPR_RECOVERY,
1314 "logical unit reset status 0x%x code 0x%x count %u\n",
1315 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1316 le32toh(reply->TerminationCount));
1319 * See if there are any outstanding commands for this LUN.
1320 * This could be made more efficient by using a per-LU data
1321 * structure of some sort.
1323 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1324 if (cm->cm_lun == tm->cm_lun)
1328 if (cm_count == 0) {
1329 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1330 "Finished recovery after LUN reset for target %u\n",
1333 mprsas_announce_reset(sc, AC_SENT_BDR, targ->tid,
1337 * We've finished recovery for this logical unit. check and
1338 * see if some other logical unit has a timedout command
1339 * that needs to be processed.
1341 cm = TAILQ_FIRST(&targ->timedout_commands);
1343 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1344 "More commands to abort for target %u\n", targ->tid);
/* Reuse this TM frame for the next abort instead of freeing it. */
1345 mprsas_send_abort(sc, tm, cm);
1348 mprsas_free_tm(sc, tm);
1351 /* if we still have commands for this LUN, the reset
1352 * effectively failed, regardless of the status reported.
1353 * Escalate to a target reset.
1355 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1356 "logical unit reset complete for target %u, but still "
1357 "have %u command(s), sending target reset\n", targ->tid,
1359 mprsas_send_reset(sc, tm,
1360 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task-management request.
 * Mirrors mprsas_logical_unit_reset_complete(): stop the TM timeout,
 * validate the reply, then either finish recovery for the whole target
 * (outstanding == 0 — announce AC_SENT_BDR and free the TM) or, if
 * commands are still outstanding, treat the reset as failed and
 * escalate to a controller reset.
 */
1365 mprsas_target_reset_complete(struct mpr_softc *sc, struct mpr_command *tm)
1367 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1368 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1369 struct mprsas_target *targ;
1371 callout_stop(&tm->cm_callout);
1373 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1374 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1378 * Currently there should be no way we can hit this case. It only
1379 * happens when we have a failure to allocate chain frames, and
1380 * task management commands don't have S/G lists.
1382 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1383 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for target "
1384 "reset! This should not happen!\n", __func__, tm->cm_flags);
1385 mprsas_free_tm(sc, tm);
1389 if (reply == NULL) {
1390 mpr_dprint(sc, MPR_RECOVERY,
1391 "NULL target reset reply for tm %p TaskMID %u\n",
1392 tm, le16toh(req->TaskMID));
1393 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1394 /* this completion was due to a reset, just cleanup */
1395 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1396 "reset, ignoring NULL target reset reply\n");
1398 mprsas_free_tm(sc, tm);
1401 /* we should have gotten a reply. */
/* No reply and no diag reset pending: firmware is unresponsive;
 * the elided lines presumably reinit the controller. */
1402 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1403 "target reset attempt, resetting controller\n");
1409 mpr_dprint(sc, MPR_RECOVERY,
1410 "target reset status 0x%x code 0x%x count %u\n",
1411 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1412 le32toh(reply->TerminationCount));
1414 if (targ->outstanding == 0) {
1416 * We've finished recovery for this target and all
1417 * of its logical units.
1419 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1420 "Finished reset recovery for target %u\n", targ->tid);
1422 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1426 mprsas_free_tm(sc, tm);
1429 * After a target reset, if this target still has
1430 * outstanding commands, the reset effectively failed,
1431 * regardless of the status reported. escalate.
1433 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1434 "Target reset complete for target %u, but still have %u "
1435 "command(s), resetting controller\n", targ->tid,
1441 #define MPR_RESET_TIMEOUT 30
/*
 * Build and issue a SCSI task-management reset of the requested type
 * (LOGICAL_UNIT_RESET or TARGET_RESET) for the target attached to this
 * TM command.  Sets the matching completion handler, arms the
 * MPR_RESET_TIMEOUT callout, and maps the command to the hardware.
 * Returns an error for a zero devhandle or an unknown type (exact
 * return values are on lines elided from this view).
 */
1444 mprsas_send_reset(struct mpr_softc *sc, struct mpr_command *tm, uint8_t type)
1446 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1447 struct mprsas_target *target;
1450 target = tm->cm_targ;
1451 if (target->handle == 0) {
1452 mpr_dprint(sc, MPR_ERROR, "%s null devhandle for target_id "
1453 "%d\n", __func__, target->tid);
1457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1458 req->DevHandle = htole16(target->handle);
1459 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1460 req->TaskType = type;
1462 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1463 /* XXX Need to handle invalid LUNs */
1464 MPR_SET_LUN(req->LUN, tm->cm_lun);
1465 tm->cm_targ->logical_unit_resets++;
1466 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1467 "Sending logical unit reset to target %u lun %d\n",
1468 target->tid, tm->cm_lun);
1469 tm->cm_complete = mprsas_logical_unit_reset_complete;
1470 mprsas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1471 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1473 * Target reset method =
1474 * SAS Hard Link Reset / SATA Link Reset
1476 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1477 tm->cm_targ->target_resets++;
1478 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1479 "Sending target reset to target %u\n", target->tid);
1480 tm->cm_complete = mprsas_target_reset_complete;
/* Target reset applies to every LUN, hence the wildcard. */
1481 mprsas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1484 mpr_dprint(sc, MPR_ERROR, "unexpected reset type 0x%x\n", type);
1488 if (target->encl_level_valid) {
1489 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1490 "At enclosure level %d, slot %d, connector name (%4s)\n",
1491 target->encl_level, target->encl_slot,
1492 target->connector_name);
1496 tm->cm_complete_data = (void *)tm;
1498 callout_reset(&tm->cm_callout, MPR_RESET_TIMEOUT * hz,
1499 mprsas_tm_timeout, tm);
1501 err = mpr_map_command(sc, tm);
1503 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1504 "error %d sending reset type %u\n", err, type);
/*
 * Completion handler for an ABORT TASK task-management request.
 * Stops the TM timeout, validates the reply, then inspects the target's
 * timedout_commands queue: empty queue means abort recovery is done;
 * a different SMID at the head means the abort succeeded and the next
 * timed-out command is aborted with the same TM; the same SMID still
 * queued means the abort failed, so recovery escalates to a LUN reset.
 */
1511 mprsas_abort_complete(struct mpr_softc *sc, struct mpr_command *tm)
1513 struct mpr_command *cm;
1514 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1515 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1516 struct mprsas_target *targ;
1518 callout_stop(&tm->cm_callout);
1520 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1521 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1525 * Currently there should be no way we can hit this case. It only
1526 * happens when we have a failure to allocate chain frames, and
1527 * task management commands don't have S/G lists.
1529 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
1530 mpr_dprint(sc, MPR_RECOVERY|MPR_ERROR,
1531 "cm_flags = %#x for abort %p TaskMID %u!\n",
1532 tm->cm_flags, tm, le16toh(req->TaskMID));
1533 mprsas_free_tm(sc, tm);
1537 if (reply == NULL) {
1538 mpr_dprint(sc, MPR_RECOVERY,
1539 "NULL abort reply for tm %p TaskMID %u\n",
1540 tm, le16toh(req->TaskMID));
1541 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
1542 /* this completion was due to a reset, just cleanup */
1543 mpr_dprint(sc, MPR_RECOVERY, "Hardware undergoing "
1544 "reset, ignoring NULL abort reply\n");
1546 mprsas_free_tm(sc, tm);
1548 /* we should have gotten a reply. */
/* Missing reply with no diag reset pending: firmware is stuck;
 * the elided lines presumably reinit the controller. */
1549 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY, "NULL reply on "
1550 "abort attempt, resetting controller\n");
1556 mpr_dprint(sc, MPR_RECOVERY,
1557 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1558 le16toh(req->TaskMID),
1559 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1560 le32toh(reply->TerminationCount));
1562 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1565 * if there are no more timedout commands, we're done with
1566 * error recovery for this target.
1568 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1569 "Finished abort recovery for target %u\n", targ->tid);
1571 mprsas_free_tm(sc, tm);
1572 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1573 /* abort success, but we have more timedout commands to abort */
1574 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1575 "Continuing abort recovery for target %u\n", targ->tid);
1576 mprsas_send_abort(sc, tm, cm);
1579 * we didn't get a command completion, so the abort
1580 * failed as far as we're concerned. escalate.
1582 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1583 "Abort failed for target %u, sending logical unit reset\n",
1586 mprsas_send_reset(sc, tm,
1587 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1591 #define MPR_ABORT_TIMEOUT 5
/*
 * Build and issue an ABORT TASK task-management request for the timed
 * out command 'cm', using TM frame 'tm'.  Copies the target handle,
 * LUN, and victim SMID into the request, points completion at
 * mprsas_abort_complete(), arms the MPR_ABORT_TIMEOUT callout, and
 * maps the command to the hardware.
 */
1594 mprsas_send_abort(struct mpr_softc *sc, struct mpr_command *tm,
1595 struct mpr_command *cm)
1597 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1598 struct mprsas_target *targ;
1602 if (targ->handle == 0) {
1603 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1604 "%s null devhandle for target_id %d\n",
1605 __func__, cm->cm_ccb->ccb_h.target_id);
1609 mprsas_log_command(cm, MPR_RECOVERY|MPR_INFO,
1610 "Aborting command %p\n", cm);
1612 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1613 req->DevHandle = htole16(targ->handle);
1614 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1615 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1617 /* XXX Need to handle invalid LUNs */
1618 MPR_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* TaskMID identifies the exact firmware request to abort. */
1620 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1623 tm->cm_complete = mprsas_abort_complete;
1624 tm->cm_complete_data = (void *)tm;
1625 tm->cm_targ = cm->cm_targ;
1626 tm->cm_lun = cm->cm_lun;
1628 callout_reset(&tm->cm_callout, MPR_ABORT_TIMEOUT * hz,
1629 mprsas_tm_timeout, tm);
1633 mprsas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1635 err = mpr_map_command(sc, tm);
1637 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1638 "error %d sending abort for cm %p SMID %u\n",
1639 err, cm, req->TaskMID);
/*
 * Callout handler for a SCSI I/O command timeout.  First runs the
 * interrupt handler in case the completion is merely pending; if the
 * command is genuinely stuck, it is marked TIMEDOUT, queued on the
 * target's timedout_commands list, and recovery is started by sending
 * an abort (allocating a TM if the target is not already in recovery).
 */
1644 mprsas_scsiio_timeout(void *data)
1646 sbintime_t elapsed, now;
1648 struct mpr_softc *sc;
1649 struct mpr_command *cm;
1650 struct mprsas_target *targ;
1652 cm = (struct mpr_command *)data;
1658 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1660 mpr_dprint(sc, MPR_XINFO|MPR_RECOVERY, "Timeout checking cm %p\n", cm);
1663 * Run the interrupt handler to make sure it's not pending. This
1664 * isn't perfect because the command could have already completed
1665 * and been re-used, though this is unlikely.
1667 mpr_intr_locked(sc);
1668 if (cm->cm_state != MPR_CM_STATE_INQUEUE) {
1669 mprsas_log_command(cm, MPR_XINFO,
1670 "SCSI command %p almost timed out\n", cm);
1674 if (cm->cm_ccb == NULL) {
1675 mpr_dprint(sc, MPR_ERROR, "command timeout with NULL ccb\n");
/* sim_data was stamped with sbinuptime() at submit time; the delta
 * gives the true elapsed time for the log message below. */
1682 elapsed = now - ccb->ccb_h.qos.sim_data;
1683 mprsas_log_command(cm, MPR_INFO|MPR_RECOVERY,
1684 "Command timeout on target %u(0x%04x), %d set, %d.%d elapsed\n",
1685 targ->tid, targ->handle, ccb->ccb_h.timeout,
1686 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1687 if (targ->encl_level_valid) {
1688 mpr_dprint(sc, MPR_INFO|MPR_RECOVERY,
1689 "At enclosure level %d, slot %d, connector name (%4s)\n",
1690 targ->encl_level, targ->encl_slot, targ->connector_name);
1693 /* XXX first, check the firmware state, to see if it's still
1694 * operational. if not, do a diag reset.
1696 mprsas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1697 cm->cm_state = MPR_CM_STATE_TIMEDOUT;
1698 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1700 if (targ->tm != NULL) {
1701 /* target already in recovery, just queue up another
1702 * timedout command to be processed later.
1704 mpr_dprint(sc, MPR_RECOVERY, "queued timedout cm %p for "
1705 "processing by tm %p\n", cm, targ->tm);
1707 else if ((targ->tm = mprsas_alloc_tm(sc)) != NULL) {
1709 /* start recovery by aborting the first timedout command */
1710 mpr_dprint(sc, MPR_RECOVERY|MPR_INFO,
1711 "Sending abort to target %u for SMID %d\n", targ->tid,
1712 cm->cm_desc.Default.SMID);
1713 mpr_dprint(sc, MPR_RECOVERY, "timedout cm %p allocated tm %p\n",
1715 mprsas_send_abort(sc, targ->tm, cm);
1718 /* XXX queue this target up for recovery once a TM becomes
1719 * available. The firmware only has a limited number of
1720 * HighPriority credits for the high priority requests used
1721 * for task management, and we ran out.
1723 * Isilon: don't worry about this for now, since we have
1724 * more credits than disks in an enclosure, and limit
1725 * ourselves to one TM per target for recovery.
1727 mpr_dprint(sc, MPR_ERROR|MPR_RECOVERY,
1728 "timedout cm %p failed to allocate a tm\n", cm);
1733 * mprsas_build_nvme_unmap - Build Native NVMe DSM command equivalent
1735 * Return 0 - for success,
1736 * 1 - to immediately return back the command with success status to CAM
1737 * negative value - to fallback to firmware path i.e. issue scsi unmap
1738 * to FW without any translation.
/*
 * Translate a SCSI UNMAP CDB into a native NVMe Dataset Management
 * (deallocate) command wrapped in an MPI2.6 NVMe Encapsulated request,
 * and submit it.  See the header comment above for the return-value
 * contract (0 = issued, 1 = complete to CAM as success, <0 = fall back
 * to the firmware SCSI path).
 */
1741 mprsas_build_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm,
1742 union ccb *ccb, struct mprsas_target *targ)
1744 Mpi26NVMeEncapsulatedRequest_t *req = NULL;
1745 struct ccb_scsiio *csio;
1746 struct unmap_parm_list *plist;
1747 struct nvme_dsm_range *nvme_dsm_ranges = NULL;
1748 struct nvme_command *c;
1750 uint16_t ndesc, list_len, data_length;
1751 struct mpr_prp_page *prp_page_info;
1752 uint64_t nvme_dsm_ranges_dma_handle;
/* UNMAP CDB bytes 7-8 hold the big-endian parameter list length. */
1755 #if __FreeBSD_version >= 1100103
1756 list_len = (scsiio_cdb_ptr(csio)[7] << 8 | scsiio_cdb_ptr(csio)[8]);
1758 if (csio->ccb_h.flags & CAM_CDB_POINTER) {
1759 list_len = (ccb->csio.cdb_io.cdb_ptr[7] << 8 |
1760 ccb->csio.cdb_io.cdb_ptr[8]);
1762 list_len = (ccb->csio.cdb_io.cdb_bytes[7] << 8 |
1763 ccb->csio.cdb_io.cdb_bytes[8]);
1767 mpr_dprint(sc, MPR_ERROR, "Parameter list length is Zero\n");
1771 plist = malloc(csio->dxfer_len, M_MPR, M_ZERO|M_NOWAIT);
1773 mpr_dprint(sc, MPR_ERROR, "Unable to allocate memory to "
1774 "save UNMAP data\n");
1778 /* Copy SCSI unmap data to a local buffer */
1779 bcopy(csio->data_ptr, plist, csio->dxfer_len);
1781 /* return back the unmap command to CAM with success status,
1782 * if the number of descriptors is zero.
/* Each SCSI UNMAP block descriptor is 16 bytes, hence the >> 4. */
1784 ndesc = be16toh(plist->unmap_blk_desc_data_len) >> 4;
1786 mpr_dprint(sc, MPR_XINFO, "Number of descriptors in "
1787 "UNMAP cmd is Zero\n");
1792 data_length = ndesc * sizeof(struct nvme_dsm_range);
1793 if (data_length > targ->MDTS) {
1794 mpr_dprint(sc, MPR_ERROR, "data length: %d is greater than "
1795 "Device's MDTS: %d\n", data_length, targ->MDTS);
1800 prp_page_info = mpr_alloc_prp_page(sc);
1801 KASSERT(prp_page_info != NULL, ("%s: There is no PRP Page for "
1802 "UNMAP command.\n", __func__));
1805 * Insert the allocated PRP page into the command's PRP page list. This
1806 * will be freed when the command is freed.
1808 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
1810 nvme_dsm_ranges = (struct nvme_dsm_range *)prp_page_info->prp_page;
1811 nvme_dsm_ranges_dma_handle = prp_page_info->prp_page_busaddr;
1813 bzero(nvme_dsm_ranges, data_length);
1815 /* Convert SCSI unmap's descriptor data to NVMe DSM specific Range data
1816 * for each descriptor contained in the SCSI UNMAP data.
1818 for (i = 0; i < ndesc; i++) {
/* SCSI descriptors are big-endian; NVMe fields are little-endian. */
1819 nvme_dsm_ranges[i].length =
1820 htole32(be32toh(plist->desc[i].nlb));
1821 nvme_dsm_ranges[i].starting_lba =
1822 htole64(be64toh(plist->desc[i].slba));
1823 nvme_dsm_ranges[i].attributes = 0;
1826 /* Build MPI2.6's NVMe Encapsulated Request Message */
1827 req = (Mpi26NVMeEncapsulatedRequest_t *)cm->cm_req;
1828 bzero(req, sizeof(*req));
1829 req->DevHandle = htole16(targ->handle);
1830 req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;
1831 req->Flags = MPI26_NVME_FLAGS_WRITE;
/* NVMe error responses land in the command's sense buffer. */
1832 req->ErrorResponseBaseAddress.High =
1833 htole32((uint32_t)((uint64_t)cm->cm_sense_busaddr >> 32));
1834 req->ErrorResponseBaseAddress.Low =
1835 htole32(cm->cm_sense_busaddr);
1836 req->ErrorResponseAllocationLength =
1837 htole16(sizeof(struct nvme_completion));
1838 req->EncapsulatedCommandLength =
1839 htole16(sizeof(struct nvme_command));
1840 req->DataLength = htole32(data_length);
1842 /* Build NVMe DSM command */
1843 c = (struct nvme_command *) req->NVMe_Command;
1844 c->opc = NVME_OPC_DATASET_MANAGEMENT;
/* LUNs map to NVMe namespaces, which are 1-based. */
1845 c->nsid = htole32(csio->ccb_h.target_lun + 1);
1846 c->cdw10 = htole32(ndesc - 1);
1847 c->cdw11 = htole32(NVME_DSM_ATTR_DEALLOCATE);
1849 cm->cm_length = data_length;
1852 cm->cm_complete = mprsas_scsiio_complete;
1853 cm->cm_complete_data = ccb;
1855 cm->cm_lun = csio->ccb_h.target_lun;
1858 cm->cm_desc.Default.RequestFlags =
1859 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
/* Stamp submit time so the timeout handler can report elapsed time. */
1861 csio->ccb_h.qos.sim_data = sbinuptime();
1862 #if __FreeBSD_version >= 1000029
1863 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1864 mprsas_scsiio_timeout, cm, 0);
1865 #else //__FreeBSD_version < 1000029
1866 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
1867 mprsas_scsiio_timeout, cm);
1868 #endif //__FreeBSD_version >= 1000029
1871 targ->outstanding++;
1872 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1873 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1875 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
1876 __func__, cm, ccb, targ->outstanding);
1878 mpr_build_nvme_prp(sc, cm, req,
1879 (void *)(uintptr_t)nvme_dsm_ranges_dma_handle, 0, data_length);
1880 mpr_map_command(sc, cm);
/*
 * XPT_SCSI_IO handler: validate the target, allocate a driver command,
 * translate the CCB into an MPI2 SCSI IO request (including optional
 * NVMe UNMAP translation and EEDP/protection setup), arm the timeout
 * callout, and map the command to the hardware.  Must be called with
 * the softc mutex held.
 */
1888 mprsas_action_scsiio(struct mprsas_softc *sassc, union ccb *ccb)
1890 MPI2_SCSI_IO_REQUEST *req;
1891 struct ccb_scsiio *csio;
1892 struct mpr_softc *sc;
1893 struct mprsas_target *targ;
1894 struct mprsas_lun *lun;
1895 struct mpr_command *cm;
1896 uint8_t i, lba_byte, *ref_tag_addr, scsi_opcode;
1897 uint16_t eedp_flags;
1898 uint32_t mpi_control;
1903 mtx_assert(&sc->mpr_mtx, MA_OWNED);
1906 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1907 ("Target %d out of bounds in XPT_SCSI_IO\n",
1908 csio->ccb_h.target_id));
1909 targ = &sassc->targets[csio->ccb_h.target_id];
1910 mpr_dprint(sc, MPR_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1911 if (targ->handle == 0x0) {
1912 mpr_dprint(sc, MPR_ERROR, "%s NULL handle for target %u\n",
1913 __func__, csio->ccb_h.target_id);
1914 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID member disks are addressed through the volume, never directly. */
1918 if (targ->flags & MPR_TARGET_FLAGS_RAID_COMPONENT) {
1919 mpr_dprint(sc, MPR_ERROR, "%s Raid component no SCSI IO "
1920 "supported %u\n", __func__, csio->ccb_h.target_id);
1921 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1926 * Sometimes, it is possible to get a command that is not "In
1927 * Progress" and was actually aborted by the upper layer. Check for
1928 * this here and complete the command without error.
1930 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1931 mpr_dprint(sc, MPR_TRACE, "%s Command is not in progress for "
1932 "target %u\n", __func__, csio->ccb_h.target_id);
1937 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1938 * that the volume has timed out. We want volumes to be enumerated
1939 * until they are deleted/removed, not just failed.
1941 if (targ->flags & MPRSAS_TARGET_INREMOVAL) {
1942 if (targ->devinfo == 0)
1943 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
1945 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1950 if ((sc->mpr_flags & MPR_FLAGS_SHUTDOWN) != 0) {
1951 mpr_dprint(sc, MPR_INFO, "%s shutting down\n", __func__);
1952 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1958 * If target has a reset in progress, freeze the devq and return. The
1959 * devq will be released when the TM reset is finished.
1961 if (targ->flags & MPRSAS_TARGET_INRESET) {
1962 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1963 mpr_dprint(sc, MPR_INFO, "%s: Freezing devq for target ID %d\n",
1964 __func__, targ->tid);
1965 xpt_freeze_devq(ccb->ccb_h.path, 1);
1970 cm = mpr_alloc_command(sc);
/* No free command, or a diag reset raced in: freeze the simq and ask
 * CAM to requeue; the simq thaws once resources are available again. */
1971 if (cm == NULL || (sc->mpr_flags & MPR_FLAGS_DIAGRESET)) {
1973 mpr_free_command(sc, cm);
1975 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
1976 xpt_freeze_simq(sassc->sim, 1);
1977 sassc->flags |= MPRSAS_QUEUE_FROZEN;
1979 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1980 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1985 /* For NVME device's issue UNMAP command directly to NVME drives by
1986 * constructing equivalent native NVMe DataSetManagement command.
1988 #if __FreeBSD_version >= 1100103
1989 scsi_opcode = scsiio_cdb_ptr(csio)[0];
1991 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1992 scsi_opcode = csio->cdb_io.cdb_ptr[0];
1994 scsi_opcode = csio->cdb_io.cdb_bytes[0];
1996 if (scsi_opcode == UNMAP &&
1998 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
1999 rc = mprsas_build_nvme_unmap(sc, cm, ccb, targ);
2000 if (rc == 1) { /* return command to CAM with success status */
2001 mpr_free_command(sc, cm);
2002 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2005 } else if (!rc) /* Issued NVMe Encapsulated Request Message */
2009 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
2010 bzero(req, sizeof(*req));
2011 req->DevHandle = htole16(targ->handle);
2012 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2014 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
2015 req->SenseBufferLength = MPR_SENSE_LEN;
2017 req->ChainOffset = 0;
2018 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
2023 req->DataLength = htole32(csio->dxfer_len);
2024 req->BidirectionalDataLength = 0;
2025 req->IoFlags = htole16(csio->cdb_len);
2028 /* Note: BiDirectional transfers are not supported */
2029 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
2031 mpi_control = MPI2_SCSIIO_CONTROL_READ;
2032 cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
2035 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
2036 cm->cm_flags |= MPR_CM_FLAGS_DATAOUT;
2040 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
2044 if (csio->cdb_len == 32)
2045 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
2047 * It looks like the hardware doesn't require an explicit tag
2048 * number for each transaction. SAM Task Management not supported
2051 switch (csio->tag_action) {
2052 case MSG_HEAD_OF_Q_TAG:
2053 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
2055 case MSG_ORDERED_Q_TAG:
2056 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
2059 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
2061 case CAM_TAG_ACTION_NONE:
2062 case MSG_SIMPLE_Q_TAG:
2064 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
2067 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
2068 req->Control = htole32(mpi_control);
2070 if (MPR_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
2071 mpr_free_command(sc, cm);
2072 mprsas_set_ccbstatus(ccb, CAM_LUN_INVALID);
2077 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2078 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
2080 KASSERT(csio->cdb_len <= IOCDBLEN,
2081 ("cdb_len %d is greater than IOCDBLEN but CAM_CDB_POINTER "
2082 "is not set", csio->cdb_len));
2083 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
2085 req->IoFlags = htole16(csio->cdb_len);
2088 * Check if EEDP is supported and enabled. If it is then check if the
2089 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
2090 * is formatted for EEDP support. If all of this is true, set CDB up
2091 * for EEDP transfer.
2093 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
2094 if (sc->eedp_enabled && eedp_flags) {
2095 SLIST_FOREACH(lun, &targ->luns, lun_link) {
2096 if (lun->lun_id == csio->ccb_h.target_lun) {
2101 if ((lun != NULL) && (lun->eedp_formatted)) {
2102 req->EEDPBlockSize = htole16(lun->eedp_block_size);
2103 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2104 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2105 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2106 if (sc->mpr_flags & MPR_FLAGS_GEN35_IOC) {
2108 MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
2110 req->EEDPFlags = htole16(eedp_flags);
2113 * If CDB less than 32, fill in Primary Ref Tag with
2114 * low 4 bytes of LBA. If CDB is 32, tag stuff is
2115 * already there. Also, set protection bit. FreeBSD
2116 * currently does not support CDBs bigger than 16, but
2117 * the code doesn't hurt, and will be here for the
2120 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 in 16-byte CDBs, byte 2 otherwise. */
2121 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
2122 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
2123 PrimaryReferenceTag;
2124 for (i = 0; i < 4; i++) {
2126 req->CDB.CDB32[lba_byte + i];
2129 req->CDB.EEDP32.PrimaryReferenceTag =
2131 CDB.EEDP32.PrimaryReferenceTag);
2132 req->CDB.EEDP32.PrimaryApplicationTagMask =
2135 (req->CDB.CDB32[1] & 0x1F) | 0x20;
2138 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
2139 req->EEDPFlags = htole16(eedp_flags);
2140 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
2146 cm->cm_length = csio->dxfer_len;
2147 if (cm->cm_length != 0) {
2149 cm->cm_flags |= MPR_CM_FLAGS_USE_CCB;
2153 cm->cm_sge = &req->SGL;
2154 cm->cm_sglsize = (32 - 24) * 4;
2155 cm->cm_complete = mprsas_scsiio_complete;
2156 cm->cm_complete_data = ccb;
2158 cm->cm_lun = csio->ccb_h.target_lun;
2161 * If using FP desc type, need to set a bit in IoFlags (SCSI IO is 0)
2162 * and set descriptor type.
2164 if (targ->scsi_req_desc_type ==
2165 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO) {
2166 req->IoFlags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
2167 cm->cm_desc.FastPathSCSIIO.RequestFlags =
2168 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2169 if (!sc->atomic_desc_capable) {
2170 cm->cm_desc.FastPathSCSIIO.DevHandle =
2171 htole16(targ->handle);
2174 cm->cm_desc.SCSIIO.RequestFlags =
2175 MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2176 if (!sc->atomic_desc_capable)
2177 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
/* Stamp submit time so the timeout handler can report elapsed time. */
2180 csio->ccb_h.qos.sim_data = sbinuptime();
2181 #if __FreeBSD_version >= 1000029
2182 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
2183 mprsas_scsiio_timeout, cm, 0);
2184 #else //__FreeBSD_version < 1000029
2185 callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
2186 mprsas_scsiio_timeout, cm);
2187 #endif //__FreeBSD_version >= 1000029
2190 targ->outstanding++;
2191 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
2192 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2194 mprsas_log_command(cm, MPR_XINFO, "%s cm %p ccb %p outstanding %u\n",
2195 __func__, cm, ccb, targ->outstanding);
2197 mpr_map_command(sc, cm);
2202 * mpr_sc_failed_io_info - translate a non-successful SCSI_IO request
/*
 * Decode and log a failed SCSI_IO reply for diagnostics: IOC status,
 * SCSI status/state (with human-readable descriptions), enclosure
 * location if known, autosense data when valid, and any response info
 * bytes.  Log-only; does not alter command state.
 */
2205 mpr_sc_failed_io_info(struct mpr_softc *sc, struct ccb_scsiio *csio,
2206 Mpi2SCSIIOReply_t *mpi_reply, struct mprsas_target *targ)
2210 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
2211 MPI2_IOCSTATUS_MASK;
2212 u8 scsi_state = mpi_reply->SCSIState;
2213 u8 scsi_status = mpi_reply->SCSIStatus;
2214 char *desc_ioc_state = NULL;
2215 char *desc_scsi_status = NULL;
2216 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
/* 0x31170000 appears to be a special-cased loginfo value — the action
 * taken for it is on an elided line; verify against the full source. */
2218 if (log_info == 0x31170000)
2221 desc_ioc_state = mpr_describe_table(mpr_iocstatus_string,
2223 desc_scsi_status = mpr_describe_table(mpr_scsi_status_string,
2226 mpr_dprint(sc, MPR_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
2227 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
2228 if (targ->encl_level_valid) {
2229 mpr_dprint(sc, MPR_XINFO, "At enclosure level %d, slot %d, "
2230 "connector name (%4s)\n", targ->encl_level, targ->encl_slot,
2231 targ->connector_name);
2235 * We can add more detail about underflow data here
2238 mpr_dprint(sc, MPR_XINFO, "\tscsi_status(%s)(0x%02x), "
2239 "scsi_state %b\n", desc_scsi_status, scsi_status,
2240 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
2241 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
2243 if (sc->mpr_debug & MPR_XINFO &&
2244 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2245 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : Start :\n");
2246 scsi_sense_print(csio);
2247 mpr_dprint(sc, MPR_XINFO, "-> Sense Buffer Data : End :\n");
2250 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
2251 response_info = le32toh(mpi_reply->ResponseInfo);
2252 response_bytes = (u8 *)&response_info;
2253 mpr_dprint(sc, MPR_XINFO, "response code(0x%01x): %s\n",
2255 mpr_describe_table(mpr_scsi_taskmgmt_string,
2256 response_bytes[0]));
2260 /** mprsas_nvme_trans_status_code
2262 * Convert Native NVMe command error status to
2263 * equivalent SCSI error status.
2265 * Returns appropriate scsi_status
/*
 * NOTE(review): this listing is a sampled excerpt — original line numbers are
 * embedded at the start of each line and many lines (the per-case "break;"
 * statements, closing braces, the outer switch(sct) statement, and the final
 * "return status;") are not visible here.  Comments only; no code changed.
 *
 * Splits nvme_status into its Status Code Type (SCT) and Status Code (SC)
 * fields, then maps each (SCT, SC) pair onto an MPI2 SCSI status plus a
 * fixed-format sense triple (skey/asc/ascq).  The defaults set before the
 * switch (CHECK CONDITION / ILLEGAL REQUEST / no-sense) cover any
 * unrecognized status code.  The resulting sense data is written into the
 * CCB's sense buffer and CAM_AUTOSNS_VALID is raised so CAM consumes it.
 */
2268 mprsas_nvme_trans_status_code(uint16_t nvme_status,
2269 struct mpr_command *cm)
2271 u8 status = MPI2_SCSI_STATUS_GOOD;
2272 int skey, asc, ascq;
2273 union ccb *ccb = cm->cm_complete_data;
2274 int returned_sense_len;
/* Decompose the NVMe completion status into type and code fields. */
2277 sct = NVME_STATUS_GET_SCT(nvme_status);
2278 sc = NVME_STATUS_GET_SC(nvme_status);
/* Defaults for any (sct, sc) pair not matched below. */
2280 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2281 skey = SSD_KEY_ILLEGAL_REQUEST;
2282 asc = SCSI_ASC_NO_SENSE;
2283 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Generic command set status codes. */
2286 case NVME_SCT_GENERIC:
2288 case NVME_SC_SUCCESS:
2289 status = MPI2_SCSI_STATUS_GOOD;
2290 skey = SSD_KEY_NO_SENSE;
2291 asc = SCSI_ASC_NO_SENSE;
2292 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2294 case NVME_SC_INVALID_OPCODE:
2295 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2296 skey = SSD_KEY_ILLEGAL_REQUEST;
2297 asc = SCSI_ASC_ILLEGAL_COMMAND;
2298 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2300 case NVME_SC_INVALID_FIELD:
2301 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2302 skey = SSD_KEY_ILLEGAL_REQUEST;
2303 asc = SCSI_ASC_INVALID_CDB;
2304 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2306 case NVME_SC_DATA_TRANSFER_ERROR:
2307 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2308 skey = SSD_KEY_MEDIUM_ERROR;
2309 asc = SCSI_ASC_NO_SENSE;
2310 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2312 case NVME_SC_ABORTED_POWER_LOSS:
2313 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2314 skey = SSD_KEY_ABORTED_COMMAND;
2315 asc = SCSI_ASC_WARNING;
2316 ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
2318 case NVME_SC_INTERNAL_DEVICE_ERROR:
2319 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2320 skey = SSD_KEY_HARDWARE_ERROR;
2321 asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
2322 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* All four abort flavors collapse to the same TASK ABORTED sense. */
2324 case NVME_SC_ABORTED_BY_REQUEST:
2325 case NVME_SC_ABORTED_SQ_DELETION:
2326 case NVME_SC_ABORTED_FAILED_FUSED:
2327 case NVME_SC_ABORTED_MISSING_FUSED:
2328 status = MPI2_SCSI_STATUS_TASK_ABORTED;
2329 skey = SSD_KEY_ABORTED_COMMAND;
2330 asc = SCSI_ASC_NO_SENSE;
2331 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2333 case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
2334 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2335 skey = SSD_KEY_ILLEGAL_REQUEST;
2336 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2337 ascq = SCSI_ASCQ_INVALID_LUN_ID;
2339 case NVME_SC_LBA_OUT_OF_RANGE:
2340 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2341 skey = SSD_KEY_ILLEGAL_REQUEST;
2342 asc = SCSI_ASC_ILLEGAL_BLOCK;
2343 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2345 case NVME_SC_CAPACITY_EXCEEDED:
2346 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2347 skey = SSD_KEY_MEDIUM_ERROR;
2348 asc = SCSI_ASC_NO_SENSE;
2349 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2351 case NVME_SC_NAMESPACE_NOT_READY:
2352 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2353 skey = SSD_KEY_NOT_READY;
2354 asc = SCSI_ASC_LUN_NOT_READY;
2355 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Command-set-specific status codes. */
2359 case NVME_SCT_COMMAND_SPECIFIC:
2361 case NVME_SC_INVALID_FORMAT:
2362 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2363 skey = SSD_KEY_ILLEGAL_REQUEST;
2364 asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
2365 ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
2367 case NVME_SC_CONFLICTING_ATTRIBUTES:
2368 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2369 skey = SSD_KEY_ILLEGAL_REQUEST;
2370 asc = SCSI_ASC_INVALID_CDB;
2371 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Media/data-integrity status codes. */
2375 case NVME_SCT_MEDIA_ERROR:
2377 case NVME_SC_WRITE_FAULTS:
2378 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2379 skey = SSD_KEY_MEDIUM_ERROR;
2380 asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
2381 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2383 case NVME_SC_UNRECOVERED_READ_ERROR:
2384 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2385 skey = SSD_KEY_MEDIUM_ERROR;
2386 asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
2387 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
/* Protection-information (T10 DIF) check failures map 1:1 to SCSI. */
2389 case NVME_SC_GUARD_CHECK_ERROR:
2390 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2391 skey = SSD_KEY_MEDIUM_ERROR;
2392 asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
2393 ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
2395 case NVME_SC_APPLICATION_TAG_CHECK_ERROR:
2396 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2397 skey = SSD_KEY_MEDIUM_ERROR;
2398 asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
2399 ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
2401 case NVME_SC_REFERENCE_TAG_CHECK_ERROR:
2402 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2403 skey = SSD_KEY_MEDIUM_ERROR;
2404 asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
2405 ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
2407 case NVME_SC_COMPARE_FAILURE:
2408 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2409 skey = SSD_KEY_MISCOMPARE;
2410 asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
2411 ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
2413 case NVME_SC_ACCESS_DENIED:
2414 status = MPI2_SCSI_STATUS_CHECK_CONDITION;
2415 skey = SSD_KEY_ILLEGAL_REQUEST;
2416 asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
2417 ascq = SCSI_ASCQ_INVALID_LUN_ID;
/*
 * Build the synthetic sense data in the CCB.  returned_sense_len is pinned
 * to the full fixed sense structure size; sense_resid records how much of
 * the caller's sense buffer is left unused.
 */
2423 returned_sense_len = sizeof(struct scsi_sense_data);
2424 if (returned_sense_len < ccb->csio.sense_len)
2425 ccb->csio.sense_resid = ccb->csio.sense_len -
2428 ccb->csio.sense_resid = 0;
2430 scsi_set_sense_data(&ccb->csio.sense_data, SSD_TYPE_FIXED,
2431 1, skey, asc, ascq, SSD_ELEM_NONE);
/* Tell CAM the sense buffer now holds valid autosense data. */
2432 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2437 /** mprsas_complete_nvme_unmap
2439 * Complete native NVMe command issued using NVMe Encapsulated
/*
 * NOTE(review): sampled excerpt — the "return scsi_status;" tail and some
 * interior lines are not visible here.  Comments only; no code changed.
 *
 * Completion handler for a SCSI UNMAP that the driver translated into an
 * NVMe Encapsulated (DSM deallocate) request.  If the IOC reports an NVMe
 * error response, the NVMe completion status (staged in cm->cm_sense) is
 * translated to an equivalent SCSI status/sense via
 * mprsas_nvme_trans_status_code(); otherwise GOOD status is retained.
 */
2443 mprsas_complete_nvme_unmap(struct mpr_softc *sc, struct mpr_command *cm)
2445 Mpi26NVMeEncapsulatedErrorReply_t *mpi_reply;
2446 struct nvme_completion *nvme_completion = NULL;
2447 u8 scsi_status = MPI2_SCSI_STATUS_GOOD;
2449 mpi_reply =(Mpi26NVMeEncapsulatedErrorReply_t *)cm->cm_reply;
/* Non-zero ErrorResponseCount means the device returned an NVMe error. */
2450 if (le16toh(mpi_reply->ErrorResponseCount)){
2451 nvme_completion = (struct nvme_completion *)cm->cm_sense;
2452 scsi_status = mprsas_nvme_trans_status_code(
2453 nvme_completion->status, cm);
/*
 * Completion handler for SCSI I/O commands (XPT_SCSI_IO).  Runs with the
 * softc mutex held.  Syncs/unloads DMA, retires the command from the
 * target's queues, translates the MPI2 IOCStatus/SCSIStatus/SCSIState in
 * the firmware reply into CAM CCB status (including autosense copy-out and
 * TLR bookkeeping), unfreezes the SIM queue if this driver froze it, and
 * finally frees the command.
 *
 * NOTE(review): sampled excerpt — original line numbers are embedded in
 * each line and many lines are missing (e.g. "sassc = sc->sassc;",
 * "csio = &ccb->csio;", else arms, break statements, closing braces, and
 * the final xpt_done()).  Comments only; no code changed.
 */
2459 mprsas_scsiio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2461 MPI2_SCSI_IO_REPLY *rep;
2463 struct ccb_scsiio *csio;
2464 struct mprsas_softc *sassc;
2465 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2466 u8 *TLR_bits, TLR_on, *scsi_cdb;
2469 struct mprsas_target *target;
2470 target_id_t target_id;
2473 mpr_dprint(sc, MPR_TRACE,
2474 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2475 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2476 cm->cm_targ->outstanding);
/* Cancel the per-command timeout; we are completing it now. */
2478 callout_stop(&cm->cm_callout);
2479 mtx_assert(&sc->mpr_mtx, MA_OWNED);
2482 ccb = cm->cm_complete_data;
/* NOTE(review): csio is read here but its assignment (presumably
 * "csio = &ccb->csio;") falls in a gap of this listing — confirm against
 * the full source. */
2484 target_id = csio->ccb_h.target_id;
2485 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2487 * XXX KDM if the chain allocation fails, does it matter if we do
2488 * the sync and unload here? It is simpler to do it in every case,
2489 * assuming it doesn't cause problems.
2491 if (cm->cm_data != NULL) {
2492 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN)
2493 dir = BUS_DMASYNC_POSTREAD;
2494 else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT)
2495 dir = BUS_DMASYNC_POSTWRITE;
2496 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2497 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
/* Retire the command from the target's accounting and active list. */
2500 cm->cm_targ->completed++;
2501 cm->cm_targ->outstanding--;
2502 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2503 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
/* Recovery-path logging: command completed while timed out / while a
 * task-management request was pending / after a diag reset. */
2505 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT) {
2506 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2507 cm->cm_state = MPR_CM_STATE_BUSY;
2508 if (cm->cm_reply != NULL)
2509 mprsas_log_command(cm, MPR_RECOVERY,
2510 "completed timedout cm %p ccb %p during recovery "
2511 "ioc %x scsi %x state %x xfer %u\n", cm, cm->cm_ccb,
2512 le16toh(rep->IOCStatus), rep->SCSIStatus,
2513 rep->SCSIState, le32toh(rep->TransferCount));
2515 mprsas_log_command(cm, MPR_RECOVERY,
2516 "completed timedout cm %p ccb %p during recovery\n",
2518 } else if (cm->cm_targ->tm != NULL) {
2519 if (cm->cm_reply != NULL)
2520 mprsas_log_command(cm, MPR_RECOVERY,
2521 "completed cm %p ccb %p during recovery "
2522 "ioc %x scsi %x state %x xfer %u\n",
2523 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2524 rep->SCSIStatus, rep->SCSIState,
2525 le32toh(rep->TransferCount));
2527 mprsas_log_command(cm, MPR_RECOVERY,
2528 "completed cm %p ccb %p during recovery\n",
2530 } else if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0) {
2531 mprsas_log_command(cm, MPR_RECOVERY,
2532 "reset completed cm %p ccb %p\n", cm, cm->cm_ccb);
2535 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2537 * We ran into an error after we tried to map the command,
2538 * so we're getting a callback without queueing the command
2539 * to the hardware. So we set the status here, and it will
2540 * be retained below. We'll go through the "fast path",
2541 * because there can be no reply when we haven't actually
2542 * gone out to the hardware.
2544 mprsas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2547 * Currently the only error included in the mask is
2548 * MPR_CM_FLAGS_CHAIN_FAILED, which means we're out of
2549 * chain frames. We need to freeze the queue until we get
2550 * a command that completed without this error, which will
2551 * hopefully have some chain frames attached that we can
2552 * use. If we wanted to get smarter about it, we would
2553 * only unfreeze the queue in this condition when we're
2554 * sure that we're getting some chain frames back. That's
2555 * probably unnecessary.
2557 if ((sassc->flags & MPRSAS_QUEUE_FROZEN) == 0) {
2558 xpt_freeze_simq(sassc->sim, 1);
2559 sassc->flags |= MPRSAS_QUEUE_FROZEN;
2560 mpr_dprint(sc, MPR_XINFO, "Error sending command, "
2561 "freezing SIM queue\n");
2566 * Point to the SCSI CDB, which is dependent on the CAM_CDB_POINTER
2567 * flag, and use it in a few places in the rest of this function for
2568 * convenience. Use the macro if available.
2570 #if __FreeBSD_version >= 1100103
2571 scsi_cdb = scsiio_cdb_ptr(csio);
2573 if (csio->ccb_h.flags & CAM_CDB_POINTER)
2574 scsi_cdb = csio->cdb_io.cdb_ptr;
2576 scsi_cdb = csio->cdb_io.cdb_bytes;
2580 * If this is a Start Stop Unit command and it was issued by the driver
2581 * during shutdown, decrement the refcount to account for all of the
2582 * commands that were sent. All SSU commands should be completed before
2583 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2586 if (sc->SSU_started && (scsi_cdb[0] == START_STOP_UNIT)) {
2587 mpr_dprint(sc, MPR_INFO, "Decrementing SSU count.\n");
2591 /* Take the fast path to completion */
2592 if (cm->cm_reply == NULL) {
2593 if (mprsas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2594 if ((sc->mpr_flags & MPR_FLAGS_DIAGRESET) != 0)
2595 mprsas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2597 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2598 csio->scsi_status = SCSI_STATUS_OK;
2600 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2601 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2602 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2603 mpr_dprint(sc, MPR_XINFO,
2604 "Unfreezing SIM queue\n");
2609 * There are two scenarios where the status won't be
2610 * CAM_REQ_CMP. The first is if MPR_CM_FLAGS_ERROR_MASK is
2611 * set, the second is in the MPR_FLAGS_DIAGRESET above.
2613 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2615 * Freeze the dev queue so that commands are
2616 * executed in the correct order after error
2619 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2620 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2622 mpr_free_command(sc, cm);
/* Slow path: a firmware reply frame is present; decode it. */
2627 target = &sassc->targets[target_id];
/* UNMAP to an NVMe device was sent as NVMe Encapsulated; finish the
 * translation before decoding the generic status fields. */
2628 if (scsi_cdb[0] == UNMAP &&
2630 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
2631 rep->SCSIStatus = mprsas_complete_nvme_unmap(sc, cm);
2632 csio->scsi_status = rep->SCSIStatus;
2635 mprsas_log_command(cm, MPR_XINFO,
2636 "ioc %x scsi %x state %x xfer %u\n",
2637 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2638 le32toh(rep->TransferCount));
2640 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2641 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2642 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2644 case MPI2_IOCSTATUS_SUCCESS:
2645 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2646 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2647 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2648 mprsas_log_command(cm, MPR_XINFO, "recovered error\n");
2650 /* Completion failed at the transport level. */
2651 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2652 MPI2_SCSI_STATE_TERMINATED)) {
2653 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2657 /* In a modern packetized environment, an autosense failure
2658 * implies that there's not much else that can be done to
2659 * recover the command.
2661 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2662 mprsas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2667 * CAM doesn't care about SAS Response Info data, but if this is
2668 * the state check if TLR should be done. If not, clear the
2669 * TLR_bits for the target.
2671 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2672 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE)
2673 == MPR_SCSI_RI_INVALID_FRAME)) {
2674 sc->mapping_table[target_id].TLR_bits =
2675 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2679 * Intentionally override the normal SCSI status reporting
2680 * for these two cases. These are likely to happen in a
2681 * multi-initiator environment, and we want to make sure that
2682 * CAM retries these commands rather than fail them.
2684 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2685 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2686 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2690 /* Handle normal status and sense */
2691 csio->scsi_status = rep->SCSIStatus;
2692 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2693 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP)
2695 mprsas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
/* Copy device autosense into the CCB, clamped to both the reply's
 * SenseCount and the caller-provided sense buffer size. */
2697 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2698 int sense_len, returned_sense_len;
2700 returned_sense_len = min(le32toh(rep->SenseCount),
2701 sizeof(struct scsi_sense_data));
2702 if (returned_sense_len < csio->sense_len)
2703 csio->sense_resid = csio->sense_len -
2706 csio->sense_resid = 0;
2708 sense_len = min(returned_sense_len,
2709 csio->sense_len - csio->sense_resid);
2710 bzero(&csio->sense_data, sizeof(csio->sense_data));
2711 bcopy(cm->cm_sense, &csio->sense_data, sense_len);
2712 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2716 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2717 * and it's page code 0 (Supported Page List), and there is
2718 * inquiry data, and this is for a sequential access device, and
2719 * the device is an SSP target, and TLR is supported by the
2720 * controller, turn the TLR_bits value ON if page 0x90 is
2723 if ((scsi_cdb[0] == INQUIRY) &&
2724 (scsi_cdb[1] & SI_EVPD) &&
2725 (scsi_cdb[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2726 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2727 (csio->data_ptr != NULL) &&
2728 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2729 (sc->control_TLR) &&
2730 (sc->mapping_table[target_id].device_info &
2731 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2732 vpd_list = (struct scsi_vpd_supported_page_list *)
2734 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2735 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2736 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
/* alloc_len = CDB allocation length minus what wasn't returned. */
2737 alloc_len = ((u16)scsi_cdb[3] << 8) + scsi_cdb[4];
2738 alloc_len -= csio->resid;
2739 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2740 if (vpd_list->list[i] == 0x90) {
2748 * If this is a SATA direct-access end device, mark it so that
2749 * a SCSI StartStopUnit command will be sent to it when the
2750 * driver is being shutdown.
2752 if ((scsi_cdb[0] == INQUIRY) &&
2753 (csio->data_ptr != NULL) &&
2754 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2755 (sc->mapping_table[target_id].device_info &
2756 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2757 ((sc->mapping_table[target_id].device_info &
2758 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2759 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2760 target = &sassc->targets[target_id];
2761 target->supports_SSU = TRUE;
2762 mpr_dprint(sc, MPR_XINFO, "Target %d supports SSU\n",
2766 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2767 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2769 * If devinfo is 0 this will be a volume. In that case don't
2770 * tell CAM that the volume is not there. We want volumes to
2771 * be enumerated until they are deleted/removed, not just
2774 if (cm->cm_targ->devinfo == 0)
2775 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2777 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2779 case MPI2_IOCSTATUS_INVALID_SGL:
2780 mpr_print_scsiio_cmd(sc, cm);
2781 mprsas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2783 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2785 * This is one of the responses that comes back when an I/O
2786 * has been aborted. If it is because of a timeout that we
2787 * initiated, just set the status to CAM_CMD_TIMEOUT.
2788 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2789 * command is the same (it gets retried, subject to the
2790 * retry counter), the only difference is what gets printed
2793 if (cm->cm_state == MPR_CM_STATE_TIMEDOUT)
2794 mprsas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2796 mprsas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2798 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2799 /* resid is ignored for this condition */
2801 mprsas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2803 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2804 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2806 * These can sometimes be transient transport-related
2807 * errors, and sometimes persistent drive-related errors.
2808 * We used to retry these without decrementing the retry
2809 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2810 * we hit a persistent drive problem that returns one of
2811 * these error codes, we would retry indefinitely. So,
2812 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2813 * count and avoid infinite retries. We're taking the
2814 * potential risk of flagging false failures in the event
2815 * of a topology-related error (e.g. a SAS expander problem
2816 * causes a command addressed to a drive to fail), but
2817 * avoiding getting into an infinite retry loop.
2819 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2820 mpr_dprint(sc, MPR_INFO,
2821 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2822 mpr_describe_table(mpr_iocstatus_string,
2823 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2824 target_id, cm->cm_desc.Default.SMID,
2825 le32toh(rep->IOCLogInfo));
2826 mpr_dprint(sc, MPR_XINFO,
2827 "SCSIStatus %x SCSIState %x xfercount %u\n",
2828 rep->SCSIStatus, rep->SCSIState,
2829 le32toh(rep->TransferCount));
2831 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2832 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2833 case MPI2_IOCSTATUS_INVALID_VPID:
2834 case MPI2_IOCSTATUS_INVALID_FIELD:
2835 case MPI2_IOCSTATUS_INVALID_STATE:
2836 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2837 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2838 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2839 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2840 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2842 mprsas_log_command(cm, MPR_XINFO,
2843 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2844 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2845 rep->SCSIStatus, rep->SCSIState,
2846 le32toh(rep->TransferCount));
2847 csio->resid = cm->cm_length;
2849 if (scsi_cdb[0] == UNMAP &&
2851 (csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR)
2852 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2854 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2859 mpr_sc_failed_io_info(sc, csio, rep, cm->cm_targ);
/* If the driver froze the SIM queue earlier, release it with this
 * completion so CAM resumes dispatching. */
2861 if (sassc->flags & MPRSAS_QUEUE_FROZEN) {
2862 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2863 sassc->flags &= ~MPRSAS_QUEUE_FROZEN;
2864 mpr_dprint(sc, MPR_XINFO, "Command completed, unfreezing SIM "
/* On any non-success status, freeze the device queue so subsequent
 * commands execute in order after error recovery. */
2868 if (mprsas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2869 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2870 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2873 mpr_free_command(sc, cm);
2877 #if __FreeBSD_version >= 900026
/*
 * Completion handler for SMP passthrough requests (XPT_SMP_IO).
 * Validates the command flags and reply frame, decodes IOCStatus/SASStatus
 * into CAM status, copies out nothing itself (the response landed via DMA),
 * then syncs/unloads the data map and frees the command.
 *
 * NOTE(review): sampled excerpt — error-path "goto"/brace lines and the
 * trailing xpt_done() are not visible here.  Comments only; no code changed.
 */
2879 mprsas_smpio_complete(struct mpr_softc *sc, struct mpr_command *cm)
2881 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2882 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2886 ccb = cm->cm_complete_data;
2889 * Currently there should be no way we can hit this case. It only
2890 * happens when we have a failure to allocate chain frames, and SMP
2891 * commands require two S/G elements only. That should be handled
2892 * in the standard request size.
2894 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
2895 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x on SMP "
2896 "request!\n", __func__, cm->cm_flags);
2897 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2901 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2903 mpr_dprint(sc, MPR_ERROR, "%s: NULL cm_reply!\n", __func__);
2904 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request. */
2908 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2909 sasaddr = le32toh(req->SASAddress.Low);
2910 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2912 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2913 MPI2_IOCSTATUS_SUCCESS ||
2914 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2915 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2916 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2917 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2921 mpr_dprint(sc, MPR_XINFO, "%s: SMP request to SAS address %#jx "
2922 "completed successfully\n", __func__, (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2924 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2925 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
2927 mprsas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2931 * We sync in both directions because we had DMAs in the S/G list
2932 * in both directions.
2934 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2935 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2936 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2937 mpr_free_command(sc, cm);
/*
 * Build and dispatch an SMP passthrough request to the device at
 * 'sasaddr'.  Resolves the CCB's request/response buffers (virtual address
 * only — physical addresses and multi-segment S/G lists are rejected),
 * fills in an MPI2 SMP_PASSTHROUGH frame, describes the bidirectional
 * transfer with a two-entry uio, and hands the command to
 * mpr_map_command() with mprsas_smpio_complete() as the completion hook.
 *
 * NOTE(review): sampled excerpt — braces, break statements, "sc = sassc->sc;",
 * error-path gotos/returns, and the final xpt_done() are not visible here.
 * Comments only; no code changed.
 */
2942 mprsas_send_smpcmd(struct mprsas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2944 struct mpr_command *cm;
2945 uint8_t *request, *response;
2946 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2947 struct mpr_softc *sc;
/* Newer CAM encodes the data-layout kind in CAM_DATA_MASK. */
2955 #if (__FreeBSD_version >= 1000028) || \
2956 ((__FreeBSD_version >= 902001) && (__FreeBSD_version < 1000000))
2957 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
2958 case CAM_DATA_PADDR:
2959 case CAM_DATA_SG_PADDR:
2961 * XXX We don't yet support physical addresses here.
2963 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
2964 "supported\n", __func__);
2965 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2970 * The chip does not support more than one buffer for the
2971 * request or response.
2973 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2974 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2975 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
2976 "response buffer segments not supported for SMP\n",
2978 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2984 * The CAM_SCATTER_VALID flag was originally implemented
2985 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2986 * We have two. So, just take that flag to mean that we
2987 * might have S/G lists, and look at the S/G segment count
2988 * to figure out whether that is the case for each individual
2991 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2992 bus_dma_segment_t *req_sg;
2994 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2995 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2997 request = ccb->smpio.smp_request;
2999 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3000 bus_dma_segment_t *rsp_sg;
3002 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3003 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3005 response = ccb->smpio.smp_response;
3007 case CAM_DATA_VADDR:
3008 request = ccb->smpio.smp_request;
3009 response = ccb->smpio.smp_response;
3012 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3016 #else /* __FreeBSD_version < 1000028 */
3018 * XXX We don't yet support physical addresses here.
3020 if (ccb->ccb_h.flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) {
3021 mpr_dprint(sc, MPR_ERROR, "%s: physical addresses not "
3022 "supported\n", __func__);
3023 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3029 * If the user wants to send an S/G list, check to make sure they
3030 * have single buffers.
3032 if (ccb->ccb_h.flags & CAM_SCATTER_VALID) {
3034 * The chip does not support more than one buffer for the
3035 * request or response.
3037 if ((ccb->smpio.smp_request_sglist_cnt > 1)
3038 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
3039 mpr_dprint(sc, MPR_ERROR, "%s: multiple request or "
3040 "response buffer segments not supported for SMP\n",
3042 mprsas_set_ccbstatus(ccb, CAM_REQ_INVALID);
3048 * The CAM_SCATTER_VALID flag was originally implemented
3049 * for the XPT_SCSI_IO CCB, which only has one data pointer.
3050 * We have two. So, just take that flag to mean that we
3051 * might have S/G lists, and look at the S/G segment count
3052 * to figure out whether that is the case for each individual
3055 if (ccb->smpio.smp_request_sglist_cnt != 0) {
3056 bus_dma_segment_t *req_sg;
3058 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
3059 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
3061 request = ccb->smpio.smp_request;
3063 if (ccb->smpio.smp_response_sglist_cnt != 0) {
3064 bus_dma_segment_t *rsp_sg;
3066 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
3067 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
3069 response = ccb->smpio.smp_response;
3071 request = ccb->smpio.smp_request;
3072 response = ccb->smpio.smp_response;
3074 #endif /* __FreeBSD_version < 1000028 */
3076 cm = mpr_alloc_command(sc);
3078 mpr_dprint(sc, MPR_ERROR, "%s: cannot allocate command\n",
3080 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the MPI2 SMP passthrough request frame. */
3085 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
3086 bzero(req, sizeof(*req));
3087 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
3089 /* Allow the chip to use any route to this SAS address. */
3090 req->PhysicalPort = 0xff;
3092 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
3094 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
3096 mpr_dprint(sc, MPR_XINFO, "%s: sending SMP request to SAS address "
3097 "%#jx\n", __func__, (uintmax_t)sasaddr);
3099 mpr_init_sge(cm, req, &req->SGL);
3102 * Set up a uio to pass into mpr_map_command(). This allows us to
3103 * do one map command, and one busdma call in there.
3105 cm->cm_uio.uio_iov = cm->cm_iovec;
3106 cm->cm_uio.uio_iovcnt = 2;
3107 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
3110 * The read/write flag isn't used by busdma, but set it just in
3111 * case. This isn't exactly accurate, either, since we're going in
3114 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] carries the SMP request, iovec[1] receives the response. */
3116 cm->cm_iovec[0].iov_base = request;
3117 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
3118 cm->cm_iovec[1].iov_base = response;
3119 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
3121 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
3122 cm->cm_iovec[1].iov_len;
3125 * Trigger a warning message in mpr_data_cb() for the user if we
3126 * wind up exceeding two S/G segments. The chip expects one
3127 * segment for the request and another for the response.
3129 cm->cm_max_segs = 2;
3131 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3132 cm->cm_complete = mprsas_smpio_complete;
3133 cm->cm_complete_data = ccb;
3136 * Tell the mapping code that we're using a uio, and that this is
3137 * an SMP passthrough request. There is a little special-case
3138 * logic there (in mpr_data_cb()) to handle the bidirectional
3141 cm->cm_flags |= MPR_CM_FLAGS_USE_UIO | MPR_CM_FLAGS_SMP_PASS |
3142 MPR_CM_FLAGS_DATAIN | MPR_CM_FLAGS_DATAOUT;
3144 /* The chip data format is little endian. */
3145 req->SASAddress.High = htole32(sasaddr >> 32);
3146 req->SASAddress.Low = htole32(sasaddr);
3149 * XXX Note that we don't have a timeout/abort mechanism here.
3150 * From the manual, it looks like task management requests only
3151 * work for SCSI IO and SATA passthrough requests. We may need to
3152 * have a mechanism to retry requests in the event of a chip reset
3153 * at least. Hopefully the chip will insure that any errors short
3154 * of that are relayed back to the driver.
3156 error = mpr_map_command(sc, cm);
3157 if ((error != 0) && (error != EINPROGRESS)) {
3158 mpr_dprint(sc, MPR_ERROR, "%s: error %d returned from "
3159 "mpr_map_command()\n", __func__, error);
/* Error/bailout path: release the command and fail the CCB. */
3166 mpr_free_command(sc, cm);
3167 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * XPT_SMP_IO action handler: figure out which SAS address should receive
 * the SMP request, then hand off to mprsas_send_smpcmd().
 *
 * Address resolution order: (1) the target itself if it exposes an
 * embedded SMP target; (2) [disabled, see XXX below] the transport-layer
 * cached VPD 0x83 address; (3) the target's parent device (normally the
 * expander), via either the legacy probe tables (OLD_MPR_PROBE) or the
 * parent_* fields cached on the target.
 *
 * NOTE(review): sampled excerpt — "sc = sassc->sc;", braces/else lines,
 * the commented-out xpt_path_sas_addr() wrapper, and the trailing return
 * are not visible here.  Comments only; no code changed.
 */
3173 mprsas_action_smpio(struct mprsas_softc *sassc, union ccb *ccb)
3175 struct mpr_softc *sc;
3176 struct mprsas_target *targ;
3177 uint64_t sasaddr = 0;
3182 * Make sure the target exists.
3184 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3185 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
3186 targ = &sassc->targets[ccb->ccb_h.target_id];
3187 if (targ->handle == 0x0) {
3188 mpr_dprint(sc, MPR_ERROR, "%s: target %d does not exist!\n",
3189 __func__, ccb->ccb_h.target_id);
3190 mprsas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
3196 * If this device has an embedded SMP target, we'll talk to it
3198 * figure out what the expander's address is.
3200 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
3201 sasaddr = targ->sasaddr;
3204 * If we don't have a SAS address for the expander yet, try
3205 * grabbing it from the page 0x83 information cached in the
3206 * transport layer for this target. LSI expanders report the
3207 * expander SAS address as the port-associated SAS address in
3208 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
3211 * XXX KDM disable this for now, but leave it commented out so that
3212 * it is obvious that this is another possible way to get the SAS
3215 * The parent handle method below is a little more reliable, and
3216 * the other benefit is that it works for devices other than SES
3217 * devices. So you can send a SMP request to a da(4) device and it
3218 * will get routed to the expander that device is attached to.
3219 * (Assuming the da(4) device doesn't contain an SMP target...)
3223 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
3227 * If we still don't have a SAS address for the expander, look for
3228 * the parent device of this device, which is probably the expander.
3231 #ifdef OLD_MPR_PROBE
3232 struct mprsas_target *parent_target;
3235 if (targ->parent_handle == 0x0) {
3236 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3237 "a valid parent handle!\n", __func__, targ->handle);
3238 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3241 #ifdef OLD_MPR_PROBE
3242 parent_target = mprsas_find_target_by_handle(sassc, 0,
3243 targ->parent_handle);
3245 if (parent_target == NULL) {
3246 mpr_dprint(sc, MPR_ERROR, "%s: handle %d does not have "
3247 "a valid parent target!\n", __func__, targ->handle);
3248 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3252 if ((parent_target->devinfo &
3253 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3254 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3255 "does not have an SMP target!\n", __func__,
3256 targ->handle, parent_target->handle);
3257 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3261 sasaddr = parent_target->sasaddr;
3262 #else /* OLD_MPR_PROBE */
3263 if ((targ->parent_devinfo &
3264 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3265 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent %d "
3266 "does not have an SMP target!\n", __func__,
3267 targ->handle, targ->parent_handle);
3268 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3272 if (targ->parent_sasaddr == 0x0) {
3273 mpr_dprint(sc, MPR_ERROR, "%s: handle %d parent handle "
3274 "%d does not have a valid SAS address!\n", __func__,
3275 targ->handle, targ->parent_handle);
3276 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3280 sasaddr = targ->parent_sasaddr;
3281 #endif /* OLD_MPR_PROBE */
/* No address by any method: fail the CCB. */
3286 mpr_dprint(sc, MPR_INFO, "%s: unable to find SAS address for "
3287 "handle %d\n", __func__, targ->handle);
3288 mprsas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3291 mprsas_send_smpcmd(sassc, ccb, sasaddr);
3299 #endif //__FreeBSD_version >= 900026
/*
 * XPT_RESET_DEV action handler: allocate a task-management command and
 * issue a SCSI Task Management "Target Reset" (with the Link Reset message
 * flag, i.e. SAS hard reset / SATA link reset) to the addressed target.
 * Completion is reported asynchronously via mprsas_resetdev_complete().
 *
 * FIX(review): the mprsas_prepare_for_tm() call below previously passed
 * 'cm', an identifier that is not declared anywhere in this function.  The
 * task-management command allocated, filled in, and mapped throughout this
 * function is 'tm' (see mpr_map_command(sc, tm) on the next line), so 'tm'
 * is what must be prepared.  Changed cm -> tm.
 *
 * NOTE(review): sampled excerpt — "sc = sassc->sc;", the "if (tm == NULL)"
 * guard, and closing braces fall in gaps of this listing.
 */
3302 mprsas_action_resetdev(struct mprsas_softc *sassc, union ccb *ccb)
3304 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3305 struct mpr_softc *sc;
3306 struct mpr_command *tm;
3307 struct mprsas_target *targ;
3309 MPR_FUNCTRACE(sassc->sc);
3310 mtx_assert(&sassc->sc->mpr_mtx, MA_OWNED);
3312 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out of "
3313 "bounds in XPT_RESET_DEV\n", ccb->ccb_h.target_id));
3315 tm = mprsas_alloc_tm(sc);
3317 mpr_dprint(sc, MPR_ERROR, "command alloc failure in "
3318 "mprsas_action_resetdev\n");
3319 mprsas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/* Fill in the MPI2 task-management request frame. */
3324 targ = &sassc->targets[ccb->ccb_h.target_id];
3325 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3326 req->DevHandle = htole16(targ->handle);
3327 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3328 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3330 /* SAS Hard Link Reset / SATA Link Reset */
3331 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3334 tm->cm_complete = mprsas_resetdev_complete;
3335 tm->cm_complete_data = ccb;
3337 mpr_dprint(sc, MPR_INFO, "%s: Sending reset for target ID %d\n",
3338 __func__, targ->tid);
3341 mprsas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
3342 mpr_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM issued by
 * mprsas_action_resetdev().  Maps the firmware's TM response code onto a
 * CAM status for the CCB stashed in cm_complete_data, announces a bus
 * device reset on success, and releases the TM command frame.
 */
3346 mprsas_resetdev_complete(struct mpr_softc *sc, struct mpr_command *tm)
3348 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3352 mtx_assert(&sc->mpr_mtx, MA_OWNED);
3354 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3355 ccb = tm->cm_complete_data;
3358 * Currently there should be no way we can hit this case. It only
3359 * happens when we have a failure to allocate chain frames, and
3360 * task management commands don't have S/G lists.
3362 if ((tm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3363 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3365 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3367 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for reset of "
3368 "handle %#04x! This should not happen!\n", __func__,
3369 tm->cm_flags, req->DevHandle);
3370 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Log the IOC's verdict before translating it into a CAM status. */
3374 mpr_dprint(sc, MPR_XINFO, "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n",
3375 __func__, le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3377 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3378 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP);
/* Reset succeeded: notify CAM that a BDR was delivered to this target. */
3379 mprsas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3383 mprsas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Return the high-priority TM frame to its pool. */
3387 mprsas_free_tm(sc, tm);
/*
 * CAM SIM poll routine: service the adapter by hand when interrupts are
 * not being delivered (e.g. while dumping or after a panic).  Trace-level
 * debugging is disabled first so per-command log chatter does not make
 * polled operation unusably slow.
 */
3392 mprsas_poll(struct cam_sim *sim)
3394 struct mprsas_softc *sassc;
3396 sassc = cam_sim_softc(sim);
3398 if (sassc->sc->mpr_debug & MPR_TRACE) {
3399 /* frequent debug messages during a panic just slow
3400 * everything down too much.
3402 mpr_dprint(sassc->sc, MPR_XINFO, "%s clearing MPR_TRACE\n",
3404 sassc->sc->mpr_debug &= ~MPR_TRACE;
/* Process replies exactly as the interrupt handler would. */
3407 mpr_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  Two events are handled here:
 *
 *  AC_ADVINFO_CHANGED - a device's advanced info (read-capacity data)
 *      changed; re-issue XPT_DEV_ADVINFO for CDAI_TYPE_RCAPLONG and
 *      update the per-LUN EEDP (protection-information) state.
 *  AC_FOUND_DEVICE - on older FreeBSD versions (see the #if guards),
 *      probe a newly found device for EEDP support via
 *      mprsas_check_eedp().
 *
 * The nested __FreeBSD_version conditionals select between kernels that
 * deliver per-SIM events and kernels where the driver registers for all
 * events and must filter by path id itself.
 */
3411 mprsas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3414 struct mpr_softc *sc;
3416 sc = (struct mpr_softc *)callback_arg;
3419 #if (__FreeBSD_version >= 1000006) || \
3420 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3421 case AC_ADVINFO_CHANGED: {
3422 struct mprsas_target *target;
3423 struct mprsas_softc *sassc;
3424 struct scsi_read_capacity_data_long rcap_buf;
3425 struct ccb_dev_advinfo cdai;
3426 struct mprsas_lun *lun;
/* The async arg encodes which advinfo buffer type changed. */
3431 buftype = (uintptr_t)arg;
3437 * We're only interested in read capacity data changes.
3439 if (buftype != CDAI_TYPE_RCAPLONG)
3443 * See the comment in mpr_attach_sas() for a detailed
3444 * explanation. In these versions of FreeBSD we register
3445 * for all events and filter out the events that don't
3448 #if (__FreeBSD_version < 1000703) || \
3449 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3450 if (xpt_path_path_id(path) != sassc->sim->path_id)
3455 * We should have a handle for this, but check to make sure.
3457 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3458 ("Target %d out of bounds in mprsas_async\n",
3459 xpt_path_target_id(path)));
3460 target = &sassc->targets[xpt_path_target_id(path)];
/* handle == 0 means the target is not (or no longer) attached. */
3461 if (target->handle == 0)
3464 lunid = xpt_path_lun_id(path);
/* Find the LUN in this target's list; create it if it is new. */
3466 SLIST_FOREACH(lun, &target->luns, lun_link) {
3467 if (lun->lun_id == lunid) {
3473 if (found_lun == 0) {
3474 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3477 mpr_dprint(sc, MPR_ERROR, "Unable to alloc "
3478 "LUN for EEDP support.\n");
3481 lun->lun_id = lunid;
3482 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached READ CAPACITY(16) data via XPT_DEV_ADVINFO. */
3485 bzero(&rcap_buf, sizeof(rcap_buf));
3486 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3487 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3488 cdai.ccb_h.flags = CAM_DIR_IN;
3489 cdai.buftype = CDAI_TYPE_RCAPLONG;
3490 #if (__FreeBSD_version >= 1100061) || \
3491 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3492 cdai.flags = CDAI_FLAG_NONE;
3496 cdai.bufsiz = sizeof(rcap_buf);
3497 cdai.buf = (uint8_t *)&rcap_buf;
3498 xpt_action((union ccb *)&cdai);
3499 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3500 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
/* Record EEDP state only if the device reports protection enabled. */
3502 if ((mprsas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3503 && (rcap_buf.prot & SRC16_PROT_EN)) {
3504 switch (rcap_buf.prot & SRC16_P_TYPE) {
3507 lun->eedp_formatted = TRUE;
3508 lun->eedp_block_size =
3509 scsi_4btoul(rcap_buf.length);
3513 lun->eedp_formatted = FALSE;
3514 lun->eedp_block_size = 0;
3518 lun->eedp_formatted = FALSE;
3519 lun->eedp_block_size = 0;
3524 case AC_FOUND_DEVICE: {
3525 struct ccb_getdev *cgd;
3528 * See the comment in mpr_attach_sas() for a detailed
3529 * explanation. In these versions of FreeBSD we register
3530 * for all events and filter out the events that don't
3533 #if (__FreeBSD_version < 1000703) || \
3534 ((__FreeBSD_version >= 1100000) && (__FreeBSD_version < 1100002))
3535 if (xpt_path_path_id(path) != sc->sassc->sim->path_id)
3540 #if (__FreeBSD_version < 901503) || \
3541 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
3542 mprsas_check_eedp(sc, path, cgd);
3551 #if (__FreeBSD_version < 901503) || \
3552 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Probe a newly discovered device for EEDP (end-to-end data protection)
 * support on FreeBSD versions without AC_ADVINFO_CHANGED: if the inquiry
 * data advertises protection (SPC3_SID_PROTECT), issue a READ
 * CAPACITY(16) whose completion (mprsas_read_cap_done) records whether
 * the LUN is actually formatted for EEDP.
 *
 * Ownership: rcap_buf, the CCB, and local_path are handed to the SCSI
 * I/O; mprsas_read_cap_done() frees them.
 */
3554 mprsas_check_eedp(struct mpr_softc *sc, struct cam_path *path,
3555 struct ccb_getdev *cgd)
3557 struct mprsas_softc *sassc = sc->sassc;
3558 struct ccb_scsiio *csio;
3559 struct scsi_read_capacity_16 *scsi_cmd;
3560 struct scsi_read_capacity_eedp *rcap_buf;
3562 target_id_t targetid;
3565 struct cam_path *local_path;
3566 struct mprsas_target *target;
3567 struct mprsas_lun *lun;
3571 pathid = cam_sim_path(sassc->sim);
3572 targetid = xpt_path_target_id(path);
3573 lunid = xpt_path_lun_id(path);
3575 KASSERT(targetid < sassc->maxtargets, ("Target %d out of bounds in "
3576 "mprsas_check_eedp\n", targetid));
3577 target = &sassc->targets[targetid];
/* handle == 0 means no firmware device behind this target slot. */
3578 if (target->handle == 0x0)
3582 * Determine if the device is EEDP capable.
3584 * If this flag is set in the inquiry data, the device supports
3585 * protection information, and must support the 16 byte read capacity
3586 * command, otherwise continue without sending read cap 16.
3588 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3592 * Issue a READ CAPACITY 16 command. This info is used to determine if
3593 * the LUN is formatted for EEDP support.
3595 ccb = xpt_alloc_ccb_nowait();
3597 mpr_dprint(sc, MPR_ERROR, "Unable to alloc CCB for EEDP "
3602 if (xpt_create_path(&local_path, xpt_periph, pathid, targetid, lunid) !=
3604 mpr_dprint(sc, MPR_ERROR, "Unable to create path for EEDP "
3611 * If LUN is already in list, don't create a new one.
3614 SLIST_FOREACH(lun, &target->luns, lun_link) {
3615 if (lun->lun_id == lunid) {
3621 lun = malloc(sizeof(struct mprsas_lun), M_MPR,
3624 mpr_dprint(sc, MPR_ERROR, "Unable to alloc LUN for "
3626 xpt_free_path(local_path);
3630 lun->lun_id = lunid;
3631 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
3634 xpt_path_string(local_path, path_str, sizeof(path_str));
3635 mpr_dprint(sc, MPR_INFO, "Sending read cap: path %s handle %d\n",
3636 path_str, target->handle);
3639 * Issue a READ CAPACITY 16 command for the LUN. The
3640 * mprsas_read_cap_done function will load the read cap info into the
3643 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), M_MPR,
3645 if (rcap_buf == NULL) {
3646 mpr_dprint(sc, MPR_ERROR, "Unable to alloc read capacity "
3647 "buffer for EEDP support.\n");
3648 xpt_free_path(ccb->ccb_h.path);
/* Build the SCSI I/O: data-in, 4 retries, 60 s timeout. */
3652 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3654 csio->ccb_h.func_code = XPT_SCSI_IO;
3655 csio->ccb_h.flags = CAM_DIR_IN;
3656 csio->ccb_h.retry_count = 4;
3657 csio->ccb_h.cbfcnp = mprsas_read_cap_done;
3658 csio->ccb_h.timeout = 60000;
3659 csio->data_ptr = (uint8_t *)rcap_buf;
3660 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3661 csio->sense_len = MPR_SENSE_LEN;
3662 csio->cdb_len = sizeof(*scsi_cmd);
3663 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* CDB: SERVICE ACTION IN(16) / READ CAPACITY(16), opcode 0x9E. */
3665 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3666 bzero(scsi_cmd, sizeof(*scsi_cmd));
3667 scsi_cmd->opcode = 0x9E;
3668 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
/* Byte 13 of the CDB is the allocation length (LSB). */
3669 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion callback can find its state. */
3671 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the READ CAPACITY(16) issued by
 * mprsas_check_eedp().  Looks up the LUN the command was sent to and
 * records whether it is formatted for EEDP (protection bit set in the
 * returned data) along with its logical block size.  Frees the buffer,
 * path, and CCB allocated by the submit side.
 */
3676 mprsas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3678 struct mprsas_softc *sassc;
3679 struct mprsas_target *target;
3680 struct mprsas_lun *lun;
3681 struct scsi_read_capacity_eedp *rcap_buf;
3683 if (done_ccb == NULL)
3686 /* Driver needs to release the devq itself if the SCSI command was
3687 * generated by the driver internally.
3688 * Currently there is a single place where the driver
3689 * issues a SCSI command internally. In the future, if the driver
3690 * issues more SCSI commands internally, it needs to release the
3691 * devq internally, since those commands will not go back to
3694 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3695 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3696 xpt_release_devq(done_ccb->ccb_h.path,
3697 /*count*/ 1, /*run_queue*/TRUE);
3700 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3703 * Get the LUN ID for the path and look it up in the LUN list for the
3706 sassc = (struct mprsas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3707 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets, ("Target %d out "
3708 "of bounds in mprsas_read_cap_done\n", done_ccb->ccb_h.target_id));
3709 target = &sassc->targets[done_ccb->ccb_h.target_id];
3710 SLIST_FOREACH(lun, &target->luns, lun_link) {
3711 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3715 * Got the LUN in the target's LUN list. Fill it in with EEDP
3716 * info. If the READ CAP 16 command had some SCSI error (common
3717 * if command is not supported), mark the lun as not supporting
3718 * EEDP and set the block size to 0.
3720 if ((mprsas_get_ccbstatus(done_ccb) != CAM_REQ_CMP) ||
3721 (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3722 lun->eedp_formatted = FALSE;
3723 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte (PROT_EN) => LUN formatted with EEDP. */
3727 if (rcap_buf->protect & 0x01) {
3728 mpr_dprint(sassc->sc, MPR_INFO, "LUN %d for target ID "
3729 "%d is formatted for EEDP support.\n",
3730 done_ccb->ccb_h.target_lun,
3731 done_ccb->ccb_h.target_id);
3732 lun->eedp_formatted = TRUE;
3733 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3738 // Finished with this CCB and path.
3739 free(rcap_buf, M_MPR);
3740 xpt_free_path(done_ccb->ccb_h.path);
3741 xpt_free_ccb(done_ccb);
3743 #endif /* (__FreeBSD_version < 901503) || \
3744 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3747 * Set the INRESET flag for this target so that no I/O will be sent to
3748 * the target until the reset has completed. If an I/O request does
3749 * happen, the devq will be frozen. The CCB holds the path which is
3750 * used to release the devq. The devq is released and the CCB is freed
3751 * when the TM completes.
3754 mprsas_prepare_for_tm(struct mpr_softc *sc, struct mpr_command *tm,
3755 struct mprsas_target *target, lun_id_t lun_id)
/* Allocate a CCB only to carry a path for later devq release. */
3760 ccb = xpt_alloc_ccb_nowait();
3762 path_id = cam_sim_path(sc->sassc->sim);
3763 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3764 target->tid, lun_id) != CAM_REQ_CMP) {
/* Tie the TM to its target and gate further I/O to it. */
3768 tm->cm_targ = target;
3769 target->flags |= MPRSAS_TARGET_INRESET;
/*
 * Kick off SAS discovery: issue the IOC Port Enable request and flag
 * that we are waiting for it, which keeps the simq frozen until
 * discovery completes.
 */
3775 mprsas_startup(struct mpr_softc *sc)
3778 * Send the port enable message and set the wait_for_port_enable flag.
3779 * This flag helps to keep the simq frozen until all discovery events
3782 sc->wait_for_port_enable = 1;
3783 mprsas_send_portenable(sc);
/*
 * Build and queue an MPI2 PORT ENABLE request to the IOC.  Completion is
 * handled by mprsas_portenable_complete().
 */
3788 mprsas_send_portenable(struct mpr_softc *sc)
3790 MPI2_PORT_ENABLE_REQUEST *request;
3791 struct mpr_command *cm;
/* No command frame available: cannot start port enable now. */
3795 if ((cm = mpr_alloc_command(sc)) == NULL)
3797 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3798 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3799 request->MsgFlags = 0;
3801 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3802 cm->cm_complete = mprsas_portenable_complete;
3806 mpr_map_command(sc, cm);
3807 mpr_dprint(sc, MPR_XINFO,
3808 "mpr_send_portenable finished cm %p req %p complete %p\n",
3809 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT ENABLE request: log any failure, free
 * the command, and wake up anyone waiting for discovery to start so the
 * startup refcount can be dropped (unfreezing the simq when it hits 0).
 */
3814 mprsas_portenable_complete(struct mpr_softc *sc, struct mpr_command *cm)
3816 MPI2_PORT_ENABLE_REPLY *reply;
3817 struct mprsas_softc *sassc;
3823 * Currently there should be no way we can hit this case. It only
3824 * happens when we have a failure to allocate chain frames, and
3825 * port enable commands don't have S/G lists.
3827 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) {
3828 mpr_dprint(sc, MPR_ERROR, "%s: cm_flags = %#x for port enable! "
3829 "This should not happen!\n", __func__, cm->cm_flags);
3832 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3834 mpr_dprint(sc, MPR_FAULT, "Portenable NULL reply\n");
/*
 * Fixed: IOCStatus is little-endian, so byte-swap it first and mask in
 * host order.  Masking the raw LE value with a host-order constant
 * (the previous code) gives the wrong result on big-endian machines;
 * compare the correct usage at the IOCStatus dprint in
 * mprsas_resetdev_complete().
 */
3835 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3836 MPI2_IOCSTATUS_SUCCESS)
3837 mpr_dprint(sc, MPR_FAULT, "Portenable failed\n");
3839 mpr_free_command(sc, cm);
3841 * Done waiting for port enable to complete. Decrement the refcount.
3842 * If refcount is 0, discovery is complete and a rescan of the bus can
3845 sc->wait_for_port_enable = 0;
3846 sc->port_enable_complete = 1;
3847 wakeup(&sc->port_enable_complete);
3848 mprsas_startup_decrement(sassc);
/*
 * Check whether target `id` appears in the driver's comma-separated
 * exclude_ids list (a tunable held in the softc).  Empty tokens in the
 * list are skipped; each remaining token is parsed as an integer and
 * compared against `id`.
 */
3852 mprsas_check_id(struct mprsas_softc *sassc, int id)
3854 struct mpr_softc *sc = sassc->sc;
/* strsep() consumes the string, advancing `ids` past each comma. */
3858 ids = &sc->exclude_ids[0];
3859 while((name = strsep(&ids, ",")) != NULL) {
3860 if (name[0] == '\0')
3862 if (strtol(name, NULL, 0) == (long)id)
3870 mprsas_realloc_targets(struct mpr_softc *sc, int maxtargets)
3872 struct mprsas_softc *sassc;
3873 struct mprsas_lun *lun, *lun_tmp;
3874 struct mprsas_target *targ;
3879 * The number of targets is based on IOC Facts, so free all of
3880 * the allocated LUNs for each target and then the target buffer
3883 for (i=0; i< maxtargets; i++) {
3884 targ = &sassc->targets[i];
3885 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3889 free(sassc->targets, M_MPR);
3891 sassc->targets = malloc(sizeof(struct mprsas_target) * maxtargets,
3892 M_MPR, M_WAITOK|M_ZERO);
3893 if (!sassc->targets) {
3894 panic("%s failed to alloc targets with error %d\n",