2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2009 Yahoo! Inc.
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 /* Communications core for Avago Technologies (LSI) MPT2 */
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
50 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
59 #include <machine/bus.h>
60 #include <machine/resource.h>
63 #include <machine/stdarg.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
#if __FreeBSD_version >= 900026
#include <cam/scsi/smp_all.h>
#endif /* __FreeBSD_version >= 900026 */
79 #include <dev/mps/mpi/mpi2_type.h>
80 #include <dev/mps/mpi/mpi2.h>
81 #include <dev/mps/mpi/mpi2_ioc.h>
82 #include <dev/mps/mpi/mpi2_sas.h>
83 #include <dev/mps/mpi/mpi2_cnfg.h>
84 #include <dev/mps/mpi/mpi2_init.h>
85 #include <dev/mps/mpi/mpi2_tool.h>
86 #include <dev/mps/mps_ioctl.h>
87 #include <dev/mps/mpsvar.h>
88 #include <dev/mps/mps_table.h>
89 #include <dev/mps/mps_sas.h>
#define MPSSAS_DISCOVERY_TIMEOUT	20
#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */
95 * static array to check SCSI OpCode for EEDP protection bits
97 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
100 static uint8_t op_code_prot[256] = {
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
116 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
119 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
121 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
122 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
123 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mpssas_poll(struct cam_sim *sim);
125 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
126 struct mps_command *cm);
127 static void mpssas_scsiio_timeout(void *data);
128 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
129 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
130 struct mps_command *cm, union ccb *ccb);
131 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
132 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
133 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
134 #if __FreeBSD_version >= 900026
135 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
136 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
138 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
139 #endif //FreeBSD_version >= 900026
140 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
141 static void mpssas_async(void *callback_arg, uint32_t code,
142 struct cam_path *path, void *arg);
143 #if (__FreeBSD_version < 901503) || \
144 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
145 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
146 struct ccb_getdev *cgd);
147 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
149 static int mpssas_send_portenable(struct mps_softc *sc);
150 static void mpssas_portenable_complete(struct mps_softc *sc,
151 struct mps_command *cm);
153 struct mpssas_target *
154 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
156 struct mpssas_target *target;
159 for (i = start; i < sassc->maxtargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
/* We need to freeze the simq during attach and diag reset, to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end of discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
 */
176 mpssas_startup_increment(struct mpssas_softc *sassc)
178 MPS_FUNCTRACE(sassc->sc);
180 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
181 if (sassc->startup_refcount++ == 0) {
182 /* just starting, freeze the simq */
183 mps_dprint(sassc->sc, MPS_INIT,
184 "%s freezing simq\n", __func__);
185 #if __FreeBSD_version >= 1000039
188 xpt_freeze_simq(sassc->sim, 1);
190 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
191 sassc->startup_refcount);
196 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
198 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
199 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
200 xpt_release_simq(sassc->sim, 1);
201 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
206 mpssas_startup_decrement(struct mpssas_softc *sassc)
208 MPS_FUNCTRACE(sassc->sc);
210 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
211 if (--sassc->startup_refcount == 0) {
212 /* finished all discovery-related actions, release
213 * the simq and rescan for the latest topology.
215 mps_dprint(sassc->sc, MPS_INIT,
216 "%s releasing simq\n", __func__);
217 sassc->flags &= ~MPSSAS_IN_STARTUP;
218 xpt_release_simq(sassc->sim, 1);
219 #if __FreeBSD_version >= 1000039
222 mpssas_rescan_target(sassc->sc, NULL);
225 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
226 sassc->startup_refcount);
/* The firmware requires us to stop sending commands when we're doing task
 * management, so refcount the TMs and keep the simq frozen when any are in
 * use.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	struct mps_command *tm;

	/* TM commands come from the reserved high-priority pool. */
	tm = mps_alloc_high_priority_command(sc);
	return tm;
}
244 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
246 int target_id = 0xFFFFFFFF;
252 * For TM's the devq is frozen for the device. Unfreeze it here and
253 * free the resources used for freezing the devq. Must clear the
254 * INRESET flag as well or scsi I/O will not work.
256 if (tm->cm_targ != NULL) {
257 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
258 target_id = tm->cm_targ->tid;
261 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
263 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
264 xpt_free_path(tm->cm_ccb->ccb_h.path);
265 xpt_free_ccb(tm->cm_ccb);
268 mps_free_high_priority_command(sc, tm);
272 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
274 struct mpssas_softc *sassc = sc->sassc;
276 target_id_t targetid;
280 pathid = cam_sim_path(sassc->sim);
282 targetid = CAM_TARGET_WILDCARD;
284 targetid = targ - sassc->targets;
287 * Allocate a CCB and schedule a rescan.
289 ccb = xpt_alloc_ccb_nowait();
291 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
295 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
296 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
297 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
302 if (targetid == CAM_TARGET_WILDCARD)
303 ccb->ccb_h.func_code = XPT_SCAN_BUS;
305 ccb->ccb_h.func_code = XPT_SCAN_TGT;
307 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
312 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
322 /* No need to be in here if debugging isn't enabled */
323 if ((cm->cm_sc->mps_debug & level) == 0)
326 sbuf_new(&sb, str, sizeof(str), 0);
330 if (cm->cm_ccb != NULL) {
331 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
333 sbuf_cat(&sb, path_str);
334 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
335 scsi_command_string(&cm->cm_ccb->csio, &sb);
336 sbuf_printf(&sb, "length %d ",
337 cm->cm_ccb->csio.dxfer_len);
341 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
342 cam_sim_name(cm->cm_sc->sassc->sim),
343 cam_sim_unit(cm->cm_sc->sassc->sim),
344 cam_sim_bus(cm->cm_sc->sassc->sim),
345 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
349 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
350 sbuf_vprintf(&sb, fmt, ap);
352 mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
359 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
361 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
362 struct mpssas_target *targ;
367 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
368 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
372 /* XXX retry the remove after the diag reset completes? */
373 mps_dprint(sc, MPS_FAULT,
374 "%s NULL reply resetting device 0x%04x\n", __func__,
376 mpssas_free_tm(sc, tm);
380 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
381 MPI2_IOCSTATUS_SUCCESS) {
382 mps_dprint(sc, MPS_ERROR,
383 "IOCStatus = 0x%x while resetting device 0x%x\n",
384 le16toh(reply->IOCStatus), handle);
387 mps_dprint(sc, MPS_XINFO,
388 "Reset aborted %u commands\n", reply->TerminationCount);
389 mps_free_reply(sc, tm->cm_reply_data);
390 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
392 mps_dprint(sc, MPS_XINFO,
393 "clearing target %u handle 0x%04x\n", targ->tid, handle);
396 * Don't clear target if remove fails because things will get confusing.
397 * Leave the devname and sasaddr intact so that we know to avoid reusing
398 * this target id if possible, and so we can assign the same target id
399 * to this device if it comes back in the future.
401 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
402 MPI2_IOCSTATUS_SUCCESS) {
405 targ->encl_handle = 0x0;
406 targ->encl_slot = 0x0;
407 targ->exp_dev_handle = 0x0;
409 targ->linkrate = 0x0;
414 mpssas_free_tm(sc, tm);
419 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
420 * Otherwise Volume Delete is same as Bare Drive Removal.
423 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
425 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
426 struct mps_softc *sc;
427 struct mps_command *cm;
428 struct mpssas_target *targ = NULL;
430 MPS_FUNCTRACE(sassc->sc);
435 * If this is a WD controller, determine if the disk should be exposed
436 * to the OS or not. If disk should be exposed, return from this
437 * function without doing anything.
439 if (sc->WD_available && (sc->WD_hide_expose ==
440 MPS_WD_EXPOSE_ALWAYS)) {
445 targ = mpssas_find_target_by_handle(sassc, 0, handle);
447 /* FIXME: what is the action? */
448 /* We don't know about this device? */
449 mps_dprint(sc, MPS_ERROR,
450 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
454 targ->flags |= MPSSAS_TARGET_INREMOVAL;
456 cm = mpssas_alloc_tm(sc);
458 mps_dprint(sc, MPS_ERROR,
459 "%s: command alloc failure\n", __func__);
463 mpssas_rescan_target(sc, targ);
465 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
466 req->DevHandle = targ->handle;
467 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
468 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
470 /* SAS Hard Link Reset / SATA Link Reset */
471 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
475 cm->cm_desc.HighPriority.RequestFlags =
476 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
477 cm->cm_complete = mpssas_remove_volume;
478 cm->cm_complete_data = (void *)(uintptr_t)handle;
480 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
481 __func__, targ->tid);
482 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
484 mps_map_command(sc, cm);
488 * The MPT2 firmware performs debounce on the link to avoid transient link
489 * errors and false removals. When it does decide that link has been lost
490 * and a device need to go away, it expects that the host will perform a
491 * target reset and then an op remove. The reset has the side-effect of
492 * aborting any outstanding requests for the device, which is required for
493 * the op-remove to succeed. It's not clear if the host should check for
494 * the device coming back alive after the reset.
497 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
499 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
500 struct mps_softc *sc;
501 struct mps_command *cm;
502 struct mpssas_target *targ = NULL;
504 MPS_FUNCTRACE(sassc->sc);
508 targ = mpssas_find_target_by_handle(sassc, 0, handle);
510 /* FIXME: what is the action? */
511 /* We don't know about this device? */
512 mps_dprint(sc, MPS_ERROR,
513 "%s : invalid handle 0x%x \n", __func__, handle);
517 targ->flags |= MPSSAS_TARGET_INREMOVAL;
519 cm = mpssas_alloc_tm(sc);
521 mps_dprint(sc, MPS_ERROR,
522 "%s: command alloc failure\n", __func__);
526 mpssas_rescan_target(sc, targ);
528 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
529 memset(req, 0, sizeof(*req));
530 req->DevHandle = htole16(targ->handle);
531 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
532 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
534 /* SAS Hard Link Reset / SATA Link Reset */
535 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
539 cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
540 cm->cm_complete = mpssas_remove_device;
541 cm->cm_complete_data = (void *)(uintptr_t)handle;
543 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
544 __func__, targ->tid);
545 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
547 mps_map_command(sc, cm);
551 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
553 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
554 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
555 struct mpssas_target *targ;
556 struct mps_command *next_cm;
561 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
562 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
566 * Currently there should be no way we can hit this case. It only
567 * happens when we have a failure to allocate chain frames, and
568 * task management commands don't have S/G lists.
570 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
571 mps_dprint(sc, MPS_ERROR,
572 "%s: cm_flags = %#x for remove of handle %#04x! "
573 "This should not happen!\n", __func__, tm->cm_flags,
578 /* XXX retry the remove after the diag reset completes? */
579 mps_dprint(sc, MPS_FAULT,
580 "%s NULL reply resetting device 0x%04x\n", __func__,
582 mpssas_free_tm(sc, tm);
586 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
587 MPI2_IOCSTATUS_SUCCESS) {
588 mps_dprint(sc, MPS_ERROR,
589 "IOCStatus = 0x%x while resetting device 0x%x\n",
590 le16toh(reply->IOCStatus), handle);
593 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
594 le32toh(reply->TerminationCount));
595 mps_free_reply(sc, tm->cm_reply_data);
596 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
598 /* Reuse the existing command */
599 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
600 memset(req, 0, sizeof(*req));
601 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
602 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
603 req->DevHandle = htole16(handle);
605 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
606 tm->cm_complete = mpssas_remove_complete;
607 tm->cm_complete_data = (void *)(uintptr_t)handle;
609 mps_map_command(sc, tm);
611 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
613 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
616 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
617 ccb = tm->cm_complete_data;
618 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
619 mpssas_scsiio_complete(sc, tm);
624 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
626 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
628 struct mpssas_target *targ;
629 struct mpssas_lun *lun;
633 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
634 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
637 * Currently there should be no way we can hit this case. It only
638 * happens when we have a failure to allocate chain frames, and
639 * task management commands don't have S/G lists.
641 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
642 mps_dprint(sc, MPS_XINFO,
643 "%s: cm_flags = %#x for remove of handle %#04x! "
644 "This should not happen!\n", __func__, tm->cm_flags,
646 mpssas_free_tm(sc, tm);
651 /* most likely a chip reset */
652 mps_dprint(sc, MPS_FAULT,
653 "%s NULL reply removing device 0x%04x\n", __func__, handle);
654 mpssas_free_tm(sc, tm);
658 mps_dprint(sc, MPS_XINFO,
659 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
660 handle, le16toh(reply->IOCStatus));
663 * Don't clear target if remove fails because things will get confusing.
664 * Leave the devname and sasaddr intact so that we know to avoid reusing
665 * this target id if possible, and so we can assign the same target id
666 * to this device if it comes back in the future.
668 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
669 MPI2_IOCSTATUS_SUCCESS) {
672 targ->encl_handle = 0x0;
673 targ->encl_slot = 0x0;
674 targ->exp_dev_handle = 0x0;
676 targ->linkrate = 0x0;
680 while(!SLIST_EMPTY(&targ->luns)) {
681 lun = SLIST_FIRST(&targ->luns);
682 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
688 mpssas_free_tm(sc, tm);
692 mpssas_register_events(struct mps_softc *sc)
694 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
697 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
698 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
699 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
700 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
701 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
702 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
703 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
704 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
705 setbit(events, MPI2_EVENT_IR_VOLUME);
706 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
707 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
708 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
710 mps_register_events(sc, events, mpssas_evt_handler, NULL,
711 &sc->sassc->mpssas_eh);
717 mps_attach_sas(struct mps_softc *sc)
719 struct mpssas_softc *sassc;
724 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
726 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
728 mps_dprint(sc, MPS_INIT|MPS_ERROR,
729 "Cannot allocate SAS controller memory\n");
734 * XXX MaxTargets could change during a reinit. Since we don't
735 * resize the targets[] array during such an event, cache the value
736 * of MaxTargets here so that we don't get into trouble later. This
737 * should move into the reinit logic.
739 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
740 sassc->targets = malloc(sizeof(struct mpssas_target) *
741 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
742 if(!sassc->targets) {
743 mps_dprint(sc, MPS_INIT|MPS_ERROR,
744 "Cannot allocate SAS target memory\n");
751 if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
752 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
757 unit = device_get_unit(sc->mps_dev);
758 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
759 unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
760 if (sassc->sim == NULL) {
761 mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
766 TAILQ_INIT(&sassc->ev_queue);
768 /* Initialize taskqueue for Event Handling */
769 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
770 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
771 taskqueue_thread_enqueue, &sassc->ev_tq);
772 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
773 device_get_nameunit(sc->mps_dev));
778 * XXX There should be a bus for every port on the adapter, but since
779 * we're just going to fake the topology for now, we'll pretend that
780 * everything is just a target on a single bus.
782 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
783 mps_dprint(sc, MPS_INIT|MPS_ERROR,
784 "Error %d registering SCSI bus\n", error);
790 * Assume that discovery events will start right away.
792 * Hold off boot until discovery is complete.
794 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
795 sc->sassc->startup_refcount = 0;
796 mpssas_startup_increment(sassc);
798 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
801 * Register for async events so we can determine the EEDP
802 * capabilities of devices.
804 status = xpt_create_path(&sassc->path, /*periph*/NULL,
805 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
807 if (status != CAM_REQ_CMP) {
808 mps_dprint(sc, MPS_ERROR|MPS_INIT,
809 "Error %#x creating sim path\n", status);
814 #if (__FreeBSD_version >= 1000006) || \
815 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
816 event = AC_ADVINFO_CHANGED;
818 event = AC_FOUND_DEVICE;
820 status = xpt_register_async(event, mpssas_async, sc,
822 if (status != CAM_REQ_CMP) {
823 mps_dprint(sc, MPS_ERROR,
824 "Error %#x registering async handler for "
825 "AC_ADVINFO_CHANGED events\n", status);
826 xpt_free_path(sassc->path);
830 if (status != CAM_REQ_CMP) {
832 * EEDP use is the exception, not the rule.
833 * Warn the user, but do not fail to attach.
835 mps_printf(sc, "EEDP capabilities disabled.\n");
840 mpssas_register_events(sc);
845 mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
850 mps_detach_sas(struct mps_softc *sc)
852 struct mpssas_softc *sassc;
853 struct mpssas_lun *lun, *lun_tmp;
854 struct mpssas_target *targ;
859 if (sc->sassc == NULL)
863 mps_deregister_events(sc, sassc->mpssas_eh);
866 * Drain and free the event handling taskqueue with the lock
867 * unheld so that any parallel processing tasks drain properly
868 * without deadlocking.
870 if (sassc->ev_tq != NULL)
871 taskqueue_free(sassc->ev_tq);
873 /* Make sure CAM doesn't wedge if we had to bail out early. */
876 while (sassc->startup_refcount != 0)
877 mpssas_startup_decrement(sassc);
879 /* Deregister our async handler */
880 if (sassc->path != NULL) {
881 xpt_register_async(0, mpssas_async, sc, sassc->path);
882 xpt_free_path(sassc->path);
886 if (sassc->flags & MPSSAS_IN_STARTUP)
887 xpt_release_simq(sassc->sim, 1);
889 if (sassc->sim != NULL) {
890 xpt_bus_deregister(cam_sim_path(sassc->sim));
891 cam_sim_free(sassc->sim, FALSE);
896 if (sassc->devq != NULL)
897 cam_simq_free(sassc->devq);
899 for(i=0; i< sassc->maxtargets ;i++) {
900 targ = &sassc->targets[i];
901 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
905 free(sassc->targets, M_MPT2);
913 mpssas_discovery_end(struct mpssas_softc *sassc)
915 struct mps_softc *sc = sassc->sc;
919 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
920 callout_stop(&sassc->discovery_callout);
923 * After discovery has completed, check the mapping table for any
924 * missing devices and update their missing counts. Only do this once
925 * whenever the driver is initialized so that missing counts aren't
926 * updated unnecessarily. Note that just because discovery has
927 * completed doesn't mean that events have been processed yet. The
928 * check_devices function is a callout timer that checks if ALL devices
929 * are missing. If so, it will wait a little longer for events to
930 * complete and keep resetting itself until some device in the mapping
931 * table is not missing, meaning that event processing has started.
933 if (sc->track_mapping_events) {
934 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
935 "completed. Check for missing devices in the mapping "
937 callout_reset(&sc->device_check_callout,
938 MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
944 mpssas_action(struct cam_sim *sim, union ccb *ccb)
946 struct mpssas_softc *sassc;
948 sassc = cam_sim_softc(sim);
950 MPS_FUNCTRACE(sassc->sc);
951 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
952 ccb->ccb_h.func_code);
953 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
955 switch (ccb->ccb_h.func_code) {
958 struct ccb_pathinq *cpi = &ccb->cpi;
959 struct mps_softc *sc = sassc->sc;
960 uint8_t sges_per_frame;
962 cpi->version_num = 1;
963 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
964 cpi->target_sprt = 0;
965 #if __FreeBSD_version >= 1000039
966 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
968 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
970 cpi->hba_eng_cnt = 0;
971 cpi->max_target = sassc->maxtargets - 1;
975 * initiator_id is set here to an ID outside the set of valid
976 * target IDs (including volumes).
978 cpi->initiator_id = sassc->maxtargets;
979 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
980 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
981 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
982 cpi->unit_number = cam_sim_unit(sim);
983 cpi->bus_id = cam_sim_bus(sim);
984 cpi->base_transfer_speed = 150000;
985 cpi->transport = XPORT_SAS;
986 cpi->transport_version = 0;
987 cpi->protocol = PROTO_SCSI;
988 cpi->protocol_version = SCSI_REV_SPC;
991 * Max IO Size is Page Size * the following:
992 * ((SGEs per frame - 1 for chain element) *
993 * Max Chain Depth) + 1 for no chain needed in last frame
995 * If user suggests a Max IO size to use, use the smaller of the
996 * user's value and the calculated value as long as the user's
997 * value is larger than 0. The user's value is in pages.
999 sges_per_frame = ((sc->facts->IOCRequestFrameSize * 4) /
1000 sizeof(MPI2_SGE_SIMPLE64)) - 1;
1001 cpi->maxio = (sges_per_frame * sc->facts->MaxChainDepth) + 1;
1002 cpi->maxio *= PAGE_SIZE;
1003 if ((sc->max_io_pages > 0) && (sc->max_io_pages * PAGE_SIZE <
1005 cpi->maxio = sc->max_io_pages * PAGE_SIZE;
1006 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1009 case XPT_GET_TRAN_SETTINGS:
1011 struct ccb_trans_settings *cts;
1012 struct ccb_trans_settings_sas *sas;
1013 struct ccb_trans_settings_scsi *scsi;
1014 struct mpssas_target *targ;
1017 sas = &cts->xport_specific.sas;
1018 scsi = &cts->proto_specific.scsi;
1020 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1021 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
1022 cts->ccb_h.target_id));
1023 targ = &sassc->targets[cts->ccb_h.target_id];
1024 if (targ->handle == 0x0) {
1025 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1029 cts->protocol_version = SCSI_REV_SPC2;
1030 cts->transport = XPORT_SAS;
1031 cts->transport_version = 0;
1033 sas->valid = CTS_SAS_VALID_SPEED;
1034 switch (targ->linkrate) {
1036 sas->bitrate = 150000;
1039 sas->bitrate = 300000;
1042 sas->bitrate = 600000;
1048 cts->protocol = PROTO_SCSI;
1049 scsi->valid = CTS_SCSI_VALID_TQ;
1050 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1052 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1055 case XPT_CALC_GEOMETRY:
1056 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1057 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1060 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1061 mpssas_action_resetdev(sassc, ccb);
1066 mps_dprint(sassc->sc, MPS_XINFO,
1067 "mpssas_action faking success for abort or reset\n");
1068 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1071 mpssas_action_scsiio(sassc, ccb);
1073 #if __FreeBSD_version >= 900026
1075 mpssas_action_smpio(sassc, ccb);
1079 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1087 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1088 target_id_t target_id, lun_id_t lun_id)
1090 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1091 struct cam_path *path;
1093 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1094 ac_code, target_id, (uintmax_t)lun_id);
1096 if (xpt_create_path(&path, NULL,
1097 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1098 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1103 xpt_async(ac_code, path, NULL);
1104 xpt_free_path(path);
1108 mpssas_complete_all_commands(struct mps_softc *sc)
1110 struct mps_command *cm;
1115 mtx_assert(&sc->mps_mtx, MA_OWNED);
1117 /* complete all commands with a NULL reply */
1118 for (i = 1; i < sc->num_reqs; i++) {
1119 cm = &sc->commands[i];
1120 cm->cm_reply = NULL;
1123 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1124 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1126 if (cm->cm_complete != NULL) {
1127 mpssas_log_command(cm, MPS_RECOVERY,
1128 "completing cm %p state %x ccb %p for diag reset\n",
1129 cm, cm->cm_state, cm->cm_ccb);
1131 cm->cm_complete(sc, cm);
1135 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1136 mpssas_log_command(cm, MPS_RECOVERY,
1137 "waking up cm %p state %x ccb %p for diag reset\n",
1138 cm, cm->cm_state, cm->cm_ccb);
1143 if (cm->cm_sc->io_cmds_active != 0)
1144 cm->cm_sc->io_cmds_active--;
1146 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1147 /* this should never happen, but if it does, log */
1148 mpssas_log_command(cm, MPS_RECOVERY,
1149 "cm %p state %x flags 0x%x ccb %p during diag "
1150 "reset\n", cm, cm->cm_state, cm->cm_flags,
/*
 * Put the driver back into startup/discovery mode after a controller
 * diagnostic reset: bump the startup refcount (freezing the simq so CAM
 * sends nothing until rediscovery finishes), announce a bus reset to CAM,
 * complete every outstanding command, and invalidate all cached target
 * device handles, which are stale after the reset.
 * NOTE(review): elided extract — some declarations/braces not visible here.
 */
1157 mpssas_handle_reinit(struct mps_softc *sc)
1161 /* Go back into startup mode and freeze the simq, so that CAM
1162 * doesn't send any commands until after we've rediscovered all
1163 * targets and found the proper device handles for them.
1165 * After the reset, portenable will trigger discovery, and after all
1166 * discovery-related activities have finished, the simq will be
1169 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1170 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1171 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
/* Holding a startup reference keeps the simq frozen until discovery. */
1172 mpssas_startup_increment(sc->sassc);
1174 /* notify CAM of a bus reset */
1175 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1178 /* complete and cleanup after all outstanding commands */
1179 mpssas_complete_all_commands(sc);
1181 mps_dprint(sc, MPS_INIT,
1182 "%s startup %u after command completion\n", __func__,
1183 sc->sassc->startup_refcount);
1185 /* zero all the target handles, since they may change after the
1186 * reset, and we have to rediscover all the targets and use the new
1189 for (i = 0; i < sc->sassc->maxtargets; i++) {
/* Outstanding should be 0 here; log any stragglers for diagnosis. */
1190 if (sc->sassc->targets[i].outstanding != 0)
1191 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1192 i, sc->sassc->targets[i].outstanding);
1193 sc->sassc->targets[i].handle = 0x0;
1194 sc->sassc->targets[i].exp_dev_handle = 0x0;
1195 sc->sassc->targets[i].outstanding = 0;
/* Mark the target so stale events are ignored until rediscovery. */
1196 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * Callout handler fired when a task-management (TM) request itself times
 * out.  Logs the stuck TM; presumably escalates to a diag reset in the
 * elided tail of the function — TODO confirm against the full source.
 */
1201 mpssas_tm_timeout(void *data)
1203 struct mps_command *tm = data;
1204 struct mps_softc *sc = tm->cm_sc;
/* Callouts for this driver run with the softc mutex held. */
1206 mtx_assert(&sc->mps_mtx, MA_OWNED);
1208 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1209 "task mgmt %p timed out\n", tm);
/*
 * Completion handler for a LUN-reset task-management request.  Stops the
 * TM timeout callout, sanity-checks the TM, then: if no commands remain
 * for the LUN, recovery for this LU is done (announce BDR, move on to any
 * other timed-out command, or free the TM); otherwise the reset failed in
 * effect and we escalate to a target reset.
 * NOTE(review): elided extract — some returns/braces not visible here.
 */
1214 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1216 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1217 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1218 unsigned int cm_count = 0;
1219 struct mps_command *cm;
1220 struct mpssas_target *targ;
/* The TM completed; cancel its pending mpssas_tm_timeout callout. */
1222 callout_stop(&tm->cm_callout);
1224 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1225 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1229 * Currently there should be no way we can hit this case. It only
1230 * happens when we have a failure to allocate chain frames, and
1231 * task management commands don't have S/G lists.
1232 * XXXSL So should it be an assertion?
1234 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1235 mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
1236 "%s: cm_flags = %#x for LUN reset! "
1237 "This should not happen!\n", __func__, tm->cm_flags);
1238 mpssas_free_tm(sc, tm);
/* A NULL reply is only tolerable while a diag reset is in flight. */
1242 if (reply == NULL) {
1243 mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
1245 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1246 /* this completion was due to a reset, just cleanup */
1247 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1248 "reset, ignoring NULL LUN reset reply\n");
1250 mpssas_free_tm(sc, tm);
1253 /* we should have gotten a reply. */
1254 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1255 "LUN reset attempt, resetting controller\n");
1261 mps_dprint(sc, MPS_RECOVERY,
1262 "logical unit reset status 0x%x code 0x%x count %u\n",
1263 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1264 le32toh(reply->TerminationCount));
1267 * See if there are any outstanding commands for this LUN.
1268 * This could be made more efficient by using a per-LU data
1269 * structure of some sort.
1271 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1272 if (cm->cm_lun == tm->cm_lun)
1276 if (cm_count == 0) {
1277 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1278 "Finished recovery after LUN reset for target %u\n",
/* Tell CAM a BDR (reset) was delivered to this target/LUN. */
1281 mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
1284 * We've finished recovery for this logical unit. check and
1285 * see if some other logical unit has a timedout command
1286 * that needs to be processed.
1288 cm = TAILQ_FIRST(&targ->timedout_commands);
1290 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1291 "More commands to abort for target %u\n",
/* Reuse this TM frame to abort the next timed-out command. */
1293 mpssas_send_abort(sc, tm, cm);
1296 mpssas_free_tm(sc, tm);
1300 * If we still have commands for this LUN, the reset
1301 * effectively failed, regardless of the status reported.
1302 * Escalate to a target reset.
1304 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1305 "logical unit reset complete for target %u, but still "
1306 "have %u command(s), sending target reset\n", targ->tid,
1308 mpssas_send_reset(sc, tm,
1309 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a target-reset task-management request.  Mirrors
 * mpssas_logical_unit_reset_complete: stop the timeout callout, validate
 * the TM and its reply, then either declare recovery finished for the
 * whole target (outstanding == 0) or escalate — here the next step up is
 * a full controller reset.
 * NOTE(review): elided extract — some returns/braces not visible here.
 */
1314 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1316 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1317 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1318 struct mpssas_target *targ;
/* TM completed; cancel the pending mpssas_tm_timeout callout. */
1320 callout_stop(&tm->cm_callout);
1322 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1323 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1327 * Currently there should be no way we can hit this case. It only
1328 * happens when we have a failure to allocate chain frames, and
1329 * task management commands don't have S/G lists.
1331 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1332 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1333 "This should not happen!\n", __func__, tm->cm_flags);
1334 mpssas_free_tm(sc, tm);
/* A NULL reply is only tolerable while a diag reset is in flight. */
1338 if (reply == NULL) {
1339 mps_dprint(sc, MPS_RECOVERY,
1340 "NULL target reset reply for tm %pi TaskMID %u\n",
1341 tm, le16toh(req->TaskMID));
1342 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1343 /* this completion was due to a reset, just cleanup */
1344 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1345 "reset, ignoring NULL target reset reply\n");
1347 mpssas_free_tm(sc, tm);
1349 /* we should have gotten a reply. */
1350 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1351 "target reset attempt, resetting controller\n");
1357 mps_dprint(sc, MPS_RECOVERY,
1358 "target reset status 0x%x code 0x%x count %u\n",
1359 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1360 le32toh(reply->TerminationCount));
1362 if (targ->outstanding == 0) {
1363 /* we've finished recovery for this target and all
1364 * of its logical units.
1366 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1367 "Finished reset recovery for target %u\n", targ->tid);
/* Announce the delivered reset (BDR) to CAM for this target. */
1369 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1373 mpssas_free_tm(sc, tm);
1376 * After a target reset, if this target still has
1377 * outstanding commands, the reset effectively failed,
1378 * regardless of the status reported. escalate.
1380 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1381 "Target reset complete for target %u, but still have %u "
1382 "command(s), resetting controller\n", targ->tid,
1388 #define MPS_RESET_TIMEOUT 30
/*
 * Build and submit a SCSI task-management reset of the given type
 * (logical-unit reset or target reset) using the pre-allocated TM
 * command 'tm'.  Sets the matching completion handler, arms the
 * MPS_RESET_TIMEOUT callout, and maps/sends the request as a
 * high-priority descriptor.
 * NOTE(review): elided extract — error returns/braces not all visible.
 */
1391 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1393 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1394 struct mpssas_target *target;
/* A zero devhandle means the target is gone; nothing to reset. */
1397 target = tm->cm_targ;
1398 if (target->handle == 0) {
1399 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1400 __func__, target->tid);
1404 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1405 req->DevHandle = htole16(target->handle);
1406 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1407 req->TaskType = type;
1409 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1410 /* XXX Need to handle invalid LUNs */
1411 MPS_SET_LUN(req->LUN, tm->cm_lun);
1412 tm->cm_targ->logical_unit_resets++;
1413 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1414 "Sending logical unit reset to target %u lun %d\n",
1415 target->tid, tm->cm_lun);
1416 tm->cm_complete = mpssas_logical_unit_reset_complete;
1417 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1418 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1420 * Target reset method =
1421 * SAS Hard Link Reset / SATA Link Reset
1423 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1424 tm->cm_targ->target_resets++;
1425 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1426 "Sending target reset to target %u\n", target->tid);
1427 tm->cm_complete = mpssas_target_reset_complete;
/* Wildcard LUN: a target reset affects every LUN on the target. */
1428 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1430 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
/* TMs go out on the high-priority request queue. */
1435 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1436 tm->cm_complete_data = (void *)tm;
/* Arm the TM timeout; fires mpssas_tm_timeout if no completion. */
1438 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1439 mpssas_tm_timeout, tm);
1441 err = mps_map_command(sc, tm);
1443 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1444 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT_TASK task-management request.  Stops
 * the timeout callout, validates the TM/reply, then looks at the head of
 * the target's timed-out list: empty -> abort recovery done; a different
 * SMID -> keep aborting the next command; same SMID still there -> the
 * abort failed, escalate to a logical-unit reset.
 * NOTE(review): elided extract — some returns/braces not visible here.
 */
1452 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1454 struct mps_command *cm;
1455 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1456 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1457 struct mpssas_target *targ;
/* TM completed; cancel the pending mpssas_tm_timeout callout. */
1459 callout_stop(&tm->cm_callout);
1461 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1462 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1466 * Currently there should be no way we can hit this case. It only
1467 * happens when we have a failure to allocate chain frames, and
1468 * task management commands don't have S/G lists.
1470 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1471 mps_dprint(sc, MPS_RECOVERY,
1472 "cm_flags = %#x for abort %p TaskMID %u!\n",
1473 tm->cm_flags, tm, le16toh(req->TaskMID));
1474 mpssas_free_tm(sc, tm);
/* A NULL reply is only tolerable while a diag reset is in flight. */
1478 if (reply == NULL) {
1479 mps_dprint(sc, MPS_RECOVERY,
1480 "NULL abort reply for tm %p TaskMID %u\n",
1481 tm, le16toh(req->TaskMID));
1482 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1483 /* this completion was due to a reset, just cleanup */
1484 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1485 "reset, ignoring NULL abort reply\n");
1487 mpssas_free_tm(sc, tm);
1489 /* we should have gotten a reply. */
1490 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1491 "abort attempt, resetting controller\n");
1497 mps_dprint(sc, MPS_RECOVERY,
1498 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1499 le16toh(req->TaskMID),
1500 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1501 le32toh(reply->TerminationCount));
/* Peek at the next timed-out command (if any) for this target. */
1503 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1506 * If there are no more timedout commands, we're done with
1507 * error recovery for this target.
1509 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1510 "Finished abort recovery for target %u\n", targ->tid);
1513 mpssas_free_tm(sc, tm);
1514 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1515 /* abort success, but we have more timedout commands to abort */
1516 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1517 "Continuing abort recovery for target %u\n", targ->tid);
/* Reuse this TM frame to abort the next timed-out command. */
1519 mpssas_send_abort(sc, tm, cm);
1521 /* we didn't get a command completion, so the abort
1522 * failed as far as we're concerned. escalate.
1524 mps_dprint(sc, MPS_RECOVERY,
1525 "Abort failed for target %u, sending logical unit reset\n",
1528 mpssas_send_reset(sc, tm,
1529 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
1533 #define MPS_ABORT_TIMEOUT 5
1536 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1538 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1539 struct mpssas_target *targ;
1543 if (targ->handle == 0) {
1544 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1545 "%s null devhandle for target_id %d\n",
1546 __func__, cm->cm_ccb->ccb_h.target_id);
1550 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1551 "Aborting command %p\n", cm);
1553 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1554 req->DevHandle = htole16(targ->handle);
1555 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1556 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1558 /* XXX Need to handle invalid LUNs */
1559 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1561 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1564 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1565 tm->cm_complete = mpssas_abort_complete;
1566 tm->cm_complete_data = (void *)tm;
1567 tm->cm_targ = cm->cm_targ;
1568 tm->cm_lun = cm->cm_lun;
1570 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1571 mpssas_tm_timeout, tm);
1575 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1577 err = mps_map_command(sc, tm);
1579 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1580 "error %d sending abort for cm %p SMID %u\n",
1581 err, cm, req->TaskMID);
/*
 * Callout handler for a timed-out SCSI I/O.  Re-runs the interrupt
 * handler in case the completion is merely pending; if the command is
 * genuinely stuck, marks it TIMEDOUT, queues it on the target's
 * timedout_commands list, and either piggybacks on an in-progress TM for
 * the target or allocates a fresh TM and starts abort recovery.
 * NOTE(review): elided extract — some returns/braces not visible here.
 */
1586 mpssas_scsiio_timeout(void *data)
1588 sbintime_t elapsed, now;
1590 struct mps_softc *sc;
1591 struct mps_command *cm;
1592 struct mpssas_target *targ;
1594 cm = (struct mps_command *)data;
1600 mtx_assert(&sc->mps_mtx, MA_OWNED);
/*
 * NOTE(review): the format text says "cm %p" but the argument passed is
 * 'sc', not 'cm' — the logged pointer is the softc.  Looks like a bug;
 * confirm against upstream before changing (debug-only output).
 */
1602 mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1605 * Run the interrupt handler to make sure it's not pending. This
1606 * isn't perfect because the command could have already completed
1607 * and been re-used, though this is unlikely.
1609 mps_intr_locked(sc);
/* If the interrupt pass freed it, the I/O completed just in time. */
1610 if (cm->cm_state == MPS_CM_STATE_FREE) {
1611 mpssas_log_command(cm, MPS_XINFO,
1612 "SCSI command %p almost timed out\n", cm);
1616 if (cm->cm_ccb == NULL) {
1617 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
/* sim_data holds the submit timestamp set in mpssas_action_scsiio. */
1624 elapsed = now - ccb->ccb_h.qos.sim_data;
1625 mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1626 "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1627 targ->tid, targ->handle, ccb->ccb_h.timeout,
1628 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1630 /* XXX first, check the firmware state, to see if it's still
1631 * operational. if not, do a diag reset.
1633 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1634 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1635 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1637 if (targ->tm != NULL) {
1638 /* target already in recovery, just queue up another
1639 * timedout command to be processed later.
1641 mps_dprint(sc, MPS_RECOVERY,
1642 "queued timedout cm %p for processing by tm %p\n",
1644 } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1645 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1646 "Sending abort to target %u for SMID %d\n", targ->tid,
1647 cm->cm_desc.Default.SMID);
1648 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1651 /* start recovery by aborting the first timedout command */
1652 mpssas_send_abort(sc, targ->tm, cm);
1654 /* XXX queue this target up for recovery once a TM becomes
1655 * available. The firmware only has a limited number of
1656 * HighPriority credits for the high priority requests used
1657 * for task management, and we ran out.
1659 * Isilon: don't worry about this for now, since we have
1660 * more credits than disks in an enclosure, and limit
1661 * ourselves to one TM per target for recovery.
1663 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1664 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM XPT_SCSI_IO action handler: validate the target, allocate a driver
 * command, translate the CCB into an MPI2 SCSI_IO request (direction,
 * tagging, LUN, CDB, optional EEDP protection fields), arm the per-I/O
 * timeout, and hand the command to the hardware via mps_map_command().
 * NOTE(review): elided extract — some returns/braces not visible here.
 */
1670 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1672 MPI2_SCSI_IO_REQUEST *req;
1673 struct ccb_scsiio *csio;
1674 struct mps_softc *sc;
1675 struct mpssas_target *targ;
1676 struct mpssas_lun *lun;
1677 struct mps_command *cm;
1678 uint8_t i, lba_byte, *ref_tag_addr;
1679 uint16_t eedp_flags;
1680 uint32_t mpi_control;
1684 mtx_assert(&sc->mps_mtx, MA_OWNED);
1687 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1688 ("Target %d out of bounds in XPT_SCSI_IO\n",
1689 csio->ccb_h.target_id));
1690 targ = &sassc->targets[csio->ccb_h.target_id];
1691 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/* Handle 0 means the target was never discovered (or was removed). */
1692 if (targ->handle == 0x0) {
1693 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1694 __func__, csio->ccb_h.target_id);
1695 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID component members are owned by the IR firmware, not CAM. */
1699 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1700 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1701 "supported %u\n", __func__, csio->ccb_h.target_id);
1702 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1707 * Sometimes, it is possible to get a command that is not "In
1708 * Progress" and was actually aborted by the upper layer. Check for
1709 * this here and complete the command without error.
1711 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1712 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1713 "target %u\n", __func__, csio->ccb_h.target_id);
1718 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1719 * that the volume has timed out. We want volumes to be enumerated
1720 * until they are deleted/removed, not just failed.
1722 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1723 if (targ->devinfo == 0)
1724 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
1726 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
/* Driver is shutting down; refuse new I/O. */
1731 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1732 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1733 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1739 * If target has a reset in progress, freeze the devq and return. The
1740 * devq will be released when the TM reset is finished.
1742 if (targ->flags & MPSSAS_TARGET_INRESET) {
1743 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1744 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1745 __func__, targ->tid);
1746 xpt_freeze_devq(ccb->ccb_h.path, 1);
1751 cm = mps_alloc_command(sc);
/* Out of commands (or mid diag reset): freeze the simq and requeue. */
1752 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1754 mps_free_command(sc, cm);
1756 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1757 xpt_freeze_simq(sassc->sim, 1);
1758 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1760 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1761 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI_IO request from the CCB. */
1766 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1767 bzero(req, sizeof(*req));
1768 req->DevHandle = htole16(targ->handle);
1769 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1771 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1772 req->SenseBufferLength = MPS_SENSE_LEN;
1774 req->ChainOffset = 0;
1775 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1780 req->DataLength = htole32(csio->dxfer_len);
1781 req->BidirectionalDataLength = 0;
1782 req->IoFlags = htole16(csio->cdb_len);
1785 /* Note: BiDirectional transfers are not supported */
1786 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1788 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1789 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1792 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1793 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1797 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
/* 32-byte CDBs need the additional-CDB-length field set. */
1801 if (csio->cdb_len == 32)
1802 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1804 * It looks like the hardware doesn't require an explicit tag
1805 * number for each transaction. SAM Task Management not supported
1808 switch (csio->tag_action) {
1809 case MSG_HEAD_OF_Q_TAG:
1810 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1812 case MSG_ORDERED_Q_TAG:
1813 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1816 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1818 case CAM_TAG_ACTION_NONE:
1819 case MSG_SIMPLE_Q_TAG:
1821 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
/* Apply per-target Transport Layer Retries setting. */
1824 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1825 req->Control = htole32(mpi_control);
1826 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1827 mps_free_command(sc, cm);
1828 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* CDB may be passed by pointer or inline in the CCB. */
1833 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1834 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1836 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1837 req->IoFlags = htole16(csio->cdb_len);
1840 * Check if EEDP is supported and enabled. If it is then check if the
1841 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1842 * is formatted for EEDP support. If all of this is true, set CDB up
1843 * for EEDP transfer.
1845 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1846 if (sc->eedp_enabled && eedp_flags) {
1847 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1848 if (lun->lun_id == csio->ccb_h.target_lun) {
1853 if ((lun != NULL) && (lun->eedp_formatted)) {
1854 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1855 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1856 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1857 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1858 req->EEDPFlags = htole16(eedp_flags);
1861 * If CDB less than 32, fill in Primary Ref Tag with
1862 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1863 * already there. Also, set protection bit. FreeBSD
1864 * currently does not support CDBs bigger than 16, but
1865 * the code doesn't hurt, and will be here for the
1868 if (csio->cdb_len != 32) {
/* LBA starts at byte 6 of a 16-byte CDB, byte 2 of a 10/12-byte CDB. */
1869 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1870 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1871 PrimaryReferenceTag;
1872 for (i = 0; i < 4; i++) {
1874 req->CDB.CDB32[lba_byte + i];
1877 req->CDB.EEDP32.PrimaryReferenceTag =
1878 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1879 req->CDB.EEDP32.PrimaryApplicationTagMask =
1881 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1885 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1886 req->EEDPFlags = htole16(eedp_flags);
1887 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
/* Data phase setup: defer S/G building to the busdma callback. */
1893 cm->cm_length = csio->dxfer_len;
1894 if (cm->cm_length != 0) {
1896 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1900 cm->cm_sge = &req->SGL;
1901 cm->cm_sglsize = (32 - 24) * 4;
1902 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1903 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1904 cm->cm_complete = mpssas_scsiio_complete;
1905 cm->cm_complete_data = ccb;
1907 cm->cm_lun = csio->ccb_h.target_lun;
1911 * If HBA is a WD and the command is not for a retry, try to build a
1912 * direct I/O message. If failed, or the command is for a retry, send
1913 * the I/O to the IR volume itself.
1915 if (sc->WD_valid_config) {
1916 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1917 mpssas_direct_drive_io(sassc, cm, ccb);
1919 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1923 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1924 if (csio->bio != NULL)
1925 biotrack(csio->bio, __func__);
/* Record submit time (read back by mpssas_scsiio_timeout) and arm it. */
1927 csio->ccb_h.qos.sim_data = sbinuptime();
1928 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1929 mpssas_scsiio_timeout, cm, 0);
1932 targ->outstanding++;
1933 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1934 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1936 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1937 __func__, cm, ccb, targ->outstanding);
1939 mps_map_command(sc, cm);
1944 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Debug-only helper: decode and print the IOC status, SCSI status/state,
 * autosense data, and response info from a failed SCSI_IO reply.  Output
 * only appears at MPS_XINFO debug level; no state is modified.
 */
1947 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1948 Mpi2SCSIIOReply_t *mpi_reply)
1952 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1953 MPI2_IOCSTATUS_MASK;
1954 u8 scsi_state = mpi_reply->SCSIState;
1955 u8 scsi_status = mpi_reply->SCSIStatus;
1956 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1957 const char *desc_ioc_state, *desc_scsi_status;
/* 0x31170000 is special-cased; exact semantics not visible here. */
1959 if (log_info == 0x31170000)
/* Translate numeric codes to human-readable strings for the log. */
1962 desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1964 desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1967 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1968 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1971 *We can add more detail about underflow data here
/* %b expands scsi_state against the bit-name string that follows. */
1974 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1975 "scsi_state %b\n", desc_scsi_status, scsi_status,
1976 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1977 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1979 if (sc->mps_debug & MPS_XINFO &&
1980 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1981 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1982 scsi_sense_print(csio);
1983 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1986 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1987 response_info = le32toh(mpi_reply->ResponseInfo);
1988 response_bytes = (u8 *)&response_info;
1989 mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1991 mps_describe_table(mps_scsi_taskmgmt_string,
1992 response_bytes[0]));
1997 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1999 MPI2_SCSI_IO_REPLY *rep;
2001 struct ccb_scsiio *csio;
2002 struct mpssas_softc *sassc;
2003 struct scsi_vpd_supported_page_list *vpd_list = NULL;
2004 u8 *TLR_bits, TLR_on;
2007 struct mpssas_target *target;
2008 target_id_t target_id;
2011 mps_dprint(sc, MPS_TRACE,
2012 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2013 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2014 cm->cm_targ->outstanding);
2016 callout_stop(&cm->cm_callout);
2017 mtx_assert(&sc->mps_mtx, MA_OWNED);
2020 ccb = cm->cm_complete_data;
2022 target_id = csio->ccb_h.target_id;
2023 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2025 * XXX KDM if the chain allocation fails, does it matter if we do
2026 * the sync and unload here? It is simpler to do it in every case,
2027 * assuming it doesn't cause problems.
2029 if (cm->cm_data != NULL) {
2030 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2031 dir = BUS_DMASYNC_POSTREAD;
2032 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2033 dir = BUS_DMASYNC_POSTWRITE;
2034 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2035 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2038 cm->cm_targ->completed++;
2039 cm->cm_targ->outstanding--;
2040 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2041 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2043 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2044 if (ccb->csio.bio != NULL)
2045 biotrack(ccb->csio.bio, __func__);
2048 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2049 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2050 if (cm->cm_reply != NULL)
2051 mpssas_log_command(cm, MPS_RECOVERY,
2052 "completed timedout cm %p ccb %p during recovery "
2053 "ioc %x scsi %x state %x xfer %u\n",
2055 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2056 le32toh(rep->TransferCount));
2058 mpssas_log_command(cm, MPS_RECOVERY,
2059 "completed timedout cm %p ccb %p during recovery\n",
2061 } else if (cm->cm_targ->tm != NULL) {
2062 if (cm->cm_reply != NULL)
2063 mpssas_log_command(cm, MPS_RECOVERY,
2064 "completed cm %p ccb %p during recovery "
2065 "ioc %x scsi %x state %x xfer %u\n",
2067 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2068 le32toh(rep->TransferCount));
2070 mpssas_log_command(cm, MPS_RECOVERY,
2071 "completed cm %p ccb %p during recovery\n",
2073 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2074 mpssas_log_command(cm, MPS_RECOVERY,
2075 "reset completed cm %p ccb %p\n",
2079 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2081 * We ran into an error after we tried to map the command,
2082 * so we're getting a callback without queueing the command
2083 * to the hardware. So we set the status here, and it will
2084 * be retained below. We'll go through the "fast path",
2085 * because there can be no reply when we haven't actually
2086 * gone out to the hardware.
2088 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2091 * Currently the only error included in the mask is
2092 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2093 * chain frames. We need to freeze the queue until we get
2094 * a command that completed without this error, which will
2095 * hopefully have some chain frames attached that we can
2096 * use. If we wanted to get smarter about it, we would
2097 * only unfreeze the queue in this condition when we're
2098 * sure that we're getting some chain frames back. That's
2099 * probably unnecessary.
2101 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2102 xpt_freeze_simq(sassc->sim, 1);
2103 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2104 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2105 "freezing SIM queue\n");
2110 * If this is a Start Stop Unit command and it was issued by the driver
2111 * during shutdown, decrement the refcount to account for all of the
2112 * commands that were sent. All SSU commands should be completed before
2113 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2116 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2117 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2121 /* Take the fast path to completion */
2122 if (cm->cm_reply == NULL) {
2123 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2124 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2125 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2127 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2128 ccb->csio.scsi_status = SCSI_STATUS_OK;
2130 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2131 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2132 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2133 mps_dprint(sc, MPS_XINFO,
2134 "Unfreezing SIM queue\n");
2139 * There are two scenarios where the status won't be
2140 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2141 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2143 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2145 * Freeze the dev queue so that commands are
2146 * executed in the correct order after error
2149 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2150 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2152 mps_free_command(sc, cm);
2157 mpssas_log_command(cm, MPS_XINFO,
2158 "ioc %x scsi %x state %x xfer %u\n",
2159 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2160 le32toh(rep->TransferCount));
2163 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2164 * Volume if an error occurred (normal I/O retry). Use the original
2165 * CCB, but set a flag that this will be a retry so that it's sent to
2166 * the original volume. Free the command but reuse the CCB.
2168 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2169 mps_free_command(sc, cm);
2170 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2171 mpssas_action_scsiio(sassc, ccb);
2174 ccb->ccb_h.sim_priv.entries[0].field = 0;
2176 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2177 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2178 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2180 case MPI2_IOCSTATUS_SUCCESS:
2181 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2183 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2184 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2185 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2187 /* Completion failed at the transport level. */
2188 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2189 MPI2_SCSI_STATE_TERMINATED)) {
2190 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2194 /* In a modern packetized environment, an autosense failure
2195 * implies that there's not much else that can be done to
2196 * recover the command.
2198 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2199 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2204 * CAM doesn't care about SAS Response Info data, but if this is
2205 * the state check if TLR should be done. If not, clear the
2206 * TLR_bits for the target.
2208 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2209 ((le32toh(rep->ResponseInfo) &
2210 MPI2_SCSI_RI_MASK_REASONCODE) ==
2211 MPS_SCSI_RI_INVALID_FRAME)) {
2212 sc->mapping_table[target_id].TLR_bits =
2213 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2217 * Intentionally override the normal SCSI status reporting
2218 * for these two cases. These are likely to happen in a
2219 * multi-initiator environment, and we want to make sure that
2220 * CAM retries these commands rather than fail them.
2222 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2223 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2224 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2228 /* Handle normal status and sense */
2229 csio->scsi_status = rep->SCSIStatus;
2230 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2231 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2233 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2235 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2236 int sense_len, returned_sense_len;
2238 returned_sense_len = min(le32toh(rep->SenseCount),
2239 sizeof(struct scsi_sense_data));
2240 if (returned_sense_len < ccb->csio.sense_len)
2241 ccb->csio.sense_resid = ccb->csio.sense_len -
2244 ccb->csio.sense_resid = 0;
2246 sense_len = min(returned_sense_len,
2247 ccb->csio.sense_len - ccb->csio.sense_resid);
2248 bzero(&ccb->csio.sense_data,
2249 sizeof(ccb->csio.sense_data));
2250 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2251 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2255 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2256 * and it's page code 0 (Supported Page List), and there is
2257 * inquiry data, and this is for a sequential access device, and
2258 * the device is an SSP target, and TLR is supported by the
2259 * controller, turn the TLR_bits value ON if page 0x90 is
2262 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2263 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2264 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2265 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2266 (csio->data_ptr != NULL) &&
2267 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2268 (sc->control_TLR) &&
2269 (sc->mapping_table[target_id].device_info &
2270 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2271 vpd_list = (struct scsi_vpd_supported_page_list *)
2273 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2274 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2275 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2276 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2277 csio->cdb_io.cdb_bytes[4];
2278 alloc_len -= csio->resid;
2279 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2280 if (vpd_list->list[i] == 0x90) {
2288 * If this is a SATA direct-access end device, mark it so that
2289 * a SCSI StartStopUnit command will be sent to it when the
2290 * driver is being shutdown.
2292 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2293 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2294 (sc->mapping_table[target_id].device_info &
2295 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2296 ((sc->mapping_table[target_id].device_info &
2297 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2298 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2299 target = &sassc->targets[target_id];
2300 target->supports_SSU = TRUE;
2301 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2305 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2306 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2308 * If devinfo is 0 this will be a volume. In that case don't
2309 * tell CAM that the volume is not there. We want volumes to
2310 * be enumerated until they are deleted/removed, not just
2313 if (cm->cm_targ->devinfo == 0)
2314 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2316 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2318 case MPI2_IOCSTATUS_INVALID_SGL:
2319 mps_print_scsiio_cmd(sc, cm);
2320 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2322 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2324 * This is one of the responses that comes back when an I/O
2325 * has been aborted. If it is because of a timeout that we
2326 * initiated, just set the status to CAM_CMD_TIMEOUT.
2327 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2328 * command is the same (it gets retried, subject to the
2329 * retry counter), the only difference is what gets printed
2332 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2333 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2335 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2337 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2338 /* resid is ignored for this condition */
2340 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2342 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2343 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2345 * These can sometimes be transient transport-related
2346 * errors, and sometimes persistent drive-related errors.
2347 * We used to retry these without decrementing the retry
2348 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2349 * we hit a persistent drive problem that returns one of
2350 * these error codes, we would retry indefinitely. So,
2351 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2352 * count and avoid infinite retries. We're taking the
2353 * potential risk of flagging false failures in the event
2354 * of a topology-related error (e.g. a SAS expander problem
2355 * causes a command addressed to a drive to fail), but
2356 * avoiding getting into an infinite retry loop.
2358 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2359 mps_dprint(sc, MPS_INFO,
2360 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2361 mps_describe_table(mps_iocstatus_string,
2362 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2363 target_id, cm->cm_desc.Default.SMID,
2364 le32toh(rep->IOCLogInfo));
2365 mps_dprint(sc, MPS_XINFO,
2366 "SCSIStatus %x SCSIState %x xfercount %u\n",
2367 rep->SCSIStatus, rep->SCSIState,
2368 le32toh(rep->TransferCount));
2370 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2371 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2372 case MPI2_IOCSTATUS_INVALID_VPID:
2373 case MPI2_IOCSTATUS_INVALID_FIELD:
2374 case MPI2_IOCSTATUS_INVALID_STATE:
2375 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2376 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2377 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2378 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2379 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2381 mpssas_log_command(cm, MPS_XINFO,
2382 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2383 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2384 rep->SCSIStatus, rep->SCSIState,
2385 le32toh(rep->TransferCount));
2386 csio->resid = cm->cm_length;
2387 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2391 mps_sc_failed_io_info(sc,csio,rep);
2393 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2394 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2395 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2396 mps_dprint(sc, MPS_XINFO, "Command completed, "
2397 "unfreezing SIM queue\n");
2400 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2401 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2402 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2405 mps_free_command(sc, cm);
2409 /* All Request reached here are Endian safe */
/*
 * mpssas_direct_drive_io:
 *
 * Attempt to convert a READ/WRITE (6/10/12/16 byte CDB) addressed to a
 * RAID (IR) volume into a "direct drive" I/O aimed at the underlying
 * PhysDisk, bypassing the IR firmware path. The conversion is only done
 * when the transfer lies entirely within one stripe; otherwise the
 * request is left addressed to the volume.
 *
 * NOTE(review): this excerpt is a truncated listing -- closing braces,
 * else-branches and some assignments (e.g. the "column = physLBA % ..."
 * lines) are missing from view; comments below describe only what the
 * visible lines establish.
 */
2411 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2413 pMpi2SCSIIORequest_t pIO_req;
2414 struct mps_softc *sc = sassc->sc;
2416 uint32_t physLBA, stripe_offset, stripe_unit;
2417 uint32_t io_size, column;
2418 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2421 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2422 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2423 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2424 * bit different than the 10/16 CDBs, handle them separately.
2426 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2427 CDB = pIO_req->CDB.CDB32;
2430 * Handle 6 byte CDBs.
2432 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2433 (CDB[0] == WRITE_6))) {
2435 * Get the transfer size in blocks.
2437 io_size = (cm->cm_length >> sc->DD_block_exponent);
2440 * Get virtual LBA given in the CDB.
/* 6-byte CDB carries a 21-bit LBA: low 5 bits of byte 1, then bytes 2-3. */
2442 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2443 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2446 * Check that LBA range for I/O does not exceed volume's
2449 if ((virtLBA + (uint64_t)io_size - 1) <=
2452 * Check if the I/O crosses a stripe boundary. If not,
2453 * translate the virtual LBA to a physical LBA and set
2454 * the DevHandle for the PhysDisk to be used. If it
2455 * does cross a boundary, do normal I/O. To get the
2456 * right DevHandle to use, get the map number for the
2457 * column, then use that map number to look up the
2458 * DevHandle of the PhysDisk.
2460 stripe_offset = (uint32_t)virtLBA &
2461 (sc->DD_stripe_size - 1);
2462 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2463 physLBA = (uint32_t)virtLBA >>
2464 sc->DD_stripe_exponent;
2465 stripe_unit = physLBA / sc->DD_num_phys_disks;
2466 column = physLBA % sc->DD_num_phys_disks;
2467 pIO_req->DevHandle =
2468 htole16(sc->DD_column_map[column].dev_handle);
2469 /* ???? Is this endian safe*/
2470 cm->cm_desc.SCSIIO.DevHandle =
/* Rewrite the CDB's LBA bytes in place with the physical LBA. */
2473 physLBA = (stripe_unit <<
2474 sc->DD_stripe_exponent) + stripe_offset;
2475 ptrLBA = &pIO_req->CDB.CDB32[1];
2476 physLBA_byte = (uint8_t)(physLBA >> 16);
2477 *ptrLBA = physLBA_byte;
2478 ptrLBA = &pIO_req->CDB.CDB32[2];
2479 physLBA_byte = (uint8_t)(physLBA >> 8);
2480 *ptrLBA = physLBA_byte;
2481 ptrLBA = &pIO_req->CDB.CDB32[3];
2482 physLBA_byte = (uint8_t)physLBA;
2483 *ptrLBA = physLBA_byte;
2486 * Set flag that Direct Drive I/O is
2489 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2496 * Handle 10, 12 or 16 byte CDBs.
2498 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2499 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2500 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2501 (CDB[0] == WRITE_12))) {
2503 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2504 * are 0. If not, this is accessing beyond 2TB so handle it in
2505 * the else section. 10-byte and 12-byte CDB's are OK.
2506 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2507 * ready to accept 12byte CDB for Direct IOs.
2509 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2510 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2511 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2513 * Get the transfer size in blocks.
2515 io_size = (cm->cm_length >> sc->DD_block_exponent);
2518 * Get virtual LBA. Point to correct lower 4 bytes of
2519 * LBA in the CDB depending on command.
/* 10/12-byte CDBs keep the LBA at byte 2; 16-byte at byte 6. */
2521 lba_idx = ((CDB[0] == READ_12) ||
2522 (CDB[0] == WRITE_12) ||
2523 (CDB[0] == READ_10) ||
2524 (CDB[0] == WRITE_10))? 2 : 6;
2525 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2526 ((uint64_t)CDB[lba_idx + 1] << 16) |
2527 ((uint64_t)CDB[lba_idx + 2] << 8) |
2528 (uint64_t)CDB[lba_idx + 3];
2531 * Check that LBA range for I/O does not exceed volume's
2534 if ((virtLBA + (uint64_t)io_size - 1) <=
2537 * Check if the I/O crosses a stripe boundary.
2538 * If not, translate the virtual LBA to a
2539 * physical LBA and set the DevHandle for the
2540 * PhysDisk to be used. If it does cross a
2541 * boundary, do normal I/O. To get the right
2542 * DevHandle to use, get the map number for the
2543 * column, then use that map number to look up
2544 * the DevHandle of the PhysDisk.
2546 stripe_offset = (uint32_t)virtLBA &
2547 (sc->DD_stripe_size - 1);
2548 if ((stripe_offset + io_size) <=
2549 sc->DD_stripe_size) {
2550 physLBA = (uint32_t)virtLBA >>
2551 sc->DD_stripe_exponent;
2552 stripe_unit = physLBA /
2553 sc->DD_num_phys_disks;
2555 sc->DD_num_phys_disks;
2556 pIO_req->DevHandle =
2557 htole16(sc->DD_column_map[column].
2559 cm->cm_desc.SCSIIO.DevHandle =
2562 physLBA = (stripe_unit <<
2563 sc->DD_stripe_exponent) +
2566 &pIO_req->CDB.CDB32[lba_idx];
2567 physLBA_byte = (uint8_t)(physLBA >> 24);
2568 *ptrLBA = physLBA_byte;
2570 &pIO_req->CDB.CDB32[lba_idx + 1];
2571 physLBA_byte = (uint8_t)(physLBA >> 16);
2572 *ptrLBA = physLBA_byte;
2574 &pIO_req->CDB.CDB32[lba_idx + 2];
2575 physLBA_byte = (uint8_t)(physLBA >> 8);
2576 *ptrLBA = physLBA_byte;
2578 &pIO_req->CDB.CDB32[lba_idx + 3];
2579 physLBA_byte = (uint8_t)physLBA;
2580 *ptrLBA = physLBA_byte;
2583 * Set flag that Direct Drive I/O is
2586 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2591 * 16-byte CDB and the upper 4 bytes of the CDB are not
2592 * 0. Get the transfer size in blocks.
2594 io_size = (cm->cm_length >> sc->DD_block_exponent);
/*
 * NOTE(review): "<< 54" below looks wrong -- byte 2 of a 64-bit
 * big-endian LBA (SBC READ(16)/WRITE(16)) should be shifted by
 * 56, matching the 48/40/32/... progression of the following
 * bytes. Confirm against the SBC CDB layout before changing.
 */
2599 virtLBA = ((uint64_t)CDB[2] << 54) |
2600 ((uint64_t)CDB[3] << 48) |
2601 ((uint64_t)CDB[4] << 40) |
2602 ((uint64_t)CDB[5] << 32) |
2603 ((uint64_t)CDB[6] << 24) |
2604 ((uint64_t)CDB[7] << 16) |
2605 ((uint64_t)CDB[8] << 8) |
2609 * Check that LBA range for I/O does not exceed volume's
2612 if ((virtLBA + (uint64_t)io_size - 1) <=
2615 * Check if the I/O crosses a stripe boundary.
2616 * If not, translate the virtual LBA to a
2617 * physical LBA and set the DevHandle for the
2618 * PhysDisk to be used. If it does cross a
2619 * boundary, do normal I/O. To get the right
2620 * DevHandle to use, get the map number for the
2621 * column, then use that map number to look up
2622 * the DevHandle of the PhysDisk.
2624 stripe_offset = (uint32_t)virtLBA &
2625 (sc->DD_stripe_size - 1);
2626 if ((stripe_offset + io_size) <=
2627 sc->DD_stripe_size) {
2628 physLBA = (uint32_t)(virtLBA >>
2629 sc->DD_stripe_exponent);
2630 stripe_unit = physLBA /
2631 sc->DD_num_phys_disks;
2633 sc->DD_num_phys_disks;
2634 pIO_req->DevHandle =
2635 htole16(sc->DD_column_map[column].
2637 cm->cm_desc.SCSIIO.DevHandle =
2640 physLBA = (stripe_unit <<
2641 sc->DD_stripe_exponent) +
2645 * Set upper 4 bytes of LBA to 0. We
2646 * assume that the phys disks are less
2647 * than 2 TB's in size. Then, set the
2650 pIO_req->CDB.CDB32[2] = 0;
2651 pIO_req->CDB.CDB32[3] = 0;
2652 pIO_req->CDB.CDB32[4] = 0;
2653 pIO_req->CDB.CDB32[5] = 0;
2654 ptrLBA = &pIO_req->CDB.CDB32[6];
2655 physLBA_byte = (uint8_t)(physLBA >> 24);
2656 *ptrLBA = physLBA_byte;
2657 ptrLBA = &pIO_req->CDB.CDB32[7];
2658 physLBA_byte = (uint8_t)(physLBA >> 16);
2659 *ptrLBA = physLBA_byte;
2660 ptrLBA = &pIO_req->CDB.CDB32[8];
2661 physLBA_byte = (uint8_t)(physLBA >> 8);
2662 *ptrLBA = physLBA_byte;
2663 ptrLBA = &pIO_req->CDB.CDB32[9];
2664 physLBA_byte = (uint8_t)physLBA;
2665 *ptrLBA = physLBA_byte;
2668 * Set flag that Direct Drive I/O is
2671 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2678 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command: validate the reply,
 * recover the target SAS address from the request frame, map the
 * IOC/SAS status into a CAM ccb status, then sync/unload the DMA map
 * and free the command. (Excerpt is truncated; some lines are missing.)
 */
2680 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2682 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2683 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2687 ccb = cm->cm_complete_data;
2690 * Currently there should be no way we can hit this case. It only
2691 * happens when we have a failure to allocate chain frames, and SMP
2692 * commands require two S/G elements only. That should be handled
2693 * in the standard request size.
2695 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2696 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2697 __func__, cm->cm_flags);
2698 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2702 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
2704 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2705 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
/* Reassemble the 64-bit SAS address from the little-endian request frame. */
2709 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2710 sasaddr = le32toh(req->SASAddress.Low);
2711 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2713 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2714 MPI2_IOCSTATUS_SUCCESS ||
2715 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2716 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2717 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2718 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2722 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2723 "%#jx completed successfully\n", __func__,
2724 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result code. */
2726 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2727 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2729 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2733 * We sync in both directions because we had DMAs in the S/G list
2734 * in both directions.
2736 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2737 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2738 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2739 mps_free_command(sc, cm);
/*
 * Build and submit an SMP passthrough request for the given ccb to the
 * expander at 'sasaddr'. Only virtual addresses (or single-segment S/G
 * lists of virtual addresses) are supported; the request and response
 * buffers are passed via a 2-element uio so a single mps_map_command()
 * maps both. (Excerpt is truncated; some lines are missing.)
 */
2744 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2746 struct mps_command *cm;
2747 uint8_t *request, *response;
2748 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2749 struct mps_softc *sc;
2756 * XXX We don't yet support physical addresses here.
2758 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2759 case CAM_DATA_PADDR:
2760 case CAM_DATA_SG_PADDR:
2761 mps_dprint(sc, MPS_ERROR,
2762 "%s: physical addresses not supported\n", __func__);
2763 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2768 * The chip does not support more than one buffer for the
2769 * request or response.
2771 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2772 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2773 mps_dprint(sc, MPS_ERROR,
2774 "%s: multiple request or response "
2775 "buffer segments not supported for SMP\n",
2777 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2783 * The CAM_SCATTER_VALID flag was originally implemented
2784 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2785 * We have two. So, just take that flag to mean that we
2786 * might have S/G lists, and look at the S/G segment count
2787 * to figure out whether that is the case for each individual
/* Single-entry S/G list: use the (virtual) address of its first segment. */
2790 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2791 bus_dma_segment_t *req_sg;
2793 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2794 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2796 request = ccb->smpio.smp_request;
2798 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2799 bus_dma_segment_t *rsp_sg;
2801 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2802 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2804 response = ccb->smpio.smp_response;
2806 case CAM_DATA_VADDR:
2807 request = ccb->smpio.smp_request;
2808 response = ccb->smpio.smp_response;
2811 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2816 cm = mps_alloc_command(sc);
2818 mps_dprint(sc, MPS_ERROR,
2819 "%s: cannot allocate command\n", __func__);
2820 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2825 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2826 bzero(req, sizeof(*req));
2827 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2829 /* Allow the chip to use any route to this SAS address. */
2830 req->PhysicalPort = 0xff;
2832 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2834 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2836 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2837 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2839 mpi_init_sge(cm, req, &req->SGL);
2842 * Set up a uio to pass into mps_map_command(). This allows us to
2843 * do one map command, and one busdma call in there.
2845 cm->cm_uio.uio_iov = cm->cm_iovec;
2846 cm->cm_uio.uio_iovcnt = 2;
2847 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2850 * The read/write flag isn't used by busdma, but set it just in
2851 * case. This isn't exactly accurate, either, since we're going in
2854 cm->cm_uio.uio_rw = UIO_WRITE;
/* iovec[0] = request buffer, iovec[1] = response buffer. */
2856 cm->cm_iovec[0].iov_base = request;
2857 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2858 cm->cm_iovec[1].iov_base = response;
2859 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2861 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2862 cm->cm_iovec[1].iov_len;
2865 * Trigger a warning message in mps_data_cb() for the user if we
2866 * wind up exceeding two S/G segments. The chip expects one
2867 * segment for the request and another for the response.
2869 cm->cm_max_segs = 2;
2871 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2872 cm->cm_complete = mpssas_smpio_complete;
2873 cm->cm_complete_data = ccb;
2876 * Tell the mapping code that we're using a uio, and that this is
2877 * an SMP passthrough request. There is a little special-case
2878 * logic there (in mps_data_cb()) to handle the bidirectional
2881 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2882 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2884 /* The chip data format is little endian. */
2885 req->SASAddress.High = htole32(sasaddr >> 32);
2886 req->SASAddress.Low = htole32(sasaddr);
2889 * XXX Note that we don't have a timeout/abort mechanism here.
2890 * From the manual, it looks like task management requests only
2891 * work for SCSI IO and SATA passthrough requests. We may need to
2892 * have a mechanism to retry requests in the event of a chip reset
2893 * at least. Hopefully the chip will insure that any errors short
2894 * of that are relayed back to the driver.
2896 error = mps_map_command(sc, cm);
2897 if ((error != 0) && (error != EINPROGRESS)) {
2898 mps_dprint(sc, MPS_ERROR,
2899 "%s: error %d returned from mps_map_command()\n",
2907 mps_free_command(sc, cm);
2908 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO ccb: resolve which SAS address to send the SMP
 * request to (the target itself if it embeds an SMP target, otherwise
 * its parent expander) and hand off to mpssas_send_smpcmd().
 * (Excerpt is truncated; some lines are missing.)
 */
2915 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2917 struct mps_softc *sc;
2918 struct mpssas_target *targ;
2919 uint64_t sasaddr = 0;
2924 * Make sure the target exists.
2926 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2927 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2928 targ = &sassc->targets[ccb->ccb_h.target_id];
2929 if (targ->handle == 0x0) {
2930 mps_dprint(sc, MPS_ERROR,
2931 "%s: target %d does not exist!\n", __func__,
2932 ccb->ccb_h.target_id);
2933 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2939 * If this device has an embedded SMP target, we'll talk to it
2941 * figure out what the expander's address is.
2943 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2944 sasaddr = targ->sasaddr;
2947 * If we don't have a SAS address for the expander yet, try
2948 * grabbing it from the page 0x83 information cached in the
2949 * transport layer for this target. LSI expanders report the
2950 * expander SAS address as the port-associated SAS address in
2951 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2954 * XXX KDM disable this for now, but leave it commented out so that
2955 * it is obvious that this is another possible way to get the SAS
2958 * The parent handle method below is a little more reliable, and
2959 * the other benefit is that it works for devices other than SES
2960 * devices. So you can send a SMP request to a da(4) device and it
2961 * will get routed to the expander that device is attached to.
2962 * (Assuming the da(4) device doesn't contain an SMP target...)
2966 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2970 * If we still don't have a SAS address for the expander, look for
2971 * the parent device of this device, which is probably the expander.
2974 #ifdef OLD_MPS_PROBE
2975 struct mpssas_target *parent_target;
2978 if (targ->parent_handle == 0x0) {
2979 mps_dprint(sc, MPS_ERROR,
2980 "%s: handle %d does not have a valid "
2981 "parent handle!\n", __func__, targ->handle);
2982 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2985 #ifdef OLD_MPS_PROBE
2986 parent_target = mpssas_find_target_by_handle(sassc, 0,
2987 targ->parent_handle);
2989 if (parent_target == NULL) {
2990 mps_dprint(sc, MPS_ERROR,
2991 "%s: handle %d does not have a valid "
2992 "parent target!\n", __func__, targ->handle);
2993 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2997 if ((parent_target->devinfo &
2998 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2999 mps_dprint(sc, MPS_ERROR,
3000 "%s: handle %d parent %d does not "
3001 "have an SMP target!\n", __func__,
3002 targ->handle, parent_target->handle);
3003 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3008 sasaddr = parent_target->sasaddr;
3009 #else /* OLD_MPS_PROBE */
3010 if ((targ->parent_devinfo &
3011 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3012 mps_dprint(sc, MPS_ERROR,
3013 "%s: handle %d parent %d does not "
3014 "have an SMP target!\n", __func__,
3015 targ->handle, targ->parent_handle);
3016 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3020 if (targ->parent_sasaddr == 0x0) {
3021 mps_dprint(sc, MPS_ERROR,
3022 "%s: handle %d parent handle %d does "
3023 "not have a valid SAS address!\n",
3024 __func__, targ->handle, targ->parent_handle);
3025 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3029 sasaddr = targ->parent_sasaddr;
3030 #endif /* OLD_MPS_PROBE */
/* No usable address found by any method: fail the ccb. */
3035 mps_dprint(sc, MPS_INFO,
3036 "%s: unable to find SAS address for handle %d\n",
3037 __func__, targ->handle);
3038 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3041 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3049 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV ccb: allocate a task-management command,
 * fill in a Target Reset TM request for the target's DevHandle, mark
 * the target in-reset, and submit it. Completion is handled by
 * mpssas_resetdev_complete(). (Excerpt is truncated; the alloc-failure
 * branch header and some lines are missing.)
 */
3052 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3054 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3055 struct mps_softc *sc;
3056 struct mps_command *tm;
3057 struct mpssas_target *targ;
3059 MPS_FUNCTRACE(sassc->sc);
3060 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3062 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3063 ("Target %d out of bounds in XPT_RESET_DEV\n",
3064 ccb->ccb_h.target_id));
3066 tm = mps_alloc_command(sc);
3068 mps_dprint(sc, MPS_ERROR,
3069 "command alloc failure in mpssas_action_resetdev\n");
3070 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3075 targ = &sassc->targets[ccb->ccb_h.target_id];
3076 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3077 req->DevHandle = htole16(targ->handle);
3078 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3079 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3081 /* SAS Hard Link Reset / SATA Link Reset */
3082 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
/* TM requests go on the high-priority queue. */
3085 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3086 tm->cm_complete = mpssas_resetdev_complete;
3087 tm->cm_complete_data = ccb;
3089 targ->flags |= MPSSAS_TARGET_INRESET;
3091 mps_map_command(sc, tm);
/*
 * Completion handler for the Target Reset TM issued by
 * mpssas_action_resetdev(): translate the TM response code into the
 * ccb status, announce the bus device reset to CAM on success, and
 * free the TM command. (Excerpt is truncated; some lines are missing.)
 */
3095 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3097 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3101 mtx_assert(&sc->mps_mtx, MA_OWNED);
3103 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3104 ccb = tm->cm_complete_data;
3107 * Currently there should be no way we can hit this case. It only
3108 * happens when we have a failure to allocate chain frames, and
3109 * task management commands don't have S/G lists.
3111 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3112 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3114 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3116 mps_dprint(sc, MPS_ERROR,
3117 "%s: cm_flags = %#x for reset of handle %#04x! "
3118 "This should not happen!\n", __func__, tm->cm_flags,
3120 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3124 mps_dprint(sc, MPS_XINFO,
3125 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3126 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3128 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3129 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
/* AC_SENT_BDR tells CAM a bus device reset went out to this target. */
3130 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3134 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3138 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point, used when interrupts are unavailable
 * (e.g. kernel dumps / panic). Disables MPS_TRACE debugging first so
 * per-event messages don't drown the console, then runs the interrupt
 * handler by hand.
 */
3143 mpssas_poll(struct cam_sim *sim)
3145 struct mpssas_softc *sassc;
3147 sassc = cam_sim_softc(sim);
3149 if (sassc->sc->mps_debug & MPS_TRACE) {
3150 /* frequent debug messages during a panic just slow
3151 * everything down too much.
3153 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3154 sassc->sc->mps_debug &= ~MPS_TRACE;
3157 mps_intr_locked(sassc->sc);
/*
 * CAM async-event callback. AC_ADVINFO_CHANGED (on supported FreeBSD
 * versions): when READ CAPACITY(16) data changes for a LUN, re-fetch it
 * via XPT_DEV_ADVINFO and record EEDP (protection) formatting state on
 * the LUN. AC_FOUND_DEVICE: probe the new device for EEDP support.
 * (Excerpt is truncated; some lines are missing.)
 */
3161 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3164 struct mps_softc *sc;
3166 sc = (struct mps_softc *)callback_arg;
3169 #if (__FreeBSD_version >= 1000006) || \
3170 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3171 case AC_ADVINFO_CHANGED: {
3172 struct mpssas_target *target;
3173 struct mpssas_softc *sassc;
3174 struct scsi_read_capacity_data_long rcap_buf;
3175 struct ccb_dev_advinfo cdai;
3176 struct mpssas_lun *lun;
3181 buftype = (uintptr_t)arg;
3187 * We're only interested in read capacity data changes.
3189 if (buftype != CDAI_TYPE_RCAPLONG)
3193 * We should have a handle for this, but check to make sure.
3195 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3196 ("Target %d out of bounds in mpssas_async\n",
3197 xpt_path_target_id(path)));
3198 target = &sassc->targets[xpt_path_target_id(path)];
3199 if (target->handle == 0)
/* Find (or create) the per-LUN record on this target. */
3202 lunid = xpt_path_lun_id(path);
3204 SLIST_FOREACH(lun, &target->luns, lun_link) {
3205 if (lun->lun_id == lunid) {
3211 if (found_lun == 0) {
3212 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3215 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3216 "LUN for EEDP support.\n");
3219 lun->lun_id = lunid;
3220 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Re-read the cached long read-capacity data via XPT_DEV_ADVINFO. */
3223 bzero(&rcap_buf, sizeof(rcap_buf));
3224 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3225 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3226 cdai.ccb_h.flags = CAM_DIR_IN;
3227 cdai.buftype = CDAI_TYPE_RCAPLONG;
3228 #if (__FreeBSD_version >= 1100061) || \
3229 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3230 cdai.flags = CDAI_FLAG_NONE;
3234 cdai.bufsiz = sizeof(rcap_buf);
3235 cdai.buf = (uint8_t *)&rcap_buf;
3236 xpt_action((union ccb *)&cdai);
3237 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3238 cam_release_devq(cdai.ccb_h.path,
/* SRC16_PROT_EN set means the LUN is formatted with protection info. */
3241 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3242 && (rcap_buf.prot & SRC16_PROT_EN)) {
3243 lun->eedp_formatted = TRUE;
3244 lun->eedp_block_size = scsi_4btoul(rcap_buf.length);
3246 lun->eedp_formatted = FALSE;
3247 lun->eedp_block_size = 0;
3252 case AC_FOUND_DEVICE: {
3253 struct ccb_getdev *cgd;
3256 mpssas_check_eedp(sc, path, cgd);
3265 #if (__FreeBSD_version < 901503) || \
3266 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * For a newly found device, determine whether it is EEDP (end-to-end
 * data protection) capable; if the inquiry data advertises protection
 * support, issue an internal READ CAPACITY(16) whose completion
 * (mpssas_read_cap_done) records the LUN's EEDP state.
 * (Excerpt is truncated; some lines are missing.)
 */
3268 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3269 struct ccb_getdev *cgd)
3271 struct mpssas_softc *sassc = sc->sassc;
3272 struct ccb_scsiio *csio;
3273 struct scsi_read_capacity_16 *scsi_cmd;
3274 struct scsi_read_capacity_eedp *rcap_buf;
3276 target_id_t targetid;
3279 struct cam_path *local_path;
3280 struct mpssas_target *target;
3281 struct mpssas_lun *lun;
3286 pathid = cam_sim_path(sassc->sim);
3287 targetid = xpt_path_target_id(path);
3288 lunid = xpt_path_lun_id(path);
3290 KASSERT(targetid < sassc->maxtargets,
3291 ("Target %d out of bounds in mpssas_check_eedp\n",
3293 target = &sassc->targets[targetid];
3294 if (target->handle == 0x0)
3298 * Determine if the device is EEDP capable.
3300 * If this flag is set in the inquiry data,
3301 * the device supports protection information,
3302 * and must support the 16 byte read
3303 * capacity command, otherwise continue without
3304 * sending read cap 16
3306 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3310 * Issue a READ CAPACITY 16 command. This info
3311 * is used to determine if the LUN is formatted
3314 ccb = xpt_alloc_ccb_nowait();
3316 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3317 "for EEDP support.\n");
3321 if (xpt_create_path(&local_path, xpt_periph,
3322 pathid, targetid, lunid) != CAM_REQ_CMP) {
3323 mps_dprint(sc, MPS_ERROR, "Unable to create "
3324 "path for EEDP support\n");
3330 * If LUN is already in list, don't create a new
3334 SLIST_FOREACH(lun, &target->luns, lun_link) {
3335 if (lun->lun_id == lunid) {
3341 lun = malloc(sizeof(struct mpssas_lun),  M_MPT2,
3344 mps_dprint(sc, MPS_ERROR,
3345 "Unable to alloc LUN for EEDP support.\n");
3346 xpt_free_path(local_path);
3350 lun->lun_id = lunid;
3351 SLIST_INSERT_HEAD(&target->luns, lun,
3355 xpt_path_string(local_path, path_str, sizeof(path_str));
3357 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3358 path_str, target->handle);
3361 * Issue a READ CAPACITY 16 command for the LUN.
3362 * The mpssas_read_cap_done function will load
3363 * the read cap info into the LUN struct.
3365 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3366 M_MPT2, M_NOWAIT | M_ZERO);
3367 if (rcap_buf == NULL) {
3368 mps_dprint(sc, MPS_FAULT,
3369 "Unable to alloc read capacity buffer for EEDP support.\n");
3370 xpt_free_path(ccb->ccb_h.path);
3374 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3376 csio->ccb_h.func_code = XPT_SCSI_IO;
3377 csio->ccb_h.flags = CAM_DIR_IN;
3378 csio->ccb_h.retry_count = 4;
3379 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3380 csio->ccb_h.timeout = 60000;
3381 csio->data_ptr = (uint8_t *)rcap_buf;
3382 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3383 csio->sense_len = MPS_SENSE_LEN;
3384 csio->cdb_len = sizeof(*scsi_cmd);
3385 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E is SERVICE ACTION IN(16); with SRC16_SERVICE_ACTION this is
 * READ CAPACITY(16). Byte 13 is the allocation length. */
3387 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3388 bzero(scsi_cmd, sizeof(*scsi_cmd));
3389 scsi_cmd->opcode = 0x9E;
3390 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3391 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
3393 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the driver-internal READ CAPACITY(16) issued
 * by mpssas_check_eedp(): release the devq frozen by the internal
 * command, record the LUN's EEDP formatting state and logical block
 * size, then free the data buffer, path and ccb.
 * (Excerpt is truncated; some lines are missing.)
 */
3398 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3400 struct mpssas_softc *sassc;
3401 struct mpssas_target *target;
3402 struct mpssas_lun *lun;
3403 struct scsi_read_capacity_eedp *rcap_buf;
3405 if (done_ccb == NULL)
3408 /* Driver need to release devq, it Scsi command is
3409 * generated by driver internally.
3410 * Currently there is a single place where driver
3411 * calls scsi command internally. In future if driver
3412 * calls more scsi command internally, it needs to release
3413 * devq internally, since those command will not go back to
3416 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3417 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3418 xpt_release_devq(done_ccb->ccb_h.path,
3419 /*count*/ 1, /*run_queue*/TRUE);
3422 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3425 * Get the LUN ID for the path and look it up in the LUN list for the
3428 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3429 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3430 ("Target %d out of bounds in mpssas_read_cap_done\n",
3431 done_ccb->ccb_h.target_id));
3432 target = &sassc->targets[done_ccb->ccb_h.target_id];
3433 SLIST_FOREACH(lun, &target->luns, lun_link) {
3434 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3438 * Got the LUN in the target's LUN list. Fill it in
3439 * with EEDP info. If the READ CAP 16 command had some
3440 * SCSI error (common if command is not supported), mark
3441 * the lun as not supporting EEDP and set the block size
3444 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
3445 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3446 lun->eedp_formatted = FALSE;
3447 lun->eedp_block_size = 0;
/* Bit 0 of the protect byte = PROT_EN: the LUN was formatted with PI. */
3451 if (rcap_buf->protect & 0x01) {
3452 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3453 "target ID %d is formatted for EEDP "
3454 "support.\n", done_ccb->ccb_h.target_lun,
3455 done_ccb->ccb_h.target_id);
3456 lun->eedp_formatted = TRUE;
3457 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3462 // Finished with this CCB and path.
3463 free(rcap_buf, M_MPT2);
3464 xpt_free_path(done_ccb->ccb_h.path);
3465 xpt_free_ccb(done_ccb);
3467 #endif /* (__FreeBSD_version < 901503) || \
3468 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
/*
 * Prepare a task-management command targeting (target, lun_id): mark the
 * target as in reset and attach a CCB/path to the TM command so the devq
 * can be released when the TM completes.
 */
3471 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3472 struct mpssas_target *target, lun_id_t lun_id)
3478 * Set the INRESET flag for this target so that no I/O will be sent to
3479 * the target until the reset has completed. If an I/O request does
3480 * happen, the devq will be frozen. The CCB holds the path which is
3481 * used to release the devq. The devq is released and the CCB is freed
3482 * when the TM completes.
/* NOTE(review): xpt_alloc_ccb_nowait() can return NULL; the check is
 * presumably on a line not visible in this view -- confirm. */
3484 ccb = xpt_alloc_ccb_nowait();
3486 path_id = cam_sim_path(sc->sassc->sim);
3487 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3488 target->tid, lun_id) != CAM_REQ_CMP) {
3492 tm->cm_targ = target;
3493 target->flags |= MPSSAS_TARGET_INRESET;
/*
 * Begin SAS topology discovery: set wait_for_port_enable (keeps the simq
 * frozen until discovery finishes) and send the PORT ENABLE request.
 */
3499 mpssas_startup(struct mps_softc *sc)
3503 * Send the port enable message and set the wait_for_port_enable flag.
3504 * This flag helps to keep the simq frozen until all discovery events
3507 sc->wait_for_port_enable = 1;
3508 mpssas_send_portenable(sc);
/*
 * Allocate a driver command, build an MPI2 PORT ENABLE request, and map
 * (send) it to the IOC.  Completion is handled asynchronously by
 * mpssas_portenable_complete().
 */
3513 mpssas_send_portenable(struct mps_softc *sc)
3515 MPI2_PORT_ENABLE_REQUEST *request;
3516 struct mps_command *cm;
/* Out of driver command slots; the error-return path is on a line not
 * visible in this view. */
3520 if ((cm = mps_alloc_command(sc)) == NULL)
3522 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3523 request->Function = MPI2_FUNCTION_PORT_ENABLE;
3524 request->MsgFlags = 0;
3526 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3527 cm->cm_complete = mpssas_portenable_complete;
3531 mps_map_command(sc, cm);
3532 mps_dprint(sc, MPS_XINFO,
3533 "mps_send_portenable finished cm %p req %p complete %p\n",
3534 cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT ENABLE request sent by
 * mpssas_send_portenable().  Logs a failed or missing reply, frees the
 * command, pulls WarpDrive config pages before the bus scan, and releases
 * the port-enable hold taken before discovery started.
 */
3539 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3541 MPI2_PORT_ENABLE_REPLY *reply;
3542 struct mpssas_softc *sassc;
3548 * Currently there should be no way we can hit this case. It only
3549 * happens when we have a failure to allocate chain frames, and
3550 * port enable commands don't have S/G lists.
3552 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3553 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3554 "This should not happen!\n", __func__, cm->cm_flags);
3557 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3559 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
/*
 * Byte-swap IOCStatus first, then apply the host-order status mask.
 * The previous code masked the little-endian value before le16toh(),
 * which produced the wrong status comparison on big-endian hosts.
 */
3560 else if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
3561 MPI2_IOCSTATUS_SUCCESS)
3562 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3564 mps_free_command(sc, cm);
3567 * Get WarpDrive info after discovery is complete but before the scan
3568 * starts. At this point, all devices are ready to be exposed to the
3569 * OS. If devices should be hidden instead, take them out of the
3570 * 'targets' array before the scan. The devinfo for a disk will have
3571 * some info and a volume's will be 0. Use that to remove disks.
3573 mps_wd_config_pages(sc);
3576 * Done waiting for port enable to complete. Decrement the refcount.
3577 * If refcount is 0, discovery is complete and a rescan of the bus can
3578 * take place. Since the simq was explicitly frozen before port
3579 * enable, it must be explicitly released here to keep the
3580 * freeze/release count in sync.
3582 sc->wait_for_port_enable = 0;
3583 sc->port_enable_complete = 1;
3584 wakeup(&sc->port_enable_complete);
3585 mpssas_startup_decrement(sassc);
/*
 * Check whether target 'id' appears in the driver's comma-separated
 * exclude_ids list.  Empty tokens are skipped; tokens are parsed with
 * strtol() base 0, so decimal, hex (0x...), and octal forms all match.
 * (Return statements are on lines not visible in this view.)
 */
3589 mpssas_check_id(struct mpssas_softc *sassc, int id)
3591 struct mps_softc *sc = sassc->sc;
3595 ids = &sc->exclude_ids[0];
/* NOTE(review): strsep() consumes the string destructively by writing
 * NULs and advancing 'ids' -- confirm exclude_ids is not reused later. */
3596 while((name = strsep(&ids, ",")) != NULL) {
3597 if (name[0] == '\0')
3599 if (strtol(name, NULL, 0) == (long)id)
3607 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3609 struct mpssas_softc *sassc;
3610 struct mpssas_lun *lun, *lun_tmp;
3611 struct mpssas_target *targ;
3616 * The number of targets is based on IOC Facts, so free all of
3617 * the allocated LUNs for each target and then the target buffer
3620 for (i=0; i< maxtargets; i++) {
3621 targ = &sassc->targets[i];
3622 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3626 free(sassc->targets, M_MPT2);
3628 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3629 M_MPT2, M_WAITOK|M_ZERO);
3630 if (!sassc->targets) {
3631 panic("%s failed to alloc targets with error %d\n",