2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2009 Yahoo! Inc.
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 /* Communications core for Avago Technologies (LSI) MPT2 */
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
50 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
59 #include <machine/bus.h>
60 #include <machine/resource.h>
63 #include <machine/stdarg.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #if __FreeBSD_version >= 900026
76 #include <cam/scsi/smp_all.h>
79 #include <dev/mps/mpi/mpi2_type.h>
80 #include <dev/mps/mpi/mpi2.h>
81 #include <dev/mps/mpi/mpi2_ioc.h>
82 #include <dev/mps/mpi/mpi2_sas.h>
83 #include <dev/mps/mpi/mpi2_cnfg.h>
84 #include <dev/mps/mpi/mpi2_init.h>
85 #include <dev/mps/mpi/mpi2_tool.h>
86 #include <dev/mps/mps_ioctl.h>
87 #include <dev/mps/mpsvar.h>
88 #include <dev/mps/mps_table.h>
89 #include <dev/mps/mps_sas.h>
91 #define MPSSAS_DISCOVERY_TIMEOUT 20
92 #define MPSSAS_MAX_DISCOVERY_TIMEOUTS 10 /* 200 seconds */
95 * static array to check SCSI OpCode for EEDP protection bits
97 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
98 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
99 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
100 static uint8_t op_code_prot[256] = {
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
104 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
105 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
110 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
112 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
113 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
114 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
115 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
116 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
119 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
121 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
122 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
123 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
124 static void mpssas_poll(struct cam_sim *sim);
125 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
126 struct mps_command *cm);
127 static void mpssas_scsiio_timeout(void *data);
128 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
129 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
130 struct mps_command *cm, union ccb *ccb);
131 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
132 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
133 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
134 #if __FreeBSD_version >= 900026
135 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
136 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
138 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
139 #endif //FreeBSD_version >= 900026
140 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
141 static void mpssas_async(void *callback_arg, uint32_t code,
142 struct cam_path *path, void *arg);
143 #if (__FreeBSD_version < 901503) || \
144 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
145 static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
146 struct ccb_getdev *cgd);
147 static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
149 static int mpssas_send_portenable(struct mps_softc *sc);
150 static void mpssas_portenable_complete(struct mps_softc *sc,
151 struct mps_command *cm);
153 struct mpssas_target *
154 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
156 struct mpssas_target *target;
159 for (i = start; i < sassc->maxtargets; i++) {
160 target = &sassc->targets[i];
161 if (target->handle == handle)
168 /* we need to freeze the simq during attach and diag reset, to avoid failing
169 * commands before device handles have been found by discovery. Since
170 * discovery involves reading config pages and possibly sending commands,
171 * discovery actions may continue even after we receive the end of discovery
172 * event, so refcount discovery actions instead of assuming we can unfreeze
173 * the simq when we get the event.
176 mpssas_startup_increment(struct mpssas_softc *sassc)
178 MPS_FUNCTRACE(sassc->sc);
180 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
181 if (sassc->startup_refcount++ == 0) {
182 /* just starting, freeze the simq */
183 mps_dprint(sassc->sc, MPS_INIT,
184 "%s freezing simq\n", __func__);
185 #if __FreeBSD_version >= 1000039
188 xpt_freeze_simq(sassc->sim, 1);
190 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
191 sassc->startup_refcount);
196 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
198 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
199 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
200 xpt_release_simq(sassc->sim, 1);
201 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
206 mpssas_startup_decrement(struct mpssas_softc *sassc)
208 MPS_FUNCTRACE(sassc->sc);
210 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
211 if (--sassc->startup_refcount == 0) {
212 /* finished all discovery-related actions, release
213 * the simq and rescan for the latest topology.
215 mps_dprint(sassc->sc, MPS_INIT,
216 "%s releasing simq\n", __func__);
217 sassc->flags &= ~MPSSAS_IN_STARTUP;
218 xpt_release_simq(sassc->sim, 1);
219 #if __FreeBSD_version >= 1000039
222 mpssas_rescan_target(sassc->sc, NULL);
225 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
226 sassc->startup_refcount);
231 * The firmware requires us to stop sending commands when we're doing task
233 * XXX The logic for serializing the device has been made lazy and moved to
234 * mpssas_prepare_for_tm().
/*
 * Allocate a high-priority command for use as a task management request.
 * Returns NULL if no high-priority command is available.
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	struct mps_command *tm;

	tm = mps_alloc_high_priority_command(sc);
	return (tm);
}
246 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
248 int target_id = 0xFFFFFFFF;
254 * For TM's the devq is frozen for the device. Unfreeze it here and
255 * free the resources used for freezing the devq. Must clear the
256 * INRESET flag as well or scsi I/O will not work.
258 if (tm->cm_targ != NULL) {
259 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
260 target_id = tm->cm_targ->tid;
263 mps_dprint(sc, MPS_INFO, "Unfreezing devq for target ID %d\n",
265 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
266 xpt_free_path(tm->cm_ccb->ccb_h.path);
267 xpt_free_ccb(tm->cm_ccb);
270 mps_free_high_priority_command(sc, tm);
274 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
276 struct mpssas_softc *sassc = sc->sassc;
278 target_id_t targetid;
282 pathid = cam_sim_path(sassc->sim);
284 targetid = CAM_TARGET_WILDCARD;
286 targetid = targ - sassc->targets;
289 * Allocate a CCB and schedule a rescan.
291 ccb = xpt_alloc_ccb_nowait();
293 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
297 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
298 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
299 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
304 if (targetid == CAM_TARGET_WILDCARD)
305 ccb->ccb_h.func_code = XPT_SCAN_BUS;
307 ccb->ccb_h.func_code = XPT_SCAN_TGT;
309 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
314 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
324 /* No need to be in here if debugging isn't enabled */
325 if ((cm->cm_sc->mps_debug & level) == 0)
328 sbuf_new(&sb, str, sizeof(str), 0);
332 if (cm->cm_ccb != NULL) {
333 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
335 sbuf_cat(&sb, path_str);
336 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
337 scsi_command_string(&cm->cm_ccb->csio, &sb);
338 sbuf_printf(&sb, "length %d ",
339 cm->cm_ccb->csio.dxfer_len);
343 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
344 cam_sim_name(cm->cm_sc->sassc->sim),
345 cam_sim_unit(cm->cm_sc->sassc->sim),
346 cam_sim_bus(cm->cm_sc->sassc->sim),
347 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
351 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
352 sbuf_vprintf(&sb, fmt, ap);
354 mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
361 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
363 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
364 struct mpssas_target *targ;
369 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
370 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
374 /* XXX retry the remove after the diag reset completes? */
375 mps_dprint(sc, MPS_FAULT,
376 "%s NULL reply resetting device 0x%04x\n", __func__,
378 mpssas_free_tm(sc, tm);
382 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
383 MPI2_IOCSTATUS_SUCCESS) {
384 mps_dprint(sc, MPS_ERROR,
385 "IOCStatus = 0x%x while resetting device 0x%x\n",
386 le16toh(reply->IOCStatus), handle);
389 mps_dprint(sc, MPS_XINFO,
390 "Reset aborted %u commands\n", reply->TerminationCount);
391 mps_free_reply(sc, tm->cm_reply_data);
392 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
394 mps_dprint(sc, MPS_XINFO,
395 "clearing target %u handle 0x%04x\n", targ->tid, handle);
398 * Don't clear target if remove fails because things will get confusing.
399 * Leave the devname and sasaddr intact so that we know to avoid reusing
400 * this target id if possible, and so we can assign the same target id
401 * to this device if it comes back in the future.
403 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
404 MPI2_IOCSTATUS_SUCCESS) {
407 targ->encl_handle = 0x0;
408 targ->encl_slot = 0x0;
409 targ->exp_dev_handle = 0x0;
411 targ->linkrate = 0x0;
416 mpssas_free_tm(sc, tm);
421 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
422 * Otherwise Volume Delete is same as Bare Drive Removal.
425 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
427 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
428 struct mps_softc *sc;
429 struct mps_command *cm;
430 struct mpssas_target *targ = NULL;
432 MPS_FUNCTRACE(sassc->sc);
437 * If this is a WD controller, determine if the disk should be exposed
438 * to the OS or not. If disk should be exposed, return from this
439 * function without doing anything.
441 if (sc->WD_available && (sc->WD_hide_expose ==
442 MPS_WD_EXPOSE_ALWAYS)) {
447 targ = mpssas_find_target_by_handle(sassc, 0, handle);
449 /* FIXME: what is the action? */
450 /* We don't know about this device? */
451 mps_dprint(sc, MPS_ERROR,
452 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
456 targ->flags |= MPSSAS_TARGET_INREMOVAL;
458 cm = mpssas_alloc_tm(sc);
460 mps_dprint(sc, MPS_ERROR,
461 "%s: command alloc failure\n", __func__);
465 mpssas_rescan_target(sc, targ);
467 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
468 req->DevHandle = targ->handle;
469 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
470 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
472 /* SAS Hard Link Reset / SATA Link Reset */
473 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
477 cm->cm_complete = mpssas_remove_volume;
478 cm->cm_complete_data = (void *)(uintptr_t)handle;
480 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
481 __func__, targ->tid);
482 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
484 mps_map_command(sc, cm);
488 * The MPT2 firmware performs debounce on the link to avoid transient link
489 * errors and false removals. When it does decide that link has been lost
490 * and a device need to go away, it expects that the host will perform a
491 * target reset and then an op remove. The reset has the side-effect of
492 * aborting any outstanding requests for the device, which is required for
493 * the op-remove to succeed. It's not clear if the host should check for
494 * the device coming back alive after the reset.
497 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
499 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
500 struct mps_softc *sc;
501 struct mps_command *cm;
502 struct mpssas_target *targ = NULL;
504 MPS_FUNCTRACE(sassc->sc);
508 targ = mpssas_find_target_by_handle(sassc, 0, handle);
510 /* FIXME: what is the action? */
511 /* We don't know about this device? */
512 mps_dprint(sc, MPS_ERROR,
513 "%s : invalid handle 0x%x \n", __func__, handle);
517 targ->flags |= MPSSAS_TARGET_INREMOVAL;
519 cm = mpssas_alloc_tm(sc);
521 mps_dprint(sc, MPS_ERROR,
522 "%s: command alloc failure\n", __func__);
526 mpssas_rescan_target(sc, targ);
528 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
529 memset(req, 0, sizeof(*req));
530 req->DevHandle = htole16(targ->handle);
531 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
532 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
534 /* SAS Hard Link Reset / SATA Link Reset */
535 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
539 cm->cm_complete = mpssas_remove_device;
540 cm->cm_complete_data = (void *)(uintptr_t)handle;
542 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
543 __func__, targ->tid);
544 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
546 mps_map_command(sc, cm);
550 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
552 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
553 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
554 struct mpssas_target *targ;
555 struct mps_command *next_cm;
560 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
561 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
565 * Currently there should be no way we can hit this case. It only
566 * happens when we have a failure to allocate chain frames, and
567 * task management commands don't have S/G lists.
569 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
570 mps_dprint(sc, MPS_ERROR,
571 "%s: cm_flags = %#x for remove of handle %#04x! "
572 "This should not happen!\n", __func__, tm->cm_flags,
577 /* XXX retry the remove after the diag reset completes? */
578 mps_dprint(sc, MPS_FAULT,
579 "%s NULL reply resetting device 0x%04x\n", __func__,
581 mpssas_free_tm(sc, tm);
585 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
586 MPI2_IOCSTATUS_SUCCESS) {
587 mps_dprint(sc, MPS_ERROR,
588 "IOCStatus = 0x%x while resetting device 0x%x\n",
589 le16toh(reply->IOCStatus), handle);
592 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
593 le32toh(reply->TerminationCount));
594 mps_free_reply(sc, tm->cm_reply_data);
595 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
597 /* Reuse the existing command */
598 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
599 memset(req, 0, sizeof(*req));
600 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
601 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
602 req->DevHandle = htole16(handle);
604 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
605 tm->cm_complete = mpssas_remove_complete;
606 tm->cm_complete_data = (void *)(uintptr_t)handle;
608 mps_map_command(sc, tm);
610 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
612 TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
615 mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
616 ccb = tm->cm_complete_data;
617 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
618 mpssas_scsiio_complete(sc, tm);
623 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
625 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
627 struct mpssas_target *targ;
628 struct mpssas_lun *lun;
632 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
633 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
636 * Currently there should be no way we can hit this case. It only
637 * happens when we have a failure to allocate chain frames, and
638 * task management commands don't have S/G lists.
640 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
641 mps_dprint(sc, MPS_XINFO,
642 "%s: cm_flags = %#x for remove of handle %#04x! "
643 "This should not happen!\n", __func__, tm->cm_flags,
645 mpssas_free_tm(sc, tm);
650 /* most likely a chip reset */
651 mps_dprint(sc, MPS_FAULT,
652 "%s NULL reply removing device 0x%04x\n", __func__, handle);
653 mpssas_free_tm(sc, tm);
657 mps_dprint(sc, MPS_XINFO,
658 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
659 handle, le16toh(reply->IOCStatus));
662 * Don't clear target if remove fails because things will get confusing.
663 * Leave the devname and sasaddr intact so that we know to avoid reusing
664 * this target id if possible, and so we can assign the same target id
665 * to this device if it comes back in the future.
667 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
668 MPI2_IOCSTATUS_SUCCESS) {
671 targ->encl_handle = 0x0;
672 targ->encl_slot = 0x0;
673 targ->exp_dev_handle = 0x0;
675 targ->linkrate = 0x0;
679 while(!SLIST_EMPTY(&targ->luns)) {
680 lun = SLIST_FIRST(&targ->luns);
681 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
687 mpssas_free_tm(sc, tm);
691 mpssas_register_events(struct mps_softc *sc)
693 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
696 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
697 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
698 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
699 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
700 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
701 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
702 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
703 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
704 setbit(events, MPI2_EVENT_IR_VOLUME);
705 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
706 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
707 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
709 mps_register_events(sc, events, mpssas_evt_handler, NULL,
710 &sc->sassc->mpssas_eh);
716 mps_attach_sas(struct mps_softc *sc)
718 struct mpssas_softc *sassc;
720 int unit, error = 0, reqs;
723 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
725 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
727 mps_dprint(sc, MPS_INIT|MPS_ERROR,
728 "Cannot allocate SAS controller memory\n");
733 * XXX MaxTargets could change during a reinit. Since we don't
734 * resize the targets[] array during such an event, cache the value
735 * of MaxTargets here so that we don't get into trouble later. This
736 * should move into the reinit logic.
738 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
739 sassc->targets = malloc(sizeof(struct mpssas_target) *
740 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
741 if(!sassc->targets) {
742 mps_dprint(sc, MPS_INIT|MPS_ERROR,
743 "Cannot allocate SAS target memory\n");
750 reqs = sc->num_reqs - sc->num_prireqs - 1;
751 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
752 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
757 unit = device_get_unit(sc->mps_dev);
758 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
759 unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
760 if (sassc->sim == NULL) {
761 mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
766 TAILQ_INIT(&sassc->ev_queue);
768 /* Initialize taskqueue for Event Handling */
769 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
770 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
771 taskqueue_thread_enqueue, &sassc->ev_tq);
772 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
773 device_get_nameunit(sc->mps_dev));
778 * XXX There should be a bus for every port on the adapter, but since
779 * we're just going to fake the topology for now, we'll pretend that
780 * everything is just a target on a single bus.
782 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
783 mps_dprint(sc, MPS_INIT|MPS_ERROR,
784 "Error %d registering SCSI bus\n", error);
790 * Assume that discovery events will start right away.
792 * Hold off boot until discovery is complete.
794 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
795 sc->sassc->startup_refcount = 0;
796 mpssas_startup_increment(sassc);
798 callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
801 * Register for async events so we can determine the EEDP
802 * capabilities of devices.
804 status = xpt_create_path(&sassc->path, /*periph*/NULL,
805 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
807 if (status != CAM_REQ_CMP) {
808 mps_dprint(sc, MPS_ERROR|MPS_INIT,
809 "Error %#x creating sim path\n", status);
814 #if (__FreeBSD_version >= 1000006) || \
815 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
816 event = AC_ADVINFO_CHANGED;
818 event = AC_FOUND_DEVICE;
820 status = xpt_register_async(event, mpssas_async, sc,
822 if (status != CAM_REQ_CMP) {
823 mps_dprint(sc, MPS_ERROR,
824 "Error %#x registering async handler for "
825 "AC_ADVINFO_CHANGED events\n", status);
826 xpt_free_path(sassc->path);
830 if (status != CAM_REQ_CMP) {
832 * EEDP use is the exception, not the rule.
833 * Warn the user, but do not fail to attach.
835 mps_printf(sc, "EEDP capabilities disabled.\n");
840 mpssas_register_events(sc);
845 mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
850 mps_detach_sas(struct mps_softc *sc)
852 struct mpssas_softc *sassc;
853 struct mpssas_lun *lun, *lun_tmp;
854 struct mpssas_target *targ;
859 if (sc->sassc == NULL)
863 mps_deregister_events(sc, sassc->mpssas_eh);
866 * Drain and free the event handling taskqueue with the lock
867 * unheld so that any parallel processing tasks drain properly
868 * without deadlocking.
870 if (sassc->ev_tq != NULL)
871 taskqueue_free(sassc->ev_tq);
873 /* Make sure CAM doesn't wedge if we had to bail out early. */
876 while (sassc->startup_refcount != 0)
877 mpssas_startup_decrement(sassc);
879 /* Deregister our async handler */
880 if (sassc->path != NULL) {
881 xpt_register_async(0, mpssas_async, sc, sassc->path);
882 xpt_free_path(sassc->path);
886 if (sassc->flags & MPSSAS_IN_STARTUP)
887 xpt_release_simq(sassc->sim, 1);
889 if (sassc->sim != NULL) {
890 xpt_bus_deregister(cam_sim_path(sassc->sim));
891 cam_sim_free(sassc->sim, FALSE);
896 if (sassc->devq != NULL)
897 cam_simq_free(sassc->devq);
899 for(i=0; i< sassc->maxtargets ;i++) {
900 targ = &sassc->targets[i];
901 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
905 free(sassc->targets, M_MPT2);
913 mpssas_discovery_end(struct mpssas_softc *sassc)
915 struct mps_softc *sc = sassc->sc;
919 if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
920 callout_stop(&sassc->discovery_callout);
923 * After discovery has completed, check the mapping table for any
924 * missing devices and update their missing counts. Only do this once
925 * whenever the driver is initialized so that missing counts aren't
926 * updated unnecessarily. Note that just because discovery has
927 * completed doesn't mean that events have been processed yet. The
928 * check_devices function is a callout timer that checks if ALL devices
929 * are missing. If so, it will wait a little longer for events to
930 * complete and keep resetting itself until some device in the mapping
931 * table is not missing, meaning that event processing has started.
933 if (sc->track_mapping_events) {
934 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
935 "completed. Check for missing devices in the mapping "
937 callout_reset(&sc->device_check_callout,
938 MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
944 mpssas_action(struct cam_sim *sim, union ccb *ccb)
946 struct mpssas_softc *sassc;
948 sassc = cam_sim_softc(sim);
950 MPS_FUNCTRACE(sassc->sc);
951 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
952 ccb->ccb_h.func_code);
953 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
955 switch (ccb->ccb_h.func_code) {
958 struct ccb_pathinq *cpi = &ccb->cpi;
959 struct mps_softc *sc = sassc->sc;
961 cpi->version_num = 1;
962 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
963 cpi->target_sprt = 0;
964 #if __FreeBSD_version >= 1000039
965 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
967 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
969 cpi->hba_eng_cnt = 0;
970 cpi->max_target = sassc->maxtargets - 1;
974 * initiator_id is set here to an ID outside the set of valid
975 * target IDs (including volumes).
977 cpi->initiator_id = sassc->maxtargets;
978 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
979 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
980 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
981 cpi->unit_number = cam_sim_unit(sim);
982 cpi->bus_id = cam_sim_bus(sim);
983 cpi->base_transfer_speed = 150000;
984 cpi->transport = XPORT_SAS;
985 cpi->transport_version = 0;
986 cpi->protocol = PROTO_SCSI;
987 cpi->protocol_version = SCSI_REV_SPC;
988 cpi->maxio = sc->maxio;
989 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
992 case XPT_GET_TRAN_SETTINGS:
994 struct ccb_trans_settings *cts;
995 struct ccb_trans_settings_sas *sas;
996 struct ccb_trans_settings_scsi *scsi;
997 struct mpssas_target *targ;
1000 sas = &cts->xport_specific.sas;
1001 scsi = &cts->proto_specific.scsi;
1003 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
1004 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
1005 cts->ccb_h.target_id));
1006 targ = &sassc->targets[cts->ccb_h.target_id];
1007 if (targ->handle == 0x0) {
1008 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1012 cts->protocol_version = SCSI_REV_SPC2;
1013 cts->transport = XPORT_SAS;
1014 cts->transport_version = 0;
1016 sas->valid = CTS_SAS_VALID_SPEED;
1017 switch (targ->linkrate) {
1019 sas->bitrate = 150000;
1022 sas->bitrate = 300000;
1025 sas->bitrate = 600000;
1031 cts->protocol = PROTO_SCSI;
1032 scsi->valid = CTS_SCSI_VALID_TQ;
1033 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1035 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1038 case XPT_CALC_GEOMETRY:
1039 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1040 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1043 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1044 mpssas_action_resetdev(sassc, ccb);
1049 mps_dprint(sassc->sc, MPS_XINFO,
1050 "mpssas_action faking success for abort or reset\n");
1051 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1054 mpssas_action_scsiio(sassc, ccb);
1056 #if __FreeBSD_version >= 900026
1058 mpssas_action_smpio(sassc, ccb);
1062 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1070 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1071 target_id_t target_id, lun_id_t lun_id)
1073 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1074 struct cam_path *path;
1076 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1077 ac_code, target_id, (uintmax_t)lun_id);
1079 if (xpt_create_path(&path, NULL,
1080 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1081 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1086 xpt_async(ac_code, path, NULL);
1087 xpt_free_path(path);
1091 mpssas_complete_all_commands(struct mps_softc *sc)
1093 struct mps_command *cm;
1098 mtx_assert(&sc->mps_mtx, MA_OWNED);
1100 /* complete all commands with a NULL reply */
1101 for (i = 1; i < sc->num_reqs; i++) {
1102 cm = &sc->commands[i];
1103 if (cm->cm_state == MPS_CM_STATE_FREE)
1106 cm->cm_state = MPS_CM_STATE_BUSY;
1107 cm->cm_reply = NULL;
1110 if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
1112 free(cm->cm_data, M_MPT2);
1116 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1117 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1119 if (cm->cm_complete != NULL) {
1120 mpssas_log_command(cm, MPS_RECOVERY,
1121 "completing cm %p state %x ccb %p for diag reset\n",
1122 cm, cm->cm_state, cm->cm_ccb);
1124 cm->cm_complete(sc, cm);
1126 } else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1127 mpssas_log_command(cm, MPS_RECOVERY,
1128 "waking up cm %p state %x ccb %p for diag reset\n",
1129 cm, cm->cm_state, cm->cm_ccb);
1134 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1135 /* this should never happen, but if it does, log */
1136 mpssas_log_command(cm, MPS_RECOVERY,
1137 "cm %p state %x flags 0x%x ccb %p during diag "
1138 "reset\n", cm, cm->cm_state, cm->cm_flags,
1143 sc->io_cmds_active = 0;
1147 mpssas_handle_reinit(struct mps_softc *sc)
1151 /* Go back into startup mode and freeze the simq, so that CAM
1152 * doesn't send any commands until after we've rediscovered all
1153 * targets and found the proper device handles for them.
1155 * After the reset, portenable will trigger discovery, and after all
1156 * discovery-related activities have finished, the simq will be
1159 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1160 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1161 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1162 mpssas_startup_increment(sc->sassc);
1164 /* notify CAM of a bus reset */
1165 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1168 /* complete and cleanup after all outstanding commands */
1169 mpssas_complete_all_commands(sc);
1171 mps_dprint(sc, MPS_INIT,
1172 "%s startup %u after command completion\n", __func__,
1173 sc->sassc->startup_refcount);
1175 /* zero all the target handles, since they may change after the
1176 * reset, and we have to rediscover all the targets and use the new
1179 for (i = 0; i < sc->sassc->maxtargets; i++) {
1180 if (sc->sassc->targets[i].outstanding != 0)
1181 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1182 i, sc->sassc->targets[i].outstanding);
1183 sc->sassc->targets[i].handle = 0x0;
1184 sc->sassc->targets[i].exp_dev_handle = 0x0;
1185 sc->sassc->targets[i].outstanding = 0;
1186 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
/*
 * mpssas_tm_timeout:
 *
 * Callout handler fired when a task-management (TM) command itself
 * fails to complete within its timeout (armed in mpssas_send_reset /
 * mpssas_send_abort).  Must be called with the softc mutex held.
 * Marks the TM busy; the elided tail of this handler presumably
 * escalates to a full controller reset -- TODO confirm against the
 * complete source.
 */
1191 mpssas_tm_timeout(void *data)
1193 struct mps_command *tm = data;
1194 struct mps_softc *sc = tm->cm_sc;
1196 mtx_assert(&sc->mps_mtx, MA_OWNED);
1198 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1199 "task mgmt %p timed out\n", tm);
1201 KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1202 ("command not inqueue\n"));
1204 /* Take the command out of INQUEUE so completion paths skip it. */
1205 tm->cm_state = MPS_CM_STATE_BUSY;
/*
 * mpssas_logical_unit_reset_complete:
 *
 * Completion callback for a LOGICAL_UNIT_RESET task-management
 * command sent by mpssas_send_reset().  Stops the TM timeout callout,
 * then:
 *   - on an internal command error (MPS_CM_FLAGS_ERROR_MASK), logs
 *     and frees the TM (should be impossible -- TMs carry no S/G);
 *   - on a NULL reply during a diag reset, just cleans up; a NULL
 *     reply otherwise escalates to a controller reset;
 *   - otherwise counts commands still outstanding on this LUN.  If
 *     none remain, recovery succeeded: announce AC_SENT_BDR to CAM
 *     and either start aborting the next timed-out command on this
 *     target or free the TM.  If commands remain, the LUN reset
 *     effectively failed and we escalate to a target reset.
 */
1209 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1211 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1212 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1213 unsigned int cm_count = 0;
1214 struct mps_command *cm;
1215 struct mpssas_target *targ;
1217 callout_stop(&tm->cm_callout);
1219 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1220 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1224 * Currently there should be no way we can hit this case. It only
1225 * happens when we have a failure to allocate chain frames, and
1226 * task management commands don't have S/G lists.
1227 * XXXSL So should it be an assertion?
1229 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1230 mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
1231 "%s: cm_flags = %#x for LUN reset! "
1232 "This should not happen!\n", __func__, tm->cm_flags);
1233 mpssas_free_tm(sc, tm);
1237 if (reply == NULL) {
1238 mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
1240 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1241 /* this completion was due to a reset, just cleanup */
1242 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1243 "reset, ignoring NULL LUN reset reply\n");
1245 mpssas_free_tm(sc, tm);
1248 /* we should have gotten a reply. */
1249 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1250 "LUN reset attempt, resetting controller\n");
1256 mps_dprint(sc, MPS_RECOVERY,
1257 "logical unit reset status 0x%x code 0x%x count %u\n",
1258 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1259 le32toh(reply->TerminationCount));
1262 * See if there are any outstanding commands for this LUN.
1263 * This could be made more efficient by using a per-LU data
1264 * structure of some sort.
1266 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1267 if (cm->cm_lun == tm->cm_lun)
1271 if (cm_count == 0) {
1272 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1273 "Finished recovery after LUN reset for target %u\n",
1276 mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
1279 * We've finished recovery for this logical unit. check and
1280 * see if some other logical unit has a timedout command
1281 * that needs to be processed.
1283 cm = TAILQ_FIRST(&targ->timedout_commands);
1285 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1286 "More commands to abort for target %u\n",
1288 mpssas_send_abort(sc, tm, cm);
1291 mpssas_free_tm(sc, tm);
1295 * If we still have commands for this LUN, the reset
1296 * effectively failed, regardless of the status reported.
1297 * Escalate to a target reset.
1299 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1300 "logical unit reset complete for target %u, but still "
1301 "have %u command(s), sending target reset\n", targ->tid,
1303 mpssas_send_reset(sc, tm,
1304 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * mpssas_target_reset_complete:
 *
 * Completion callback for a TARGET_RESET task-management command sent
 * by mpssas_send_reset().  Mirrors the LUN-reset completion logic one
 * escalation level up: stops the TM timeout callout, handles the
 * internal-error and NULL-reply cases (cleanup during a diag reset,
 * otherwise escalate), then checks targ->outstanding.  Zero means
 * recovery succeeded for the whole target (announce AC_SENT_BDR, free
 * the TM); commands still outstanding mean the target reset failed,
 * and the only escalation left is a controller reset.
 */
1309 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1311 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1312 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1313 struct mpssas_target *targ;
1315 callout_stop(&tm->cm_callout);
1317 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1318 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1322 * Currently there should be no way we can hit this case. It only
1323 * happens when we have a failure to allocate chain frames, and
1324 * task management commands don't have S/G lists.
1326 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1327 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1328 "This should not happen!\n", __func__, tm->cm_flags);
1329 mpssas_free_tm(sc, tm);
1333 if (reply == NULL) {
1334 mps_dprint(sc, MPS_RECOVERY,
1335 "NULL target reset reply for tm %pi TaskMID %u\n",
1336 tm, le16toh(req->TaskMID));
1337 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1338 /* this completion was due to a reset, just cleanup */
1339 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1340 "reset, ignoring NULL target reset reply\n");
1342 mpssas_free_tm(sc, tm);
1344 /* we should have gotten a reply. */
1345 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1346 "target reset attempt, resetting controller\n");
1352 mps_dprint(sc, MPS_RECOVERY,
1353 "target reset status 0x%x code 0x%x count %u\n",
1354 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1355 le32toh(reply->TerminationCount));
1357 if (targ->outstanding == 0) {
1358 /* we've finished recovery for this target and all
1359 * of its logical units.
1361 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1362 "Finished reset recovery for target %u\n", targ->tid);
1364 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1368 mpssas_free_tm(sc, tm);
1371 * After a target reset, if this target still has
1372 * outstanding commands, the reset effectively failed,
1373 * regardless of the status reported. escalate.
1375 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1376 "Target reset complete for target %u, but still have %u "
1377 "command(s), resetting controller\n", targ->tid,
/* Seconds before an issued reset TM is itself considered timed out. */
1383 #define MPS_RESET_TIMEOUT 30
/*
 * mpssas_send_reset:
 *
 * Build and issue a SCSI task-management reset of the given type
 * (MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET or _TARGET_RESET)
 * for the target associated with 'tm'.  Bails out if the target has
 * no device handle.  Selects the matching completion callback
 * (mpssas_logical_unit_reset_complete / mpssas_target_reset_complete),
 * quiesces the target via mpssas_prepare_for_tm(), arms an
 * MPS_RESET_TIMEOUT callout driving mpssas_tm_timeout(), and maps the
 * command to the hardware.
 */
1386 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1388 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1389 struct mpssas_target *target;
1392 target = tm->cm_targ;
1393 if (target->handle == 0) {
1394 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1395 __func__, target->tid);
1399 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1400 req->DevHandle = htole16(target->handle);
1401 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1402 req->TaskType = type;
1404 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1405 /* XXX Need to handle invalid LUNs */
1406 MPS_SET_LUN(req->LUN, tm->cm_lun);
1407 tm->cm_targ->logical_unit_resets++;
1408 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1409 "Sending logical unit reset to target %u lun %d\n",
1410 target->tid, tm->cm_lun);
1411 tm->cm_complete = mpssas_logical_unit_reset_complete;
1412 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1413 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1415 * Target reset method =
1416 * SAS Hard Link Reset / SATA Link Reset
1418 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1419 tm->cm_targ->target_resets++;
1420 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1421 "Sending target reset to target %u\n", target->tid);
1422 tm->cm_complete = mpssas_target_reset_complete;
1423 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1425 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1430 tm->cm_complete_data = (void *)tm;
1432 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1433 mpssas_tm_timeout, tm);
1435 err = mps_map_command(sc, tm);
1437 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1438 "error %d sending reset type %u\n",
/*
 * mpssas_abort_complete:
 *
 * Completion callback for an ABORT_TASK task-management command sent
 * by mpssas_send_abort().  Stops the TM timeout callout, handles the
 * internal-error and NULL-reply cases like the reset completions,
 * then inspects the target's timedout_commands list:
 *   - list empty: abort recovery for the target is finished; free the TM;
 *   - head SMID differs from the aborted TaskMID: that command was
 *     aborted successfully, continue by aborting the next one;
 *   - head SMID still matches: the abort did not take effect, so
 *     escalate to a logical unit reset.
 */
1446 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1448 struct mps_command *cm;
1449 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1450 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1451 struct mpssas_target *targ;
1453 callout_stop(&tm->cm_callout);
1455 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1456 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1460 * Currently there should be no way we can hit this case. It only
1461 * happens when we have a failure to allocate chain frames, and
1462 * task management commands don't have S/G lists.
1464 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1465 mps_dprint(sc, MPS_RECOVERY,
1466 "cm_flags = %#x for abort %p TaskMID %u!\n",
1467 tm->cm_flags, tm, le16toh(req->TaskMID));
1468 mpssas_free_tm(sc, tm);
1472 if (reply == NULL) {
1473 mps_dprint(sc, MPS_RECOVERY,
1474 "NULL abort reply for tm %p TaskMID %u\n",
1475 tm, le16toh(req->TaskMID));
1476 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1477 /* this completion was due to a reset, just cleanup */
1478 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1479 "reset, ignoring NULL abort reply\n");
1481 mpssas_free_tm(sc, tm);
1483 /* we should have gotten a reply. */
1484 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1485 "abort attempt, resetting controller\n");
1491 mps_dprint(sc, MPS_RECOVERY,
1492 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1493 le16toh(req->TaskMID),
1494 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1495 le32toh(reply->TerminationCount));
1497 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1500 * If there are no more timedout commands, we're done with
1501 * error recovery for this target.
1503 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1504 "Finished abort recovery for target %u\n", targ->tid);
1507 mpssas_free_tm(sc, tm);
1508 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1509 /* abort success, but we have more timedout commands to abort */
1510 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1511 "Continuing abort recovery for target %u\n", targ->tid);
1513 mpssas_send_abort(sc, tm, cm);
1515 /* we didn't get a command completion, so the abort
1516 * failed as far as we're concerned. escalate.
1518 mps_dprint(sc, MPS_RECOVERY,
1519 "Abort failed for target %u, sending logical unit reset\n",
1522 mpssas_send_reset(sc, tm,
1523 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds before an issued ABORT_TASK TM is itself considered timed out. */
1527 #define MPS_ABORT_TIMEOUT 5
/*
 * mpssas_send_abort:
 *
 * Build and issue an ABORT_TASK task-management command for the
 * timed-out I/O 'cm', reusing TM command 'tm'.  Bails out if the
 * target has no device handle.  Fills in the DevHandle, LUN, and the
 * SMID of the victim command as TaskMID, points completion at
 * mpssas_abort_complete(), arms an MPS_ABORT_TIMEOUT callout driving
 * mpssas_tm_timeout(), quiesces the LUN via mpssas_prepare_for_tm(),
 * and maps the command to the hardware.
 */
1530 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1532 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1533 struct mpssas_target *targ;
1537 if (targ->handle == 0) {
1538 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1539 "%s null devhandle for target_id %d\n",
1540 __func__, cm->cm_ccb->ccb_h.target_id);
1544 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1545 "Aborting command %p\n", cm);
1547 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1548 req->DevHandle = htole16(targ->handle);
1549 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
1550 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1552 /* XXX Need to handle invalid LUNs */
1553 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
1555 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1558 tm->cm_complete = mpssas_abort_complete;
1559 tm->cm_complete_data = (void *)tm;
1560 tm->cm_targ = cm->cm_targ;
1561 tm->cm_lun = cm->cm_lun;
1563 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1564 mpssas_tm_timeout, tm);
1568 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1570 err = mps_map_command(sc, tm);
1572 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1573 "error %d sending abort for cm %p SMID %u\n",
1574 err, cm, req->TaskMID);
/*
 * mpssas_scsiio_timeout:
 *
 * Callout handler fired when a SCSI I/O command exceeds its CAM
 * timeout.  First runs the interrupt handler in case the completion
 * is merely pending; if the command is no longer INQUEUE it "almost
 * timed out" and nothing more is done.  Otherwise the command is
 * marked CAM_CMD_TIMEOUT / MPS_CM_STATE_TIMEDOUT and queued on the
 * target's recovery list.  If the target is not already in recovery,
 * a TM command is allocated and an ABORT_TASK is sent for this first
 * timed-out command; if no TM is available, the event is only logged
 * (per the Isilon note, TM credits are assumed sufficient in practice).
 */
1579 mpssas_scsiio_timeout(void *data)
1581 sbintime_t elapsed, now;
1583 struct mps_softc *sc;
1584 struct mps_command *cm;
1585 struct mpssas_target *targ;
1587 cm = (struct mps_command *)data;
1593 mtx_assert(&sc->mps_mtx, MA_OWNED);
1595 mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", sc);
1598 * Run the interrupt handler to make sure it's not pending. This
1599 * isn't perfect because the command could have already completed
1600 * and been re-used, though this is unlikely.
1602 mps_intr_locked(sc);
1603 if (cm->cm_state != MPS_CM_STATE_INQUEUE) {
1604 mpssas_log_command(cm, MPS_XINFO,
1605 "SCSI command %p almost timed out\n", cm);
1609 if (cm->cm_ccb == NULL) {
1610 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
1617 elapsed = now - ccb->ccb_h.qos.sim_data;
1618 mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1619 "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1620 targ->tid, targ->handle, ccb->ccb_h.timeout,
1621 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1623 /* XXX first, check the firmware state, to see if it's still
1624 * operational. if not, do a diag reset.
1626 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1627 cm->cm_state = MPS_CM_STATE_TIMEDOUT;
1628 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1630 if (targ->tm != NULL) {
1631 /* target already in recovery, just queue up another
1632 * timedout command to be processed later.
1634 mps_dprint(sc, MPS_RECOVERY,
1635 "queued timedout cm %p for processing by tm %p\n",
1637 } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1638 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1639 "Sending abort to target %u for SMID %d\n", targ->tid,
1640 cm->cm_desc.Default.SMID);
1641 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1644 /* start recovery by aborting the first timedout command */
1645 mpssas_send_abort(sc, targ->tm, cm);
1647 /* XXX queue this target up for recovery once a TM becomes
1648 * available. The firmware only has a limited number of
1649 * HighPriority credits for the high priority requests used
1650 * for task management, and we ran out.
1652 * Isilon: don't worry about this for now, since we have
1653 * more credits than disks in an enclosure, and limit
1654 * ourselves to one TM per target for recovery.
1656 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1657 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * mpssas_action_scsiio:
 *
 * CAM XPT_SCSI_IO action handler: translate a CAM SCSI I/O CCB into
 * an MPI2 SCSI_IO request and hand it to the hardware.
 *
 * Rejection/early-completion paths (each completes the CCB): target
 * has no device handle or is a RAID component (CAM_DEV_NOT_THERE);
 * CCB no longer in progress; target in removal (volumes complete OK,
 * others get CAM_SEL_TIMEOUT); driver shutting down; target in reset
 * (freeze the devq, CAM_BUSY); no free command or a diag reset in
 * progress (freeze the simq, CAM_REQUEUE_REQ).
 *
 * Otherwise builds the request: DevHandle, sense buffer, data length
 * and direction flags, tag-action and TLR control bits, LUN, CDB
 * (pointer or inline), and -- when EEDP is enabled and the LUN is
 * EEDP-formatted -- protection-information flags and a primary
 * reference tag taken from the CDB's LBA bytes.  For WD (WarpDrive)
 * configurations a direct-drive I/O may be built instead; retries go
 * to the IR volume itself.  Finally arms the per-command timeout
 * callout with mpssas_scsiio_timeout, queues the command on the
 * target, and maps it to the hardware.
 */
1663 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1665 MPI2_SCSI_IO_REQUEST *req;
1666 struct ccb_scsiio *csio;
1667 struct mps_softc *sc;
1668 struct mpssas_target *targ;
1669 struct mpssas_lun *lun;
1670 struct mps_command *cm;
1671 uint8_t i, lba_byte, *ref_tag_addr;
1672 uint16_t eedp_flags;
1673 uint32_t mpi_control;
1677 mtx_assert(&sc->mps_mtx, MA_OWNED);
1680 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1681 ("Target %d out of bounds in XPT_SCSI_IO\n",
1682 csio->ccb_h.target_id));
1683 targ = &sassc->targets[csio->ccb_h.target_id];
1684 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
1685 if (targ->handle == 0x0) {
1686 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1687 __func__, csio->ccb_h.target_id);
1688 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1692 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1693 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1694 "supported %u\n", __func__, csio->ccb_h.target_id);
1695 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1700 * Sometimes, it is possible to get a command that is not "In
1701 * Progress" and was actually aborted by the upper layer. Check for
1702 * this here and complete the command without error.
1704 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1705 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1706 "target %u\n", __func__, csio->ccb_h.target_id);
1711 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1712 * that the volume has timed out. We want volumes to be enumerated
1713 * until they are deleted/removed, not just failed.
1715 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1716 if (targ->devinfo == 0)
1717 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1719 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
1724 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1725 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1726 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1732 * If target has a reset in progress, freeze the devq and return. The
1733 * devq will be released when the TM reset is finished.
1735 if (targ->flags & MPSSAS_TARGET_INRESET) {
1736 ccb->ccb_h.status = CAM_BUSY | CAM_DEV_QFRZN;
1737 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1738 __func__, targ->tid);
1739 xpt_freeze_devq(ccb->ccb_h.path, 1);
1744 cm = mps_alloc_command(sc);
1745 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1747 mps_free_command(sc, cm);
1749 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1750 xpt_freeze_simq(sassc->sim, 1);
1751 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1753 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1754 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1759 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1760 bzero(req, sizeof(*req));
1761 req->DevHandle = htole16(targ->handle);
1762 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1764 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1765 req->SenseBufferLength = MPS_SENSE_LEN;
1767 req->ChainOffset = 0;
1768 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1773 req->DataLength = htole32(csio->dxfer_len);
1774 req->BidirectionalDataLength = 0;
1775 req->IoFlags = htole16(csio->cdb_len);
1778 /* Note: BiDirectional transfers are not supported */
1779 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1781 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1782 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1785 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1786 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1790 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1794 if (csio->cdb_len == 32)
1795 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1797 * It looks like the hardware doesn't require an explicit tag
1798 * number for each transaction. SAM Task Management not supported
1801 switch (csio->tag_action) {
1802 case MSG_HEAD_OF_Q_TAG:
1803 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1805 case MSG_ORDERED_Q_TAG:
1806 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1809 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1811 case CAM_TAG_ACTION_NONE:
1812 case MSG_SIMPLE_Q_TAG:
1814 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1817 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1818 req->Control = htole32(mpi_control);
1819 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1820 mps_free_command(sc, cm);
1821 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
1826 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1827 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1829 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1830 req->IoFlags = htole16(csio->cdb_len);
1833 * Check if EEDP is supported and enabled. If it is then check if the
1834 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1835 * is formatted for EEDP support. If all of this is true, set CDB up
1836 * for EEDP transfer.
1838 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1839 if (sc->eedp_enabled && eedp_flags) {
1840 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1841 if (lun->lun_id == csio->ccb_h.target_lun) {
1846 if ((lun != NULL) && (lun->eedp_formatted)) {
1847 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1848 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1849 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1850 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1851 req->EEDPFlags = htole16(eedp_flags);
1854 * If CDB less than 32, fill in Primary Ref Tag with
1855 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1856 * already there. Also, set protection bit. FreeBSD
1857 * currently does not support CDBs bigger than 16, but
1858 * the code doesn't hurt, and will be here for the
1861 if (csio->cdb_len != 32) {
1862 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1863 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1864 PrimaryReferenceTag;
1865 for (i = 0; i < 4; i++) {
1867 req->CDB.CDB32[lba_byte + i];
1870 req->CDB.EEDP32.PrimaryReferenceTag =
1871 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1872 req->CDB.EEDP32.PrimaryApplicationTagMask =
1874 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1878 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1879 req->EEDPFlags = htole16(eedp_flags);
1880 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1886 cm->cm_length = csio->dxfer_len;
1887 if (cm->cm_length != 0) {
1889 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
1893 cm->cm_sge = &req->SGL;
1894 cm->cm_sglsize = (32 - 24) * 4;
1895 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1896 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1897 cm->cm_complete = mpssas_scsiio_complete;
1898 cm->cm_complete_data = ccb;
1900 cm->cm_lun = csio->ccb_h.target_lun;
1904 * If HBA is a WD and the command is not for a retry, try to build a
1905 * direct I/O message. If failed, or the command is for a retry, send
1906 * the I/O to the IR volume itself.
1908 if (sc->WD_valid_config) {
1909 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1910 mpssas_direct_drive_io(sassc, cm, ccb);
1912 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1916 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1917 if (csio->bio != NULL)
1918 biotrack(csio->bio, __func__);
1920 csio->ccb_h.qos.sim_data = sbinuptime();
1921 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1922 mpssas_scsiio_timeout, cm, 0);
1925 targ->outstanding++;
1926 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1927 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1929 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1930 __func__, cm, ccb, targ->outstanding);
1932 mps_map_command(sc, cm);
1937 * mps_sc_failed_io_info - translated non-succesfull SCSI_IO request
/*
 * Log diagnostic detail (at MPS_XINFO level) for a SCSI_IO request
 * that completed unsuccessfully: decoded IOC status, SCSI status and
 * state bit names, autosense data when present, and the SAS response
 * code when the reply carries valid response info.  A log_info value
 * of 0x31170000 is special-cased -- presumably filtered as noise;
 * the elided line would confirm.
 */
1940 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1941 Mpi2SCSIIOReply_t *mpi_reply)
1945 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1946 MPI2_IOCSTATUS_MASK;
1947 u8 scsi_state = mpi_reply->SCSIState;
1948 u8 scsi_status = mpi_reply->SCSIStatus;
1949 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1950 const char *desc_ioc_state, *desc_scsi_status;
1952 if (log_info == 0x31170000)
1955 desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1957 desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1960 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1961 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1964 *We can add more detail about underflow data here
1967 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1968 "scsi_state %b\n", desc_scsi_status, scsi_status,
1969 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1970 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1972 if (sc->mps_debug & MPS_XINFO &&
1973 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1974 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1975 scsi_sense_print(csio);
1976 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1979 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1980 response_info = le32toh(mpi_reply->ResponseInfo);
1981 response_bytes = (u8 *)&response_info;
1982 mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1984 mps_describe_table(mps_scsi_taskmgmt_string,
1985 response_bytes[0]));
1990 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1992 MPI2_SCSI_IO_REPLY *rep;
1994 struct ccb_scsiio *csio;
1995 struct mpssas_softc *sassc;
1996 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1997 u8 *TLR_bits, TLR_on;
2000 struct mpssas_target *target;
2001 target_id_t target_id;
2004 mps_dprint(sc, MPS_TRACE,
2005 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
2006 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
2007 cm->cm_targ->outstanding);
2009 callout_stop(&cm->cm_callout);
2010 mtx_assert(&sc->mps_mtx, MA_OWNED);
2013 ccb = cm->cm_complete_data;
2015 target_id = csio->ccb_h.target_id;
2016 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
2018 * XXX KDM if the chain allocation fails, does it matter if we do
2019 * the sync and unload here? It is simpler to do it in every case,
2020 * assuming it doesn't cause problems.
2022 if (cm->cm_data != NULL) {
2023 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
2024 dir = BUS_DMASYNC_POSTREAD;
2025 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
2026 dir = BUS_DMASYNC_POSTWRITE;
2027 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2028 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2031 cm->cm_targ->completed++;
2032 cm->cm_targ->outstanding--;
2033 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2034 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2036 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2037 if (ccb->csio.bio != NULL)
2038 biotrack(ccb->csio.bio, __func__);
2041 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
2042 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2043 cm->cm_state = MPS_CM_STATE_BUSY;
2044 if (cm->cm_reply != NULL)
2045 mpssas_log_command(cm, MPS_RECOVERY,
2046 "completed timedout cm %p ccb %p during recovery "
2047 "ioc %x scsi %x state %x xfer %u\n",
2048 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2049 rep->SCSIStatus, rep->SCSIState,
2050 le32toh(rep->TransferCount));
2052 mpssas_log_command(cm, MPS_RECOVERY,
2053 "completed timedout cm %p ccb %p during recovery\n",
2055 } else if (cm->cm_targ->tm != NULL) {
2056 if (cm->cm_reply != NULL)
2057 mpssas_log_command(cm, MPS_RECOVERY,
2058 "completed cm %p ccb %p during recovery "
2059 "ioc %x scsi %x state %x xfer %u\n",
2060 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2061 rep->SCSIStatus, rep->SCSIState,
2062 le32toh(rep->TransferCount));
2064 mpssas_log_command(cm, MPS_RECOVERY,
2065 "completed cm %p ccb %p during recovery\n",
2067 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2068 mpssas_log_command(cm, MPS_RECOVERY,
2069 "reset completed cm %p ccb %p\n",
2073 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2075 * We ran into an error after we tried to map the command,
2076 * so we're getting a callback without queueing the command
2077 * to the hardware. So we set the status here, and it will
2078 * be retained below. We'll go through the "fast path",
2079 * because there can be no reply when we haven't actually
2080 * gone out to the hardware.
2082 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2085 * Currently the only error included in the mask is
2086 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2087 * chain frames. We need to freeze the queue until we get
2088 * a command that completed without this error, which will
2089 * hopefully have some chain frames attached that we can
2090 * use. If we wanted to get smarter about it, we would
2091 * only unfreeze the queue in this condition when we're
2092 * sure that we're getting some chain frames back. That's
2093 * probably unnecessary.
2095 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2096 xpt_freeze_simq(sassc->sim, 1);
2097 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2098 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2099 "freezing SIM queue\n");
2104 * If this is a Start Stop Unit command and it was issued by the driver
2105 * during shutdown, decrement the refcount to account for all of the
2106 * commands that were sent. All SSU commands should be completed before
2107 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2110 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2111 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2115 /* Take the fast path to completion */
2116 if (cm->cm_reply == NULL) {
2117 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2118 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2119 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2121 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2122 ccb->csio.scsi_status = SCSI_STATUS_OK;
2124 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2125 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2126 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2127 mps_dprint(sc, MPS_XINFO,
2128 "Unfreezing SIM queue\n");
2133 * There are two scenarios where the status won't be
2134 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2135 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2137 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2139 * Freeze the dev queue so that commands are
2140 * executed in the correct order after error
2143 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2144 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2146 mps_free_command(sc, cm);
2151 mpssas_log_command(cm, MPS_XINFO,
2152 "ioc %x scsi %x state %x xfer %u\n",
2153 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2154 le32toh(rep->TransferCount));
2157 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2158 * Volume if an error occurred (normal I/O retry). Use the original
2159 * CCB, but set a flag that this will be a retry so that it's sent to
2160 * the original volume. Free the command but reuse the CCB.
2162 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2163 mps_free_command(sc, cm);
2164 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2165 mpssas_action_scsiio(sassc, ccb);
2168 ccb->ccb_h.sim_priv.entries[0].field = 0;
2170 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2171 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2172 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2174 case MPI2_IOCSTATUS_SUCCESS:
2175 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2177 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2178 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2179 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2181 /* Completion failed at the transport level. */
2182 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2183 MPI2_SCSI_STATE_TERMINATED)) {
2184 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2188 /* In a modern packetized environment, an autosense failure
2189 * implies that there's not much else that can be done to
2190 * recover the command.
2192 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2193 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2198 * CAM doesn't care about SAS Response Info data, but if this is
2199 * the state check if TLR should be done. If not, clear the
2200 * TLR_bits for the target.
2202 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2203 ((le32toh(rep->ResponseInfo) &
2204 MPI2_SCSI_RI_MASK_REASONCODE) ==
2205 MPS_SCSI_RI_INVALID_FRAME)) {
2206 sc->mapping_table[target_id].TLR_bits =
2207 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2211 * Intentionally override the normal SCSI status reporting
2212 * for these two cases. These are likely to happen in a
2213 * multi-initiator environment, and we want to make sure that
2214 * CAM retries these commands rather than fail them.
2216 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2217 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2218 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2222 /* Handle normal status and sense */
2223 csio->scsi_status = rep->SCSIStatus;
2224 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2225 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2227 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2229 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2230 int sense_len, returned_sense_len;
2232 returned_sense_len = min(le32toh(rep->SenseCount),
2233 sizeof(struct scsi_sense_data));
2234 if (returned_sense_len < ccb->csio.sense_len)
2235 ccb->csio.sense_resid = ccb->csio.sense_len -
2238 ccb->csio.sense_resid = 0;
2240 sense_len = min(returned_sense_len,
2241 ccb->csio.sense_len - ccb->csio.sense_resid);
2242 bzero(&ccb->csio.sense_data,
2243 sizeof(ccb->csio.sense_data));
2244 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2245 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2249 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2250 * and it's page code 0 (Supported Page List), and there is
2251 * inquiry data, and this is for a sequential access device, and
2252 * the device is an SSP target, and TLR is supported by the
2253 * controller, turn the TLR_bits value ON if page 0x90 is
2256 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2257 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2258 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2259 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2260 (csio->data_ptr != NULL) &&
2261 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2262 (sc->control_TLR) &&
2263 (sc->mapping_table[target_id].device_info &
2264 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2265 vpd_list = (struct scsi_vpd_supported_page_list *)
2267 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2268 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2269 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2270 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2271 csio->cdb_io.cdb_bytes[4];
2272 alloc_len -= csio->resid;
2273 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2274 if (vpd_list->list[i] == 0x90) {
2282 * If this is a SATA direct-access end device, mark it so that
2283 * a SCSI StartStopUnit command will be sent to it when the
2284 * driver is being shutdown.
2286 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2287 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2288 (sc->mapping_table[target_id].device_info &
2289 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2290 ((sc->mapping_table[target_id].device_info &
2291 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2292 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2293 target = &sassc->targets[target_id];
2294 target->supports_SSU = TRUE;
2295 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2299 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2300 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2302 * If devinfo is 0 this will be a volume. In that case don't
2303 * tell CAM that the volume is not there. We want volumes to
2304 * be enumerated until they are deleted/removed, not just
2307 if (cm->cm_targ->devinfo == 0)
2308 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2310 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2312 case MPI2_IOCSTATUS_INVALID_SGL:
2313 mps_print_scsiio_cmd(sc, cm);
2314 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2316 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2318 * This is one of the responses that comes back when an I/O
2319 * has been aborted. If it is because of a timeout that we
2320 * initiated, just set the status to CAM_CMD_TIMEOUT.
2321 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2322 * command is the same (it gets retried, subject to the
2323 * retry counter), the only difference is what gets printed
2326 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT)
2327 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2329 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2331 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2332 /* resid is ignored for this condition */
2334 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2336 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2337 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2339 * These can sometimes be transient transport-related
2340 * errors, and sometimes persistent drive-related errors.
2341 * We used to retry these without decrementing the retry
2342 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2343 * we hit a persistent drive problem that returns one of
2344 * these error codes, we would retry indefinitely. So,
2345 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2346 * count and avoid infinite retries. We're taking the
2347 * potential risk of flagging false failures in the event
2348 * of a topology-related error (e.g. a SAS expander problem
2349 * causes a command addressed to a drive to fail), but
2350 * avoiding getting into an infinite retry loop.
2352 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2353 mps_dprint(sc, MPS_INFO,
2354 "Controller reported %s tgt %u SMID %u loginfo %x\n",
2355 mps_describe_table(mps_iocstatus_string,
2356 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2357 target_id, cm->cm_desc.Default.SMID,
2358 le32toh(rep->IOCLogInfo));
2359 mps_dprint(sc, MPS_XINFO,
2360 "SCSIStatus %x SCSIState %x xfercount %u\n",
2361 rep->SCSIStatus, rep->SCSIState,
2362 le32toh(rep->TransferCount));
2364 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2365 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2366 case MPI2_IOCSTATUS_INVALID_VPID:
2367 case MPI2_IOCSTATUS_INVALID_FIELD:
2368 case MPI2_IOCSTATUS_INVALID_STATE:
2369 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2370 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2371 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2372 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2373 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2375 mpssas_log_command(cm, MPS_XINFO,
2376 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2377 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2378 rep->SCSIStatus, rep->SCSIState,
2379 le32toh(rep->TransferCount));
2380 csio->resid = cm->cm_length;
2381 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2385 mps_sc_failed_io_info(sc,csio,rep);
2387 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2388 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2389 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2390 mps_dprint(sc, MPS_XINFO, "Command completed, "
2391 "unfreezing SIM queue\n");
2394 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2395 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2396 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2399 mps_free_command(sc, cm);
2403 /* All requests that reach here are endian-safe */
2405 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2407 pMpi2SCSIIORequest_t pIO_req;
2408 struct mps_softc *sc = sassc->sc;
2410 uint32_t physLBA, stripe_offset, stripe_unit;
2411 uint32_t io_size, column;
2412 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2415 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2416 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2417 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2418 * bit different than the 10/16 CDBs, handle them separately.
2420 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2421 CDB = pIO_req->CDB.CDB32;
2424 * Handle 6 byte CDBs.
2426 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2427 (CDB[0] == WRITE_6))) {
2429 * Get the transfer size in blocks.
2431 io_size = (cm->cm_length >> sc->DD_block_exponent);
2434 * Get virtual LBA given in the CDB.
2436 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2437 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2440 * Check that LBA range for I/O does not exceed volume's
2443 if ((virtLBA + (uint64_t)io_size - 1) <=
2446 * Check if the I/O crosses a stripe boundary. If not,
2447 * translate the virtual LBA to a physical LBA and set
2448 * the DevHandle for the PhysDisk to be used. If it
2449 * does cross a boundary, do normal I/O. To get the
2450 * right DevHandle to use, get the map number for the
2451 * column, then use that map number to look up the
2452 * DevHandle of the PhysDisk.
2454 stripe_offset = (uint32_t)virtLBA &
2455 (sc->DD_stripe_size - 1);
2456 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2457 physLBA = (uint32_t)virtLBA >>
2458 sc->DD_stripe_exponent;
2459 stripe_unit = physLBA / sc->DD_num_phys_disks;
2460 column = physLBA % sc->DD_num_phys_disks;
2461 pIO_req->DevHandle =
2462 htole16(sc->DD_column_map[column].dev_handle);
2463 /* ???? Is this endian safe*/
2464 cm->cm_desc.SCSIIO.DevHandle =
2467 physLBA = (stripe_unit <<
2468 sc->DD_stripe_exponent) + stripe_offset;
2469 ptrLBA = &pIO_req->CDB.CDB32[1];
2470 physLBA_byte = (uint8_t)(physLBA >> 16);
2471 *ptrLBA = physLBA_byte;
2472 ptrLBA = &pIO_req->CDB.CDB32[2];
2473 physLBA_byte = (uint8_t)(physLBA >> 8);
2474 *ptrLBA = physLBA_byte;
2475 ptrLBA = &pIO_req->CDB.CDB32[3];
2476 physLBA_byte = (uint8_t)physLBA;
2477 *ptrLBA = physLBA_byte;
2480 * Set flag that Direct Drive I/O is
2483 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2490 * Handle 10, 12 or 16 byte CDBs.
2492 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2493 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2494 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2495 (CDB[0] == WRITE_12))) {
2497 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2498 * are 0. If not, this is accessing beyond 2TB so handle it in
2499 * the else section. 10-byte and 12-byte CDB's are OK.
2500 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2501 * ready to accept 12byte CDB for Direct IOs.
2503 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2504 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2505 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2507 * Get the transfer size in blocks.
2509 io_size = (cm->cm_length >> sc->DD_block_exponent);
2512 * Get virtual LBA. Point to correct lower 4 bytes of
2513 * LBA in the CDB depending on command.
2515 lba_idx = ((CDB[0] == READ_12) ||
2516 (CDB[0] == WRITE_12) ||
2517 (CDB[0] == READ_10) ||
2518 (CDB[0] == WRITE_10))? 2 : 6;
2519 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2520 ((uint64_t)CDB[lba_idx + 1] << 16) |
2521 ((uint64_t)CDB[lba_idx + 2] << 8) |
2522 (uint64_t)CDB[lba_idx + 3];
2525 * Check that LBA range for I/O does not exceed volume's
2528 if ((virtLBA + (uint64_t)io_size - 1) <=
2531 * Check if the I/O crosses a stripe boundary.
2532 * If not, translate the virtual LBA to a
2533 * physical LBA and set the DevHandle for the
2534 * PhysDisk to be used. If it does cross a
2535 * boundary, do normal I/O. To get the right
2536 * DevHandle to use, get the map number for the
2537 * column, then use that map number to look up
2538 * the DevHandle of the PhysDisk.
2540 stripe_offset = (uint32_t)virtLBA &
2541 (sc->DD_stripe_size - 1);
2542 if ((stripe_offset + io_size) <=
2543 sc->DD_stripe_size) {
2544 physLBA = (uint32_t)virtLBA >>
2545 sc->DD_stripe_exponent;
2546 stripe_unit = physLBA /
2547 sc->DD_num_phys_disks;
2549 sc->DD_num_phys_disks;
2550 pIO_req->DevHandle =
2551 htole16(sc->DD_column_map[column].
2553 cm->cm_desc.SCSIIO.DevHandle =
2556 physLBA = (stripe_unit <<
2557 sc->DD_stripe_exponent) +
2560 &pIO_req->CDB.CDB32[lba_idx];
2561 physLBA_byte = (uint8_t)(physLBA >> 24);
2562 *ptrLBA = physLBA_byte;
2564 &pIO_req->CDB.CDB32[lba_idx + 1];
2565 physLBA_byte = (uint8_t)(physLBA >> 16);
2566 *ptrLBA = physLBA_byte;
2568 &pIO_req->CDB.CDB32[lba_idx + 2];
2569 physLBA_byte = (uint8_t)(physLBA >> 8);
2570 *ptrLBA = physLBA_byte;
2572 &pIO_req->CDB.CDB32[lba_idx + 3];
2573 physLBA_byte = (uint8_t)physLBA;
2574 *ptrLBA = physLBA_byte;
2577 * Set flag that Direct Drive I/O is
2580 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2585 * 16-byte CDB and the upper 4 bytes of the CDB are not
2586 * 0. Get the transfer size in blocks.
2588 io_size = (cm->cm_length >> sc->DD_block_exponent);
2593 virtLBA = ((uint64_t)CDB[2] << 54) |
2594 ((uint64_t)CDB[3] << 48) |
2595 ((uint64_t)CDB[4] << 40) |
2596 ((uint64_t)CDB[5] << 32) |
2597 ((uint64_t)CDB[6] << 24) |
2598 ((uint64_t)CDB[7] << 16) |
2599 ((uint64_t)CDB[8] << 8) |
2603 * Check that LBA range for I/O does not exceed volume's
2606 if ((virtLBA + (uint64_t)io_size - 1) <=
2609 * Check if the I/O crosses a stripe boundary.
2610 * If not, translate the virtual LBA to a
2611 * physical LBA and set the DevHandle for the
2612 * PhysDisk to be used. If it does cross a
2613 * boundary, do normal I/O. To get the right
2614 * DevHandle to use, get the map number for the
2615 * column, then use that map number to look up
2616 * the DevHandle of the PhysDisk.
2618 stripe_offset = (uint32_t)virtLBA &
2619 (sc->DD_stripe_size - 1);
2620 if ((stripe_offset + io_size) <=
2621 sc->DD_stripe_size) {
2622 physLBA = (uint32_t)(virtLBA >>
2623 sc->DD_stripe_exponent);
2624 stripe_unit = physLBA /
2625 sc->DD_num_phys_disks;
2627 sc->DD_num_phys_disks;
2628 pIO_req->DevHandle =
2629 htole16(sc->DD_column_map[column].
2631 cm->cm_desc.SCSIIO.DevHandle =
2634 physLBA = (stripe_unit <<
2635 sc->DD_stripe_exponent) +
2639 * Set upper 4 bytes of LBA to 0. We
2640 * assume that the phys disks are less
2641 * than 2 TB's in size. Then, set the
2644 pIO_req->CDB.CDB32[2] = 0;
2645 pIO_req->CDB.CDB32[3] = 0;
2646 pIO_req->CDB.CDB32[4] = 0;
2647 pIO_req->CDB.CDB32[5] = 0;
2648 ptrLBA = &pIO_req->CDB.CDB32[6];
2649 physLBA_byte = (uint8_t)(physLBA >> 24);
2650 *ptrLBA = physLBA_byte;
2651 ptrLBA = &pIO_req->CDB.CDB32[7];
2652 physLBA_byte = (uint8_t)(physLBA >> 16);
2653 *ptrLBA = physLBA_byte;
2654 ptrLBA = &pIO_req->CDB.CDB32[8];
2655 physLBA_byte = (uint8_t)(physLBA >> 8);
2656 *ptrLBA = physLBA_byte;
2657 ptrLBA = &pIO_req->CDB.CDB32[9];
2658 physLBA_byte = (uint8_t)physLBA;
2659 *ptrLBA = physLBA_byte;
2662 * Set flag that Direct Drive I/O is
2665 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2672 #if __FreeBSD_version >= 900026
/*
 * Completion handler for an SMP passthrough command issued by
 * mpssas_send_smpcmd().  Translates the MPI reply (or its absence) into a
 * CAM status on the originating CCB, syncs/unloads the data map in both
 * directions (request and response buffers share one S/G list) and frees
 * the command.  Elided lines (the 'bailout:'-style exit path, xpt_done)
 * are not visible in this extract; code kept byte-identical.
 */
2674 mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
2676 MPI2_SMP_PASSTHROUGH_REPLY *rpl;
2677 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2681 ccb = cm->cm_complete_data;
2684 * Currently there should be no way we can hit this case. It only
2685 * happens when we have a failure to allocate chain frames, and SMP
2686 * commands require two S/G elements only. That should be handled
2687 * in the standard request size.
2689 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2690 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
2691 __func__, cm->cm_flags);
2692 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2696 rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
/* No reply frame at all: treat as a failed request. */
2698 mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
2699 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2703 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
/* Reassemble the 64-bit SAS address from the little-endian request. */
2704 sasaddr = le32toh(req->SASAddress.Low);
2705 sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;
2707 if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
2708 MPI2_IOCSTATUS_SUCCESS ||
2709 rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
2710 mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
2711 __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
2712 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2716 mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
2717 "%#jx completed successfully\n", __func__,
2718 (uintmax_t)sasaddr);
/* Byte 2 of the SMP response frame is the function result. */
2720 if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
2721 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2723 mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);
2727 * We sync in both directions because we had DMAs in the S/G list
2728 * in both directions.
2730 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2731 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2732 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2733 mps_free_command(sc, cm);
/*
 * Build and dispatch an SMP passthrough request for an XPT_SMP_IO CCB to
 * the given SAS address.  The request and response buffers are described
 * to busdma via a two-element uio (one iovec each), so a single
 * mps_map_command() covers both directions.  Physical addresses and
 * multi-segment request/response S/G lists are rejected up front.
 * Completion is handled by mpssas_smpio_complete().  Elided lines (error
 * labels, xpt_done on the early-return paths) are not visible here.
 */
2738 mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
2740 struct mps_command *cm;
2741 uint8_t *request, *response;
2742 MPI2_SMP_PASSTHROUGH_REQUEST *req;
2743 struct mps_softc *sc;
2750 * XXX We don't yet support physical addresses here.
2752 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
2753 case CAM_DATA_PADDR:
2754 case CAM_DATA_SG_PADDR:
2755 mps_dprint(sc, MPS_ERROR,
2756 "%s: physical addresses not supported\n", __func__);
2757 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2762 * The chip does not support more than one buffer for the
2763 * request or response.
2765 if ((ccb->smpio.smp_request_sglist_cnt > 1)
2766 || (ccb->smpio.smp_response_sglist_cnt > 1)) {
2767 mps_dprint(sc, MPS_ERROR,
2768 "%s: multiple request or response "
2769 "buffer segments not supported for SMP\n",
2771 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2777 * The CAM_SCATTER_VALID flag was originally implemented
2778 * for the XPT_SCSI_IO CCB, which only has one data pointer.
2779 * We have two. So, just take that flag to mean that we
2780 * might have S/G lists, and look at the S/G segment count
2781 * to figure out whether that is the case for each individual
2784 if (ccb->smpio.smp_request_sglist_cnt != 0) {
2785 bus_dma_segment_t *req_sg;
/* Single-entry S/G list: use the segment's address as the buffer. */
2787 req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
2788 request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
2790 request = ccb->smpio.smp_request;
2792 if (ccb->smpio.smp_response_sglist_cnt != 0) {
2793 bus_dma_segment_t *rsp_sg;
2795 rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
2796 response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
2798 response = ccb->smpio.smp_response;
2800 case CAM_DATA_VADDR:
2801 request = ccb->smpio.smp_request;
2802 response = ccb->smpio.smp_response;
2805 mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);
2810 cm = mps_alloc_command(sc);
2812 mps_dprint(sc, MPS_ERROR,
2813 "%s: cannot allocate command\n", __func__);
2814 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
2819 req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
2820 bzero(req, sizeof(*req));
2821 req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
2823 /* Allow the chip to use any route to this SAS address. */
2824 req->PhysicalPort = 0xff;
2826 req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
2828 MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;
2830 mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
2831 "address %#jx\n", __func__, (uintmax_t)sasaddr);
2833 mpi_init_sge(cm, req, &req->SGL);
2836 * Set up a uio to pass into mps_map_command(). This allows us to
2837 * do one map command, and one busdma call in there.
2839 cm->cm_uio.uio_iov = cm->cm_iovec;
2840 cm->cm_uio.uio_iovcnt = 2;
2841 cm->cm_uio.uio_segflg = UIO_SYSSPACE;
2844 * The read/write flag isn't used by busdma, but set it just in
2845 * case. This isn't exactly accurate, either, since we're going in
2848 cm->cm_uio.uio_rw = UIO_WRITE;
2850 cm->cm_iovec[0].iov_base = request;
2851 cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
2852 cm->cm_iovec[1].iov_base = response;
2853 cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;
2855 cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
2856 cm->cm_iovec[1].iov_len;
2859 * Trigger a warning message in mps_data_cb() for the user if we
2860 * wind up exceeding two S/G segments. The chip expects one
2861 * segment for the request and another for the response.
2863 cm->cm_max_segs = 2;
2865 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2866 cm->cm_complete = mpssas_smpio_complete;
2867 cm->cm_complete_data = ccb;
2870 * Tell the mapping code that we're using a uio, and that this is
2871 * an SMP passthrough request. There is a little special-case
2872 * logic there (in mps_data_cb()) to handle the bidirectional
2875 cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
2876 MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;
2878 /* The chip data format is little endian. */
2879 req->SASAddress.High = htole32(sasaddr >> 32);
2880 req->SASAddress.Low = htole32(sasaddr);
2883 * XXX Note that we don't have a timeout/abort mechanism here.
2884 * From the manual, it looks like task management requests only
2885 * work for SCSI IO and SATA passthrough requests. We may need to
2886 * have a mechanism to retry requests in the event of a chip reset
2887 * at least. Hopefully the chip will insure that any errors short
2888 * of that are relayed back to the driver.
2890 error = mps_map_command(sc, cm);
2891 if ((error != 0) && (error != EINPROGRESS)) {
2892 mps_dprint(sc, MPS_ERROR,
2893 "%s: error %d returned from mps_map_command()\n",
/* Mapping failed outright: release the command and fail the CCB. */
2901 mps_free_command(sc, cm);
2902 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: work out which SAS address the SMP request
 * should be routed to, then hand off to mpssas_send_smpcmd().  If the
 * target itself embeds an SMP target its own address is used; otherwise
 * the parent device (presumably the expander) supplies the address via
 * targ->parent_sasaddr (or, under OLD_MPS_PROBE, a looked-up parent
 * target).  Early-return paths (xpt_done) are elided in this extract.
 */
2909 mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
2911 struct mps_softc *sc;
2912 struct mpssas_target *targ;
2913 uint64_t sasaddr = 0;
2918 * Make sure the target exists.
2920 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
2921 ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
2922 targ = &sassc->targets[ccb->ccb_h.target_id];
2923 if (targ->handle == 0x0) {
2924 mps_dprint(sc, MPS_ERROR,
2925 "%s: target %d does not exist!\n", __func__,
2926 ccb->ccb_h.target_id);
2927 mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);
2933 * If this device has an embedded SMP target, we'll talk to it
2935 * figure out what the expander's address is.
2937 if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
2938 sasaddr = targ->sasaddr;
2941 * If we don't have a SAS address for the expander yet, try
2942 * grabbing it from the page 0x83 information cached in the
2943 * transport layer for this target. LSI expanders report the
2944 * expander SAS address as the port-associated SAS address in
2945 * Inquiry VPD page 0x83. Maxim expanders don't report it in page
2948 * XXX KDM disable this for now, but leave it commented out so that
2949 * it is obvious that this is another possible way to get the SAS
2952 * The parent handle method below is a little more reliable, and
2953 * the other benefit is that it works for devices other than SES
2954 * devices. So you can send a SMP request to a da(4) device and it
2955 * will get routed to the expander that device is attached to.
2956 * (Assuming the da(4) device doesn't contain an SMP target...)
2960 sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
2964 * If we still don't have a SAS address for the expander, look for
2965 * the parent device of this device, which is probably the expander.
2968 #ifdef OLD_MPS_PROBE
2969 struct mpssas_target *parent_target;
2972 if (targ->parent_handle == 0x0) {
2973 mps_dprint(sc, MPS_ERROR,
2974 "%s: handle %d does not have a valid "
2975 "parent handle!\n", __func__, targ->handle);
2976 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2979 #ifdef OLD_MPS_PROBE
2980 parent_target = mpssas_find_target_by_handle(sassc, 0,
2981 targ->parent_handle);
2983 if (parent_target == NULL) {
2984 mps_dprint(sc, MPS_ERROR,
2985 "%s: handle %d does not have a valid "
2986 "parent target!\n", __func__, targ->handle);
2987 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2991 if ((parent_target->devinfo &
2992 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
2993 mps_dprint(sc, MPS_ERROR,
2994 "%s: handle %d parent %d does not "
2995 "have an SMP target!\n", __func__,
2996 targ->handle, parent_target->handle);
2997 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3002 sasaddr = parent_target->sasaddr;
3003 #else /* OLD_MPS_PROBE */
3004 if ((targ->parent_devinfo &
3005 MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
3006 mps_dprint(sc, MPS_ERROR,
3007 "%s: handle %d parent %d does not "
3008 "have an SMP target!\n", __func__,
3009 targ->handle, targ->parent_handle);
3010 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3014 if (targ->parent_sasaddr == 0x0) {
3015 mps_dprint(sc, MPS_ERROR,
3016 "%s: handle %d parent handle %d does "
3017 "not have a valid SAS address!\n",
3018 __func__, targ->handle, targ->parent_handle);
3019 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3023 sasaddr = targ->parent_sasaddr;
3024 #endif /* OLD_MPS_PROBE */
/* Still no address: nothing to route the SMP request to. */
3029 mps_dprint(sc, MPS_INFO,
3030 "%s: unable to find SAS address for handle %d\n",
3031 __func__, targ->handle);
3032 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
3035 mpssas_send_smpcmd(sassc, ccb, sasaddr);
3043 #endif //__FreeBSD_version >= 900026
/*
 * Handle an XPT_RESET_DEV CCB: allocate a task-management command,
 * build a SCSI Task Management "target reset" request (with the link
 * reset MsgFlags so a SATA device gets a link reset) and map/send it.
 * The CCB is completed later by mpssas_resetdev_complete().
 */
3046 mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
3048 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3049 struct mps_softc *sc;
3050 struct mps_command *tm;
3051 struct mpssas_target *targ;
3053 MPS_FUNCTRACE(sassc->sc);
3054 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
3056 KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
3057 ("Target %d out of bounds in XPT_RESET_DEV\n",
3058 ccb->ccb_h.target_id));
3060 tm = mps_alloc_command(sc)
3062 mps_dprint(sc, MPS_ERROR,
3063 "command alloc failure in mpssas_action_resetdev\n");
3064 mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
3069 targ = &sassc->targets[ccb->ccb_h.target_id];
3070 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3071 req->DevHandle = htole16(targ->handle);
3072 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3073 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3075 /* SAS Hard Link Reset / SATA Link Reset */
3076 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3079 tm->cm_complete = mpssas_resetdev_complete;
3080 tm->cm_complete_data = ccb;
/* Block further I/O to this target until the reset completes. */
3082 targ->flags |= MPSSAS_TARGET_INRESET;
3084 mps_map_command(sc, tm);
/*
 * Completion handler for the target reset issued by
 * mpssas_action_resetdev().  Maps the task-management reply's response
 * code onto the CCB status, announces the bus device reset to CAM on
 * success, and frees the task-management command.
 */
3088 mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
3090 MPI2_SCSI_TASK_MANAGE_REPLY *resp;
3094 mtx_assert(&sc->mps_mtx, MA_OWNED);
3096 resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
3097 ccb = tm->cm_complete_data;
3100 * Currently there should be no way we can hit this case. It only
3101 * happens when we have a failure to allocate chain frames, and
3102 * task management commands don't have S/G lists.
3104 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3105 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
3107 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
3109 mps_dprint(sc, MPS_ERROR,
3110 "%s: cm_flags = %#x for reset of handle %#04x! "
3111 "This should not happen!\n", __func__, tm->cm_flags,
3113 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3117 mps_dprint(sc, MPS_XINFO,
3118 "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
3119 le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));
3121 if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
3122 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
/* Tell CAM a BDR was delivered so peripherals can recover state. */
3123 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
3127 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
3131 mpssas_free_tm(sc, tm);
/*
 * CAM SIM poll entry point (used when interrupts are unavailable, e.g.
 * during kernel dumps/panics).  Disables MPS_TRACE debugging first so
 * per-command trace output does not swamp the polled path, then runs the
 * interrupt handler directly.
 */
3136 mpssas_poll(struct cam_sim *sim)
3138 struct mpssas_softc *sassc;
3140 sassc = cam_sim_softc(sim);
3142 if (sassc->sc->mps_debug & MPS_TRACE) {
3143 /* frequent debug messages during a panic just slow
3144 * everything down too much.
3146 mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
3147 sassc->sc->mps_debug &= ~MPS_TRACE;
3150 mps_intr_locked(sassc->sc);
/*
 * CAM async-event callback.  Two events are of interest:
 *  - AC_ADVINFO_CHANGED (newer FreeBSD versions): a device's advanced
 *    info changed; if it is long read-capacity data, refresh the cached
 *    per-LUN EEDP (protection information) state via XPT_DEV_ADVINFO.
 *  - AC_FOUND_DEVICE: probe a newly found device for EEDP support.
 * Several lines (switch statement, lock handling, break/default cases)
 * are elided in this extract; code kept byte-identical.
 */
3154 mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
3157 struct mps_softc *sc;
3159 sc = (struct mps_softc *)callback_arg;
3162 #if (__FreeBSD_version >= 1000006) || \
3163 ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
3164 case AC_ADVINFO_CHANGED: {
3165 struct mpssas_target *target;
3166 struct mpssas_softc *sassc;
3167 struct scsi_read_capacity_data_long rcap_buf;
3168 struct ccb_dev_advinfo cdai;
3169 struct mpssas_lun *lun;
3174 buftype = (uintptr_t)arg;
3180 * We're only interested in read capacity data changes.
3182 if (buftype != CDAI_TYPE_RCAPLONG)
3186 * We should have a handle for this, but check to make sure.
3188 KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
3189 ("Target %d out of bounds in mpssas_async\n",
3190 xpt_path_target_id(path)));
3191 target = &sassc->targets[xpt_path_target_id(path)];
3192 if (target->handle == 0)
3195 lunid = xpt_path_lun_id(path);
/* Look up the LUN; create a tracking entry if it is new. */
3197 SLIST_FOREACH(lun, &target->luns, lun_link) {
3198 if (lun->lun_id == lunid) {
3204 if (found_lun == 0) {
3205 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3208 mps_dprint(sc, MPS_ERROR, "Unable to alloc "
3209 "LUN for EEDP support.\n");
3212 lun->lun_id = lunid;
3213 SLIST_INSERT_HEAD(&target->luns, lun, lun_link);
/* Fetch the cached long read-capacity data from the transport layer. */
3216 bzero(&rcap_buf, sizeof(rcap_buf));
3217 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
3218 cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
3219 cdai.ccb_h.flags = CAM_DIR_IN;
3220 cdai.buftype = CDAI_TYPE_RCAPLONG;
3221 #if (__FreeBSD_version >= 1100061) || \
3222 ((__FreeBSD_version >= 1001510) && (__FreeBSD_version < 1100000))
3223 cdai.flags = CDAI_FLAG_NONE;
3227 cdai.bufsiz = sizeof(rcap_buf);
3228 cdai.buf = (uint8_t *)&rcap_buf;
3229 xpt_action((union ccb *)&cdai);
3230 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
3231 cam_release_devq(cdai.ccb_h.path,
3234 if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
3235 && (rcap_buf.prot & SRC16_PROT_EN)) {
3236 switch (rcap_buf.prot & SRC16_P_TYPE) {
3239 lun->eedp_formatted = TRUE;
3240 lun->eedp_block_size =
3241 scsi_4btoul(rcap_buf.length);
3245 lun->eedp_formatted = FALSE;
3246 lun->eedp_block_size = 0;
3250 lun->eedp_formatted = FALSE;
3251 lun->eedp_block_size = 0;
3256 case AC_FOUND_DEVICE: {
3257 struct ccb_getdev *cgd;
3260 mpssas_check_eedp(sc, path, cgd);
3269 #if (__FreeBSD_version < 901503) || \
3270 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
/*
 * Older-FreeBSD path for EEDP detection (compiled only when
 * AC_ADVINFO_CHANGED is unavailable): when a device is found, check the
 * inquiry data for the SPC-3 PROTECT bit and, if set, issue an internal
 * READ CAPACITY (16) whose completion (mpssas_read_cap_done) records
 * whether each LUN is EEDP-formatted.  Early-return/cleanup lines are
 * elided in this extract; code kept byte-identical.
 */
3272 mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
3273 struct ccb_getdev *cgd)
3275 struct mpssas_softc *sassc = sc->sassc;
3276 struct ccb_scsiio *csio;
3277 struct scsi_read_capacity_16 *scsi_cmd;
3278 struct scsi_read_capacity_eedp *rcap_buf;
3280 target_id_t targetid;
3283 struct cam_path *local_path;
3284 struct mpssas_target *target;
3285 struct mpssas_lun *lun;
3290 pathid = cam_sim_path(sassc->sim);
3291 targetid = xpt_path_target_id(path);
3292 lunid = xpt_path_lun_id(path);
3294 KASSERT(targetid < sassc->maxtargets,
3295 ("Target %d out of bounds in mpssas_check_eedp\n",
3297 target = &sassc->targets[targetid];
3298 if (target->handle == 0x0)
3302 * Determine if the device is EEDP capable.
3304 * If this flag is set in the inquiry data,
3305 * the device supports protection information,
3306 * and must support the 16 byte read
3307 * capacity command, otherwise continue without
3308 * sending read cap 16
3310 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0)
3314 * Issue a READ CAPACITY 16 command. This info
3315 * is used to determine if the LUN is formatted
3318 ccb = xpt_alloc_ccb_nowait();
3320 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB "
3321 "for EEDP support.\n");
3325 if (xpt_create_path(&local_path, xpt_periph,
3326 pathid, targetid, lunid) != CAM_REQ_CMP) {
3327 mps_dprint(sc, MPS_ERROR, "Unable to create "
3328 "path for EEDP support\n");
3334 * If LUN is already in list, don't create a new
3338 SLIST_FOREACH(lun, &target->luns, lun_link) {
3339 if (lun->lun_id == lunid) {
3345 lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
3348 mps_dprint(sc, MPS_ERROR,
3349 "Unable to alloc LUN for EEDP support.\n");
3350 xpt_free_path(local_path);
3354 lun->lun_id = lunid;
3355 SLIST_INSERT_HEAD(&target->luns, lun,
3359 xpt_path_string(local_path, path_str, sizeof(path_str));
3361 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n",
3362 path_str, target->handle);
3365 * Issue a READ CAPACITY 16 command for the LUN.
3366 * The mpssas_read_cap_done function will load
3367 * the read cap info into the LUN struct.
3369 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp),
3370 M_MPT2, M_NOWAIT | M_ZERO);
3371 if (rcap_buf == NULL) {
3372 mps_dprint(sc, MPS_FAULT,
3373 "Unable to alloc read capacity buffer for EEDP support.\n");
3374 xpt_free_path(ccb->ccb_h.path);
3378 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT);
3380 csio->ccb_h.func_code = XPT_SCSI_IO;
3381 csio->ccb_h.flags = CAM_DIR_IN;
3382 csio->ccb_h.retry_count = 4;
3383 csio->ccb_h.cbfcnp = mpssas_read_cap_done;
3384 csio->ccb_h.timeout = 60000;
3385 csio->data_ptr = (uint8_t *)rcap_buf;
3386 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp);
3387 csio->sense_len = MPS_SENSE_LEN;
3388 csio->cdb_len = sizeof(*scsi_cmd);
3389 csio->tag_action = MSG_SIMPLE_Q_TAG;
/* 0x9E / SERVICE ACTION IN(16) with READ CAPACITY(16) service action. */
3391 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes;
3392 bzero(scsi_cmd, sizeof(*scsi_cmd));
3393 scsi_cmd->opcode = 0x9E;
3394 scsi_cmd->service_action = SRC16_SERVICE_ACTION;
3395 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp);
/* Stash the softc so the completion callback can find its state. */
3397 ccb->ccb_h.ppriv_ptr1 = sassc;
/*
 * Completion callback for the internal READ CAPACITY (16) issued by
 * mpssas_check_eedp().  Releases the devq (internally generated commands
 * never return through CAM peripherals, so the driver must unfreeze
 * itself), then records per-LUN EEDP state: on any SCSI error the LUN is
 * marked not EEDP-formatted; otherwise the PROT_EN bit of the returned
 * data decides.  Finally frees the data buffer, path and CCB.
 */
3402 mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb)
3404 struct mpssas_softc *sassc;
3405 struct mpssas_target *target;
3406 struct mpssas_lun *lun;
3407 struct scsi_read_capacity_eedp *rcap_buf;
3409 if (done_ccb == NULL)
3412 /* Driver need to release devq, it Scsi command is
3413 * generated by driver internally.
3414 * Currently there is a single place where driver
3415 * calls scsi command internally. In future if driver
3416 * calls more scsi command internally, it needs to release
3417 * devq internally, since those command will not go back to
3420 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) {
3421 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
3422 xpt_release_devq(done_ccb->ccb_h.path,
3423 /*count*/ 1, /*run_queue*/TRUE);
3426 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr;
3429 * Get the LUN ID for the path and look it up in the LUN list for the
3432 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1;
3433 KASSERT(done_ccb->ccb_h.target_id < sassc->maxtargets,
3434 ("Target %d out of bounds in mpssas_read_cap_done\n",
3435 done_ccb->ccb_h.target_id));
3436 target = &sassc->targets[done_ccb->ccb_h.target_id];
3437 SLIST_FOREACH(lun, &target->luns, lun_link) {
3438 if (lun->lun_id != done_ccb->ccb_h.target_lun)
3442 * Got the LUN in the target's LUN list. Fill it in
3443 * with EEDP info. If the READ CAP 16 command had some
3444 * SCSI error (common if command is not supported), mark
3445 * the lun as not supporting EEDP and set the block size
3448 if ((mpssas_get_ccbstatus(done_ccb) != CAM_REQ_CMP)
3449 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) {
3450 lun->eedp_formatted = FALSE;
3451 lun->eedp_block_size = 0;
3455 if (rcap_buf->protect & 0x01) {
3456 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for "
3457 "target ID %d is formatted for EEDP "
3458 "support.\n", done_ccb->ccb_h.target_lun,
3459 done_ccb->ccb_h.target_id);
3460 lun->eedp_formatted = TRUE;
3461 lun->eedp_block_size = scsi_4btoul(rcap_buf->length);
3466 // Finished with this CCB and path.
3467 free(rcap_buf, M_MPT2);
3468 xpt_free_path(done_ccb->ccb_h.path);
3469 xpt_free_ccb(done_ccb);
3471 #endif /* (__FreeBSD_version < 901503) || \
3472 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */
3475 * Set the INRESET flag for this target so that no I/O will be sent to
3476 * the target until the reset has completed. If an I/O request does
3477 * happen, the devq will be frozen. The CCB holds the path which is
3478 * used to release the devq. The devq is released and the CCB is freed
3479 * when the TM completes.
/*
 * NOTE(review): this listing is missing interior source lines (the local
 * declarations, the NULL check after xpt_alloc_ccb_nowait(), the error
 * path of xpt_create_path(), and the function tail), so only the visible
 * statements are annotated here.
 */
3482 mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
3483     struct mpssas_target *target, lun_id_t lun_id)
/*
 * Allocate a CCB without sleeping; per the header comment above, this CCB
 * holds the path that is later used to release the devq when the TM
 * completes.  Presumably the (missing) next line NULL-checks it — confirm.
 */
3488 	ccb = xpt_alloc_ccb_nowait();
3490 	path_id = cam_sim_path(sc->sassc->sim);
/* Build a path to the target/LUN being reset; failure branch is off-listing. */
3491 	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
3492 	    target->tid, lun_id) != CAM_REQ_CMP) {
/*
 * Tie the TM command to its target and set INRESET so no further I/O is
 * issued to this target until the reset finishes.
 */
3496 	tm->cm_targ = target;
3497 	target->flags |= MPSSAS_TARGET_INRESET;
/*
 * Kick off discovery by sending PORT_ENABLE to the IOC.
 * NOTE(review): lines 3504-3506, 3509-3510 and the function tail are
 * missing from this listing.
 */
3503 mpssas_startup(struct mps_softc *sc)
3507 	 * Send the port enable message and set the wait_for_port_enable flag.
3508 	 * This flag helps to keep the simq frozen until all discovery events
/* Cleared again by mpssas_portenable_complete() once discovery finishes. */
3511 	sc->wait_for_port_enable = 1;
3512 	mpssas_send_portenable(sc);
/*
 * Allocate a driver command, fill in an MPI2 PORT_ENABLE request, and queue
 * it to the IOC.  Completion is handled asynchronously by
 * mpssas_portenable_complete().
 * NOTE(review): several lines are missing from this listing (the "{",
 * the body of the allocation-failure branch — presumably a return of an
 * errno such as EBUSY/ENOMEM, confirm — and the function tail).
 */
3517 mpssas_send_portenable(struct mps_softc *sc)
3519 	MPI2_PORT_ENABLE_REQUEST *request;
3520 	struct mps_command *cm;
/* Bail out if no free command frame is available right now. */
3524 	if ((cm = mps_alloc_command(sc)) == NULL)
/* Build the PORT_ENABLE request in place inside the command's request frame. */
3526 	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
3527 	request->Function = MPI2_FUNCTION_PORT_ENABLE;
3528 	request->MsgFlags = 0;
/* Default descriptor type; the reply will arrive via the completion callback. */
3530 	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3531 	cm->cm_complete = mpssas_portenable_complete;
/* Hand the command to the hardware; no S/G list is needed for port enable. */
3535 	mps_map_command(sc, cm);
3536 	mps_dprint(sc, MPS_XINFO,
3537 	    "mps_send_portenable finished cm %p req %p complete %p\n",
3538 	    cm, cm->cm_req, cm->cm_complete);
/*
 * Completion handler for the PORT_ENABLE request sent by
 * mpssas_send_portenable().  Logs any failure, frees the command, pulls
 * WarpDrive config pages, and releases the startup hold on the simq.
 * NOTE(review): interior lines are missing from this listing (the "{",
 * the NULL-reply "if" that pairs with the "else if" at 3564, and the
 * closing brace).
 */
3543 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3545 	MPI2_PORT_ENABLE_REPLY *reply;
3546 	struct mpssas_softc *sassc;
3552 	 * Currently there should be no way we can hit this case.  It only
3553 	 * happens when we have a failure to allocate chain frames, and
3554 	 * port enable commands don't have S/G lists.
3556 	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3557 		mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3558 		    "This should not happen!\n", __func__, cm->cm_flags);
/* Reply frame may be NULL on certain resets; the NULL branch is off-listing. */
3561 	reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3563 		mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
/*
 * NOTE(review): the mask is applied before the byte swap.  IOCStatus is a
 * little-endian field, so this should read
 *     (le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK)
 * As written it masks the raw LE value and then swaps, which misreads the
 * status on big-endian hosts (harmless on little-endian x86/amd64).
 */
3564 	else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3565 	    MPI2_IOCSTATUS_SUCCESS)
3566 		mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3568 	mps_free_command(sc, cm);
3571 	 * Get WarpDrive info after discovery is complete but before the scan
3572 	 * starts.  At this point, all devices are ready to be exposed to the
3573 	 * OS.  If devices should be hidden instead, take them out of the
3574 	 * 'targets' array before the scan.  The devinfo for a disk will have
3575 	 * some info and a volume's will be 0.  Use that to remove disks.
3577 	mps_wd_config_pages(sc);
3580 	 * Done waiting for port enable to complete.  Decrement the refcount.
3581 	 * If refcount is 0, discovery is complete and a rescan of the bus can
3582 	 * take place.  Since the simq was explicitly frozen before port
3583 	 * enable, it must be explicitly released here to keep the
3584 	 * freeze/release count in sync.
/* Wake anyone sleeping on port_enable_complete, then drop the startup hold. */
3586 	sc->wait_for_port_enable = 0;
3587 	sc->port_enable_complete = 1;
3588 	wakeup(&sc->port_enable_complete);
3589 	mpssas_startup_decrement(sassc);
/*
 * Check whether target 'id' appears in the driver's comma-separated
 * exclude-ID list.
 * NOTE(review): the local declarations of 'ids'/'name', the loop body's
 * continue/return statements, and the function tail are missing from this
 * listing.
 */
3593 mpssas_check_id(struct mpssas_softc *sassc, int id)
3595 	struct mps_softc *sc = sassc->sc;
3599 	ids = &sc->exclude_ids[0];
/* strsep() consumes the buffer in place, advancing 'ids' past each comma. */
3600 	while((name = strsep(&ids, ",")) != NULL) {
/* Skip empty tokens produced by consecutive or leading/trailing commas. */
3601 		if (name[0] == '\0')
/* Base 0 lets the exclude list use decimal, octal (0...), or hex (0x...). */
3603 		if (strtol(name, NULL, 0) == (long)id)
/*
 * Resize the per-target array after IOC Facts changes the supported target
 * count: free every target's LUN list and the old array, then allocate a
 * fresh zeroed array of 'maxtargets' entries.
 * NOTE(review): interior lines are missing from this listing (the "{",
 * the free() of each LUN inside the SLIST loop, and the panic()'s
 * remaining arguments / closing braces).
 */
3611 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3613 	struct mpssas_softc *sassc;
3614 	struct mpssas_lun *lun, *lun_tmp;
3615 	struct mpssas_target *targ;
3620 	 * The number of targets is based on IOC Facts, so free all of
3621 	 * the allocated LUNs for each target and then the target buffer
/* Safe iteration variant: each LUN may be freed while walking the list. */
3624 	for (i=0; i< maxtargets; i++) {
3625 		targ = &sassc->targets[i];
3626 		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3630 	free(sassc->targets, M_MPT2);
3632 	sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3633 	    M_MPT2, M_WAITOK|M_ZERO);
/*
 * NOTE(review): dead code — malloc(9) with M_WAITOK sleeps until memory is
 * available and never returns NULL, so this check and panic are
 * unreachable (harmless, but could be removed).
 */
3634 	if (!sassc->targets) {
3635 		panic("%s failed to alloc targets with error %d\n",