/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009 Yahoo! Inc.
 * Copyright (c) 2011-2015 LSI Corp.
 * Copyright (c) 2013-2015 Avago Technologies
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
 */
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 /* Communications core for Avago Technologies (LSI) MPT2 */
40 /* TODO Move headers to mpsvar */
41 #include <sys/types.h>
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
45 #include <sys/selinfo.h>
46 #include <sys/module.h>
50 #include <sys/malloc.h>
52 #include <sys/sysctl.h>
53 #include <sys/endian.h>
54 #include <sys/queue.h>
55 #include <sys/kthread.h>
56 #include <sys/taskqueue.h>
59 #include <machine/bus.h>
60 #include <machine/resource.h>
63 #include <machine/stdarg.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_xpt.h>
68 #include <cam/cam_debug.h>
69 #include <cam/cam_sim.h>
70 #include <cam/cam_xpt_sim.h>
71 #include <cam/cam_xpt_periph.h>
72 #include <cam/cam_periph.h>
73 #include <cam/scsi/scsi_all.h>
74 #include <cam/scsi/scsi_message.h>
75 #include <cam/scsi/smp_all.h>
77 #include <dev/mps/mpi/mpi2_type.h>
78 #include <dev/mps/mpi/mpi2.h>
79 #include <dev/mps/mpi/mpi2_ioc.h>
80 #include <dev/mps/mpi/mpi2_sas.h>
81 #include <dev/mps/mpi/mpi2_cnfg.h>
82 #include <dev/mps/mpi/mpi2_init.h>
83 #include <dev/mps/mpi/mpi2_tool.h>
84 #include <dev/mps/mps_ioctl.h>
85 #include <dev/mps/mpsvar.h>
86 #include <dev/mps/mps_table.h>
87 #include <dev/mps/mps_sas.h>
90 * static array to check SCSI OpCode for EEDP protection bits
92 #define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
93 #define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
94 #define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
95 static uint8_t op_code_prot[256] = {
96 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
97 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
98 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
99 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
100 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
103 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
104 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
105 0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
106 0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
107 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
109 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
110 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
111 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
114 MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");
116 static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
117 static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
118 static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
119 static void mpssas_poll(struct cam_sim *sim);
120 static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm,
121 struct mps_command *cm);
122 static void mpssas_scsiio_timeout(void *data);
123 static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
124 static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
125 struct mps_command *cm, union ccb *ccb);
126 static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
127 static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
128 static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
129 static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
130 static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
132 static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
133 static void mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
134 static void mpssas_async(void *callback_arg, uint32_t code,
135 struct cam_path *path, void *arg);
136 static int mpssas_send_portenable(struct mps_softc *sc);
137 static void mpssas_portenable_complete(struct mps_softc *sc,
138 struct mps_command *cm);
140 struct mpssas_target *
141 mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
143 struct mpssas_target *target;
146 for (i = start; i < sassc->maxtargets; i++) {
147 target = &sassc->targets[i];
148 if (target->handle == handle)
155 /* we need to freeze the simq during attach and diag reset, to avoid failing
156 * commands before device handles have been found by discovery. Since
157 * discovery involves reading config pages and possibly sending commands,
158 * discovery actions may continue even after we receive the end of discovery
159 * event, so refcount discovery actions instead of assuming we can unfreeze
160 * the simq when we get the event.
163 mpssas_startup_increment(struct mpssas_softc *sassc)
165 MPS_FUNCTRACE(sassc->sc);
167 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
168 if (sassc->startup_refcount++ == 0) {
169 /* just starting, freeze the simq */
170 mps_dprint(sassc->sc, MPS_INIT,
171 "%s freezing simq\n", __func__);
173 xpt_freeze_simq(sassc->sim, 1);
175 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
176 sassc->startup_refcount);
181 mpssas_release_simq_reinit(struct mpssas_softc *sassc)
183 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
184 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
185 xpt_release_simq(sassc->sim, 1);
186 mps_dprint(sassc->sc, MPS_INFO, "Unfreezing SIM queue\n");
191 mpssas_startup_decrement(struct mpssas_softc *sassc)
193 MPS_FUNCTRACE(sassc->sc);
195 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) {
196 if (--sassc->startup_refcount == 0) {
197 /* finished all discovery-related actions, release
198 * the simq and rescan for the latest topology.
200 mps_dprint(sassc->sc, MPS_INIT,
201 "%s releasing simq\n", __func__);
202 sassc->flags &= ~MPSSAS_IN_STARTUP;
203 xpt_release_simq(sassc->sim, 1);
206 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__,
207 sassc->startup_refcount);
212 * The firmware requires us to stop sending commands when we're doing task
214 * XXX The logic for serializing the device has been made lazy and moved to
215 * mpssas_prepare_for_tm().
218 mpssas_alloc_tm(struct mps_softc *sc)
220 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
221 struct mps_command *tm;
223 tm = mps_alloc_high_priority_command(sc);
227 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
228 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
233 mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
239 * For TM's the devq is frozen for the device. Unfreeze it here and
240 * free the resources used for freezing the devq. Must clear the
241 * INRESET flag as well or scsi I/O will not work.
244 mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
245 "Unfreezing devq for target ID %d\n",
247 tm->cm_targ->flags &= ~MPSSAS_TARGET_INRESET;
248 xpt_release_devq(tm->cm_ccb->ccb_h.path, 1, TRUE);
249 xpt_free_path(tm->cm_ccb->ccb_h.path);
250 xpt_free_ccb(tm->cm_ccb);
253 mps_free_high_priority_command(sc, tm);
257 mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
259 struct mpssas_softc *sassc = sc->sassc;
261 target_id_t targetid;
265 pathid = cam_sim_path(sassc->sim);
267 targetid = CAM_TARGET_WILDCARD;
269 targetid = targ - sassc->targets;
272 * Allocate a CCB and schedule a rescan.
274 ccb = xpt_alloc_ccb_nowait();
276 mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
280 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
281 targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
282 mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
287 if (targetid == CAM_TARGET_WILDCARD)
288 ccb->ccb_h.func_code = XPT_SCAN_BUS;
290 ccb->ccb_h.func_code = XPT_SCAN_TGT;
292 mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
297 mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
307 /* No need to be in here if debugging isn't enabled */
308 if ((cm->cm_sc->mps_debug & level) == 0)
311 sbuf_new(&sb, str, sizeof(str), 0);
315 if (cm->cm_ccb != NULL) {
316 xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
318 sbuf_cat(&sb, path_str);
319 if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
320 scsi_command_string(&cm->cm_ccb->csio, &sb);
321 sbuf_printf(&sb, "length %d ",
322 cm->cm_ccb->csio.dxfer_len);
326 sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
327 cam_sim_name(cm->cm_sc->sassc->sim),
328 cam_sim_unit(cm->cm_sc->sassc->sim),
329 cam_sim_bus(cm->cm_sc->sassc->sim),
330 cm->cm_targ ? cm->cm_targ->tid : 0xFFFFFFFF,
334 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID);
335 sbuf_vprintf(&sb, fmt, ap);
337 mps_print_field(cm->cm_sc, "%s", sbuf_data(&sb));
343 mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm)
345 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
346 struct mpssas_target *targ;
351 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
352 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
356 /* XXX retry the remove after the diag reset completes? */
357 mps_dprint(sc, MPS_FAULT,
358 "%s NULL reply resetting device 0x%04x\n", __func__,
360 mpssas_free_tm(sc, tm);
364 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
365 MPI2_IOCSTATUS_SUCCESS) {
366 mps_dprint(sc, MPS_ERROR,
367 "IOCStatus = 0x%x while resetting device 0x%x\n",
368 le16toh(reply->IOCStatus), handle);
371 mps_dprint(sc, MPS_XINFO,
372 "Reset aborted %u commands\n", reply->TerminationCount);
373 mps_free_reply(sc, tm->cm_reply_data);
374 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
376 mps_dprint(sc, MPS_XINFO,
377 "clearing target %u handle 0x%04x\n", targ->tid, handle);
380 * Don't clear target if remove fails because things will get confusing.
381 * Leave the devname and sasaddr intact so that we know to avoid reusing
382 * this target id if possible, and so we can assign the same target id
383 * to this device if it comes back in the future.
385 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
386 MPI2_IOCSTATUS_SUCCESS) {
389 targ->encl_handle = 0x0;
390 targ->encl_slot = 0x0;
391 targ->exp_dev_handle = 0x0;
393 targ->linkrate = 0x0;
398 mpssas_free_tm(sc, tm);
402 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal.
403 * Otherwise Volume Delete is same as Bare Drive Removal.
406 mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle)
408 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
409 struct mps_softc *sc;
410 struct mps_command *tm;
411 struct mpssas_target *targ = NULL;
413 MPS_FUNCTRACE(sassc->sc);
418 * If this is a WD controller, determine if the disk should be exposed
419 * to the OS or not. If disk should be exposed, return from this
420 * function without doing anything.
422 if (sc->WD_available && (sc->WD_hide_expose ==
423 MPS_WD_EXPOSE_ALWAYS)) {
428 targ = mpssas_find_target_by_handle(sassc, 0, handle);
430 /* FIXME: what is the action? */
431 /* We don't know about this device? */
432 mps_dprint(sc, MPS_ERROR,
433 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle);
437 targ->flags |= MPSSAS_TARGET_INREMOVAL;
439 tm = mpssas_alloc_tm(sc);
441 mps_dprint(sc, MPS_ERROR,
442 "%s: command alloc failure\n", __func__);
446 mpssas_rescan_target(sc, targ);
448 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
449 req->DevHandle = targ->handle;
450 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
452 /* SAS Hard Link Reset / SATA Link Reset */
453 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
457 tm->cm_complete = mpssas_remove_volume;
458 tm->cm_complete_data = (void *)(uintptr_t)handle;
460 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
461 __func__, targ->tid);
462 mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
464 mps_map_command(sc, tm);
468 * The MPT2 firmware performs debounce on the link to avoid transient link
469 * errors and false removals. When it does decide that link has been lost
470 * and a device need to go away, it expects that the host will perform a
471 * target reset and then an op remove. The reset has the side-effect of
472 * aborting any outstanding requests for the device, which is required for
473 * the op-remove to succeed. It's not clear if the host should check for
474 * the device coming back alive after the reset.
477 mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle)
479 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
480 struct mps_softc *sc;
481 struct mps_command *cm;
482 struct mpssas_target *targ = NULL;
484 MPS_FUNCTRACE(sassc->sc);
488 targ = mpssas_find_target_by_handle(sassc, 0, handle);
490 /* FIXME: what is the action? */
491 /* We don't know about this device? */
492 mps_dprint(sc, MPS_ERROR,
493 "%s : invalid handle 0x%x \n", __func__, handle);
497 targ->flags |= MPSSAS_TARGET_INREMOVAL;
499 cm = mpssas_alloc_tm(sc);
501 mps_dprint(sc, MPS_ERROR,
502 "%s: command alloc failure\n", __func__);
506 mpssas_rescan_target(sc, targ);
508 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
509 req->DevHandle = htole16(targ->handle);
510 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
512 /* SAS Hard Link Reset / SATA Link Reset */
513 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
517 cm->cm_complete = mpssas_remove_device;
518 cm->cm_complete_data = (void *)(uintptr_t)handle;
520 mps_dprint(sc, MPS_INFO, "%s: Sending reset for target ID %d\n",
521 __func__, targ->tid);
522 mpssas_prepare_for_tm(sc, cm, targ, CAM_LUN_WILDCARD);
524 mps_map_command(sc, cm);
528 mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
530 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
531 MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
532 struct mpssas_target *targ;
537 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
538 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
542 * Currently there should be no way we can hit this case. It only
543 * happens when we have a failure to allocate chain frames, and
544 * task management commands don't have S/G lists.
546 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
547 mps_dprint(sc, MPS_ERROR,
548 "%s: cm_flags = %#x for remove of handle %#04x! "
549 "This should not happen!\n", __func__, tm->cm_flags,
554 /* XXX retry the remove after the diag reset completes? */
555 mps_dprint(sc, MPS_FAULT,
556 "%s NULL reply resetting device 0x%04x\n", __func__,
558 mpssas_free_tm(sc, tm);
562 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
563 MPI2_IOCSTATUS_SUCCESS) {
564 mps_dprint(sc, MPS_ERROR,
565 "IOCStatus = 0x%x while resetting device 0x%x\n",
566 le16toh(reply->IOCStatus), handle);
569 mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
570 le32toh(reply->TerminationCount));
571 mps_free_reply(sc, tm->cm_reply_data);
572 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */
574 /* Reuse the existing command */
575 req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
576 memset(req, 0, sizeof(*req));
577 req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
578 req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
579 req->DevHandle = htole16(handle);
581 tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
582 tm->cm_complete = mpssas_remove_complete;
583 tm->cm_complete_data = (void *)(uintptr_t)handle;
586 * Wait to send the REMOVE_DEVICE until all the commands have cleared.
587 * They should be aborted or time out and we'll kick thus off there
590 if (TAILQ_FIRST(&targ->commands) == NULL) {
591 mps_dprint(sc, MPS_INFO, "No pending commands: starting remove_device\n");
592 mps_map_command(sc, tm);
593 targ->pending_remove_tm = NULL;
595 targ->pending_remove_tm = tm;
598 mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
603 mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
605 MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
607 struct mpssas_target *targ;
608 struct mpssas_lun *lun;
612 reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
613 handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
617 * At this point, we should have no pending commands for the target.
618 * The remove target has just completed.
620 KASSERT(TAILQ_FIRST(&targ->commands) == NULL,
621 ("%s: no commands should be pending\n", __func__));
624 * Currently there should be no way we can hit this case. It only
625 * happens when we have a failure to allocate chain frames, and
626 * task management commands don't have S/G lists.
628 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
629 mps_dprint(sc, MPS_XINFO,
630 "%s: cm_flags = %#x for remove of handle %#04x! "
631 "This should not happen!\n", __func__, tm->cm_flags,
633 mpssas_free_tm(sc, tm);
638 /* most likely a chip reset */
639 mps_dprint(sc, MPS_FAULT,
640 "%s NULL reply removing device 0x%04x\n", __func__, handle);
641 mpssas_free_tm(sc, tm);
645 mps_dprint(sc, MPS_XINFO,
646 "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
647 handle, le16toh(reply->IOCStatus));
650 * Don't clear target if remove fails because things will get confusing.
651 * Leave the devname and sasaddr intact so that we know to avoid reusing
652 * this target id if possible, and so we can assign the same target id
653 * to this device if it comes back in the future.
655 if ((le16toh(reply->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
656 MPI2_IOCSTATUS_SUCCESS) {
658 targ->encl_handle = 0x0;
659 targ->encl_slot = 0x0;
660 targ->exp_dev_handle = 0x0;
662 targ->linkrate = 0x0;
666 while(!SLIST_EMPTY(&targ->luns)) {
667 lun = SLIST_FIRST(&targ->luns);
668 SLIST_REMOVE_HEAD(&targ->luns, lun_link);
673 mpssas_free_tm(sc, tm);
677 mpssas_register_events(struct mps_softc *sc)
679 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
682 setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
683 setbit(events, MPI2_EVENT_SAS_DISCOVERY);
684 setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
685 setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
686 setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
687 setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
688 setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
689 setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
690 setbit(events, MPI2_EVENT_IR_VOLUME);
691 setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
692 setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
693 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
695 mps_register_events(sc, events, mpssas_evt_handler, NULL,
696 &sc->sassc->mpssas_eh);
702 mps_attach_sas(struct mps_softc *sc)
704 struct mpssas_softc *sassc;
706 int unit, error = 0, reqs;
709 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
711 sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
714 * XXX MaxTargets could change during a reinit. Since we don't
715 * resize the targets[] array during such an event, cache the value
716 * of MaxTargets here so that we don't get into trouble later. This
717 * should move into the reinit logic.
719 sassc->maxtargets = sc->facts->MaxTargets + sc->facts->MaxVolumes;
720 sassc->targets = malloc(sizeof(struct mpssas_target) *
721 sassc->maxtargets, M_MPT2, M_WAITOK|M_ZERO);
725 reqs = sc->num_reqs - sc->num_prireqs - 1;
726 if ((sassc->devq = cam_simq_alloc(reqs)) == NULL) {
727 mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
732 unit = device_get_unit(sc->mps_dev);
733 sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
734 unit, &sc->mps_mtx, reqs, reqs, sassc->devq);
735 if (sassc->sim == NULL) {
736 mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot allocate SIM\n");
741 TAILQ_INIT(&sassc->ev_queue);
743 /* Initialize taskqueue for Event Handling */
744 TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
745 sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
746 taskqueue_thread_enqueue, &sassc->ev_tq);
747 taskqueue_start_threads(&sassc->ev_tq, 1, PRIBIO, "%s taskq",
748 device_get_nameunit(sc->mps_dev));
753 * XXX There should be a bus for every port on the adapter, but since
754 * we're just going to fake the topology for now, we'll pretend that
755 * everything is just a target on a single bus.
757 if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
758 mps_dprint(sc, MPS_INIT|MPS_ERROR,
759 "Error %d registering SCSI bus\n", error);
765 * Assume that discovery events will start right away.
767 * Hold off boot until discovery is complete.
769 sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
770 sc->sassc->startup_refcount = 0;
771 mpssas_startup_increment(sassc);
776 * Register for async events so we can determine the EEDP
777 * capabilities of devices.
779 status = xpt_create_path(&sassc->path, /*periph*/NULL,
780 cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
782 if (status != CAM_REQ_CMP) {
783 mps_dprint(sc, MPS_ERROR|MPS_INIT,
784 "Error %#x creating sim path\n", status);
789 event = AC_ADVINFO_CHANGED;
790 status = xpt_register_async(event, mpssas_async, sc,
792 if (status != CAM_REQ_CMP) {
793 mps_dprint(sc, MPS_ERROR,
794 "Error %#x registering async handler for "
795 "AC_ADVINFO_CHANGED events\n", status);
796 xpt_free_path(sassc->path);
800 if (status != CAM_REQ_CMP) {
802 * EEDP use is the exception, not the rule.
803 * Warn the user, but do not fail to attach.
805 mps_printf(sc, "EEDP capabilities disabled.\n");
808 mpssas_register_events(sc);
813 mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
818 mps_detach_sas(struct mps_softc *sc)
820 struct mpssas_softc *sassc;
821 struct mpssas_lun *lun, *lun_tmp;
822 struct mpssas_target *targ;
827 if (sc->sassc == NULL)
831 mps_deregister_events(sc, sassc->mpssas_eh);
834 * Drain and free the event handling taskqueue with the lock
835 * unheld so that any parallel processing tasks drain properly
836 * without deadlocking.
838 if (sassc->ev_tq != NULL)
839 taskqueue_free(sassc->ev_tq);
841 /* Deregister our async handler */
842 if (sassc->path != NULL) {
843 xpt_register_async(0, mpssas_async, sc, sassc->path);
844 xpt_free_path(sassc->path);
848 /* Make sure CAM doesn't wedge if we had to bail out early. */
851 while (sassc->startup_refcount != 0)
852 mpssas_startup_decrement(sassc);
854 if (sassc->flags & MPSSAS_IN_STARTUP)
855 xpt_release_simq(sassc->sim, 1);
857 if (sassc->sim != NULL) {
858 xpt_bus_deregister(cam_sim_path(sassc->sim));
859 cam_sim_free(sassc->sim, FALSE);
864 if (sassc->devq != NULL)
865 cam_simq_free(sassc->devq);
867 for(i=0; i< sassc->maxtargets ;i++) {
868 targ = &sassc->targets[i];
869 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
873 free(sassc->targets, M_MPT2);
881 mpssas_discovery_end(struct mpssas_softc *sassc)
883 struct mps_softc *sc = sassc->sc;
888 * After discovery has completed, check the mapping table for any
889 * missing devices and update their missing counts. Only do this once
890 * whenever the driver is initialized so that missing counts aren't
891 * updated unnecessarily. Note that just because discovery has
892 * completed doesn't mean that events have been processed yet. The
893 * check_devices function is a callout timer that checks if ALL devices
894 * are missing. If so, it will wait a little longer for events to
895 * complete and keep resetting itself until some device in the mapping
896 * table is not missing, meaning that event processing has started.
898 if (sc->track_mapping_events) {
899 mps_dprint(sc, MPS_XINFO | MPS_MAPPING, "Discovery has "
900 "completed. Check for missing devices in the mapping "
902 callout_reset(&sc->device_check_callout,
903 MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices,
909 mpssas_action(struct cam_sim *sim, union ccb *ccb)
911 struct mpssas_softc *sassc;
913 sassc = cam_sim_softc(sim);
915 MPS_FUNCTRACE(sassc->sc);
916 mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
917 ccb->ccb_h.func_code);
918 mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);
920 switch (ccb->ccb_h.func_code) {
923 struct ccb_pathinq *cpi = &ccb->cpi;
924 struct mps_softc *sc = sassc->sc;
926 cpi->version_num = 1;
927 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
928 cpi->target_sprt = 0;
929 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
930 cpi->hba_eng_cnt = 0;
931 cpi->max_target = sassc->maxtargets - 1;
935 * initiator_id is set here to an ID outside the set of valid
936 * target IDs (including volumes).
938 cpi->initiator_id = sassc->maxtargets;
939 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
940 strlcpy(cpi->hba_vid, "Avago Tech", HBA_IDLEN);
941 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
942 cpi->unit_number = cam_sim_unit(sim);
943 cpi->bus_id = cam_sim_bus(sim);
944 cpi->base_transfer_speed = 150000;
945 cpi->transport = XPORT_SAS;
946 cpi->transport_version = 0;
947 cpi->protocol = PROTO_SCSI;
948 cpi->protocol_version = SCSI_REV_SPC;
949 cpi->maxio = sc->maxio;
950 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
953 case XPT_GET_TRAN_SETTINGS:
955 struct ccb_trans_settings *cts;
956 struct ccb_trans_settings_sas *sas;
957 struct ccb_trans_settings_scsi *scsi;
958 struct mpssas_target *targ;
961 sas = &cts->xport_specific.sas;
962 scsi = &cts->proto_specific.scsi;
964 KASSERT(cts->ccb_h.target_id < sassc->maxtargets,
965 ("Target %d out of bounds in XPT_GET_TRANS_SETTINGS\n",
966 cts->ccb_h.target_id));
967 targ = &sassc->targets[cts->ccb_h.target_id];
968 if (targ->handle == 0x0) {
969 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
973 cts->protocol_version = SCSI_REV_SPC2;
974 cts->transport = XPORT_SAS;
975 cts->transport_version = 0;
977 sas->valid = CTS_SAS_VALID_SPEED;
978 switch (targ->linkrate) {
980 sas->bitrate = 150000;
983 sas->bitrate = 300000;
986 sas->bitrate = 600000;
992 cts->protocol = PROTO_SCSI;
993 scsi->valid = CTS_SCSI_VALID_TQ;
994 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
996 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
999 case XPT_CALC_GEOMETRY:
1000 cam_calc_geometry(&ccb->ccg, /*extended*/1);
1001 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1004 mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
1005 mpssas_action_resetdev(sassc, ccb);
1010 mps_dprint(sassc->sc, MPS_XINFO,
1011 "mpssas_action faking success for abort or reset\n");
1012 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
1015 mpssas_action_scsiio(sassc, ccb);
1018 mpssas_action_smpio(sassc, ccb);
1021 mpssas_set_ccbstatus(ccb, CAM_FUNC_NOTAVAIL);
1029 mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
1030 target_id_t target_id, lun_id_t lun_id)
1032 path_id_t path_id = cam_sim_path(sc->sassc->sim);
1033 struct cam_path *path;
1035 mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %jx\n", __func__,
1036 ac_code, target_id, (uintmax_t)lun_id);
1038 if (xpt_create_path(&path, NULL,
1039 path_id, target_id, lun_id) != CAM_REQ_CMP) {
1040 mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
1045 xpt_async(ac_code, path, NULL);
1046 xpt_free_path(path);
1050 mpssas_complete_all_commands(struct mps_softc *sc)
1052 struct mps_command *cm;
1057 mtx_assert(&sc->mps_mtx, MA_OWNED);
1059 /* complete all commands with a NULL reply */
1060 for (i = 1; i < sc->num_reqs; i++) {
1061 cm = &sc->commands[i];
1062 if (cm->cm_state == MPS_CM_STATE_FREE)
1065 cm->cm_state = MPS_CM_STATE_BUSY;
1066 cm->cm_reply = NULL;
1069 if (cm->cm_flags & MPS_CM_FLAGS_SATA_ID_TIMEOUT) {
1071 free(cm->cm_data, M_MPT2);
1075 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1076 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1078 if (cm->cm_complete != NULL) {
1079 mpssas_log_command(cm, MPS_RECOVERY,
1080 "completing cm %p state %x ccb %p for diag reset\n",
1081 cm, cm->cm_state, cm->cm_ccb);
1083 cm->cm_complete(sc, cm);
1085 } else if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
1086 mpssas_log_command(cm, MPS_RECOVERY,
1087 "waking up cm %p state %x ccb %p for diag reset\n",
1088 cm, cm->cm_state, cm->cm_ccb);
1093 if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
1094 /* this should never happen, but if it does, log */
1095 mpssas_log_command(cm, MPS_RECOVERY,
1096 "cm %p state %x flags 0x%x ccb %p during diag "
1097 "reset\n", cm, cm->cm_state, cm->cm_flags,
1102 sc->io_cmds_active = 0;
1106 mpssas_handle_reinit(struct mps_softc *sc)
1110 /* Go back into startup mode and freeze the simq, so that CAM
1111 * doesn't send any commands until after we've rediscovered all
1112 * targets and found the proper device handles for them.
1114 * After the reset, portenable will trigger discovery, and after all
1115 * discovery-related activities have finished, the simq will be
1118 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__);
1119 sc->sassc->flags |= MPSSAS_IN_STARTUP;
1120 sc->sassc->flags |= MPSSAS_IN_DISCOVERY;
1121 mpssas_startup_increment(sc->sassc);
1123 /* notify CAM of a bus reset */
1124 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
1127 /* complete and cleanup after all outstanding commands */
1128 mpssas_complete_all_commands(sc);
1130 mps_dprint(sc, MPS_INIT,
1131 "%s startup %u after command completion\n", __func__,
1132 sc->sassc->startup_refcount);
1134 /* zero all the target handles, since they may change after the
1135 * reset, and we have to rediscover all the targets and use the new
1138 for (i = 0; i < sc->sassc->maxtargets; i++) {
1139 if (sc->sassc->targets[i].outstanding != 0)
1140 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n",
1141 i, sc->sassc->targets[i].outstanding);
1142 sc->sassc->targets[i].handle = 0x0;
1143 sc->sassc->targets[i].exp_dev_handle = 0x0;
1144 sc->sassc->targets[i].outstanding = 0;
1145 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET;
1150 mpssas_tm_timeout(void *data)
1152 struct mps_command *tm = data;
1153 struct mps_softc *sc = tm->cm_sc;
1155 mtx_assert(&sc->mps_mtx, MA_OWNED);
1157 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY,
1158 "task mgmt %p timed out\n", tm);
1160 KASSERT(tm->cm_state == MPS_CM_STATE_INQUEUE,
1161 ("command not inqueue, state = %u\n", tm->cm_state));
1163 tm->cm_state = MPS_CM_STATE_BUSY;
/*
 * Completion handler for a LOGICAL UNIT RESET task management request.
 *
 * Stops the TM timeout callout, validates the reply, then decides the
 * next recovery step: if no commands remain outstanding on the LUN the
 * reset worked, so announce the BDR to CAM and either continue aborting
 * other timed-out commands on this target or release the TM; if commands
 * remain, the reset effectively failed and recovery escalates to a
 * target reset, reusing the same TM command.
 */
1168 mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1170 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1171 unsigned int cm_count = 0;
1172 struct mps_command *cm;
1173 struct mpssas_target *targ;
/* The TM completed; make sure mpssas_tm_timeout() can no longer fire. */
1175 callout_stop(&tm->cm_callout);
1177 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1181 * Currently there should be no way we can hit this case. It only
1182 * happens when we have a failure to allocate chain frames, and
1183 * task management commands don't have S/G lists.
1184 * XXXSL So should it be an assertion?
1186 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1187 mps_dprint(sc, MPS_RECOVERY|MPS_ERROR,
1188 "%s: cm_flags = %#x for LUN reset! "
1189 "This should not happen!\n", __func__, tm->cm_flags);
1190 mpssas_free_tm(sc, tm);
/*
 * A NULL reply is only expected when a diag reset flushed the command;
 * otherwise something is wrong with the controller.
 */
1194 if (reply == NULL) {
1195 mps_dprint(sc, MPS_RECOVERY, "NULL reset reply for tm %p\n",
1197 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1198 /* this completion was due to a reset, just cleanup */
1199 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1200 "reset, ignoring NULL LUN reset reply\n");
1202 mpssas_free_tm(sc, tm);
1205 /* we should have gotten a reply. */
1206 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1207 "LUN reset attempt, resetting controller\n");
1213 mps_dprint(sc, MPS_RECOVERY,
1214 "logical unit reset status 0x%x code 0x%x count %u\n",
1215 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1216 le32toh(reply->TerminationCount));
1219 * See if there are any outstanding commands for this LUN.
1220 * This could be made more efficient by using a per-LU data
1221 * structure of some sort.
1223 TAILQ_FOREACH(cm, &targ->commands, cm_link) {
1224 if (cm->cm_lun == tm->cm_lun)
1228 if (cm_count == 0) {
1229 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1230 "Finished recovery after LUN reset for target %u\n",
/* Tell CAM a Bus Device Reset was delivered to this target/LUN. */
1233 mpssas_announce_reset(sc, AC_SENT_BDR, targ->tid, tm->cm_lun);
1236 * We've finished recovery for this logical unit. check and
1237 * see if some other logical unit has a timedout command
1238 * that needs to be processed.
1240 cm = TAILQ_FIRST(&targ->timedout_commands);
1242 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1243 "More commands to abort for target %u\n",
/* Reuse this TM to abort the next timed-out command. */
1245 mpssas_send_abort(sc, tm, cm);
1248 mpssas_free_tm(sc, tm);
1252 * If we still have commands for this LUN, the reset
1253 * effectively failed, regardless of the status reported.
1254 * Escalate to a target reset.
1256 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1257 "logical unit reset complete for target %u, but still "
1258 "have %u command(s), sending target reset\n", targ->tid,
1260 mpssas_send_reset(sc, tm,
1261 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
/*
 * Completion handler for a TARGET RESET task management request.
 *
 * Stops the TM timeout callout and validates the reply.  If the target
 * has no outstanding commands the reset succeeded: announce the BDR to
 * CAM and free the TM.  If commands are still outstanding the reset
 * effectively failed regardless of reported status, and recovery
 * escalates to a controller reset (the reinit call follows the visible
 * span).
 *
 * Fix: the NULL-reply message used the malformed conversion "%pi",
 * which prints the pointer followed by a stray literal 'i'; the
 * matching message in mpssas_abort_complete() uses plain "%p".
 */
1266 mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
1268 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1269 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1270 struct mpssas_target *targ;
/* The TM completed; make sure mpssas_tm_timeout() can no longer fire. */
1272 callout_stop(&tm->cm_callout);
1274 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1275 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1279 * Currently there should be no way we can hit this case. It only
1280 * happens when we have a failure to allocate chain frames, and
1281 * task management commands don't have S/G lists.
1283 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1284 mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
1285 "This should not happen!\n", __func__, tm->cm_flags);
1286 mpssas_free_tm(sc, tm);
/*
 * A NULL reply is only expected when a diag reset flushed the command;
 * otherwise something is wrong with the controller.
 */
1290 if (reply == NULL) {
1291 mps_dprint(sc, MPS_RECOVERY,
1292 "NULL target reset reply for tm %p TaskMID %u\n",
1293 tm, le16toh(req->TaskMID));
1294 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1295 /* this completion was due to a reset, just cleanup */
1296 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1297 "reset, ignoring NULL target reset reply\n");
1299 mpssas_free_tm(sc, tm);
1301 /* we should have gotten a reply. */
1302 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1303 "target reset attempt, resetting controller\n");
1309 mps_dprint(sc, MPS_RECOVERY,
1310 "target reset status 0x%x code 0x%x count %u\n",
1311 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1312 le32toh(reply->TerminationCount));
1314 if (targ->outstanding == 0) {
1315 /* we've finished recovery for this target and all
1316 * of its logical units.
1318 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1319 "Finished reset recovery for target %u\n", targ->tid);
/* Tell CAM a Bus Device Reset was delivered to this target. */
1321 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
1325 mpssas_free_tm(sc, tm);
1328 * After a target reset, if this target still has
1329 * outstanding commands, the reset effectively failed,
1330 * regardless of the status reported. escalate.
1332 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1333 "Target reset complete for target %u, but still have %u "
1334 "command(s), resetting controller\n", targ->tid,
/* Seconds to allow a LU/target reset TM to complete (used as N * hz). */
1340 #define MPS_RESET_TIMEOUT 30
/*
 * Build and send a reset task management request of the given type
 * (LOGICAL UNIT RESET or TARGET RESET) using the TM command 'tm'.
 *
 * Selects the completion handler matching the reset type, prepares the
 * target/devq state via mpssas_prepare_for_tm(), arms the TM timeout
 * callout, and maps the command to the hardware.  Bails out early with
 * an error if the target has no device handle.  The return path(s) are
 * outside the visible span.
 */
1343 mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
1345 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1346 struct mpssas_target *target;
1349 target = tm->cm_targ;
/* No device handle means the firmware no longer knows this target. */
1350 if (target->handle == 0) {
1351 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
1352 __func__, target->tid);
1356 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1357 req->DevHandle = htole16(target->handle);
1358 req->TaskType = type;
1360 if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
1361 /* XXX Need to handle invalid LUNs */
1362 MPS_SET_LUN(req->LUN, tm->cm_lun);
1363 tm->cm_targ->logical_unit_resets++;
1364 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1365 "Sending logical unit reset to target %u lun %d\n",
1366 target->tid, tm->cm_lun);
1367 tm->cm_complete = mpssas_logical_unit_reset_complete;
1368 mpssas_prepare_for_tm(sc, tm, target, tm->cm_lun);
1369 } else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
1371 * Target reset method =
1372 * SAS Hard Link Reset / SATA Link Reset
1374 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
1375 tm->cm_targ->target_resets++;
1376 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1377 "Sending target reset to target %u\n", target->tid);
1378 tm->cm_complete = mpssas_target_reset_complete;
/* A target reset applies to every LUN, hence the wildcard. */
1379 mpssas_prepare_for_tm(sc, tm, target, CAM_LUN_WILDCARD);
1381 mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
1386 tm->cm_complete_data = (void *)tm;
/* Arm the TM timeout before handing the command to the hardware. */
1388 callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
1389 mpssas_tm_timeout, tm);
1391 err = mps_map_command(sc, tm);
1393 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1394 "error %d sending reset type %u\n",
/*
 * Completion handler for an ABORT TASK task management request.
 *
 * Stops the TM timeout callout and validates the reply, then inspects
 * the target's timed-out command list: empty means abort recovery is
 * finished and the TM is freed; a different SMID at the head means the
 * abort worked and the TM is reused to abort the next command; the same
 * SMID still queued means the abort failed, so recovery escalates to a
 * LOGICAL UNIT RESET with the same TM.
 */
1401 mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
1403 struct mps_command *cm;
1404 MPI2_SCSI_TASK_MANAGE_REPLY *reply;
1405 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1406 struct mpssas_target *targ;
/* The TM completed; make sure mpssas_tm_timeout() can no longer fire. */
1408 callout_stop(&tm->cm_callout);
1410 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1411 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
1415 * Currently there should be no way we can hit this case. It only
1416 * happens when we have a failure to allocate chain frames, and
1417 * task management commands don't have S/G lists.
1419 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
1420 mps_dprint(sc, MPS_RECOVERY,
1421 "cm_flags = %#x for abort %p TaskMID %u!\n",
1422 tm->cm_flags, tm, le16toh(req->TaskMID));
1423 mpssas_free_tm(sc, tm);
/*
 * A NULL reply is only expected when a diag reset flushed the command;
 * otherwise something is wrong with the controller.
 */
1427 if (reply == NULL) {
1428 mps_dprint(sc, MPS_RECOVERY,
1429 "NULL abort reply for tm %p TaskMID %u\n",
1430 tm, le16toh(req->TaskMID));
1431 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
1432 /* this completion was due to a reset, just cleanup */
1433 mps_dprint(sc, MPS_RECOVERY, "Hardware undergoing "
1434 "reset, ignoring NULL abort reply\n");
1436 mpssas_free_tm(sc, tm);
1438 /* we should have gotten a reply. */
1439 mps_dprint(sc, MPS_INFO|MPS_RECOVERY, "NULL reply on "
1440 "abort attempt, resetting controller\n");
1446 mps_dprint(sc, MPS_RECOVERY,
1447 "abort TaskMID %u status 0x%x code 0x%x count %u\n",
1448 le16toh(req->TaskMID),
1449 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
1450 le32toh(reply->TerminationCount));
/* Look at the next timed-out command to decide how recovery proceeds. */
1452 cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
1455 * If there are no more timedout commands, we're done with
1456 * error recovery for this target.
1458 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1459 "Finished abort recovery for target %u\n", targ->tid);
1462 mpssas_free_tm(sc, tm);
1463 } else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
1464 /* abort success, but we have more timedout commands to abort */
1465 mps_dprint(sc, MPS_INFO|MPS_RECOVERY,
1466 "Continuing abort recovery for target %u\n", targ->tid);
1468 mpssas_send_abort(sc, tm, cm);
1470 /* we didn't get a command completion, so the abort
1471 * failed as far as we're concerned. escalate.
1473 mps_dprint(sc, MPS_RECOVERY,
1474 "Abort failed for target %u, sending logical unit reset\n",
1477 mpssas_send_reset(sc, tm,
1478 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET);
/* Seconds to allow an ABORT TASK TM to complete (used as N * hz). */
1482 #define MPS_ABORT_TIMEOUT 5
/*
 * Build and send an ABORT TASK task management request for the
 * timed-out command 'cm', using TM command 'tm'.
 *
 * Fills in the device handle, LUN and the SMID of the command being
 * aborted, wires up mpssas_abort_complete() as the completion handler,
 * arms the TM timeout callout, and maps the command to the hardware.
 * Bails out early if the target has no device handle.
 */
1485 mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm)
1487 MPI2_SCSI_TASK_MANAGE_REQUEST *req;
1488 struct mpssas_target *targ;
/* No device handle means the firmware no longer knows this target. */
1492 if (targ->handle == 0) {
1493 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1494 "%s null devhandle for target_id %d\n",
1495 __func__, cm->cm_ccb->ccb_h.target_id);
1499 mpssas_log_command(cm, MPS_RECOVERY|MPS_INFO,
1500 "Aborting command %p\n", cm);
1502 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
1503 req->DevHandle = htole16(targ->handle);
1504 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
1506 /* XXX Need to handle invalid LUNs */
1507 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun);
/* Identify the victim command by its SMID. */
1509 req->TaskMID = htole16(cm->cm_desc.Default.SMID);
1512 tm->cm_complete = mpssas_abort_complete;
1513 tm->cm_complete_data = (void *)tm;
1514 tm->cm_targ = cm->cm_targ;
1515 tm->cm_lun = cm->cm_lun;
/* Arm the TM timeout before handing the command to the hardware. */
1517 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz,
1518 mpssas_tm_timeout, tm);
1522 mpssas_prepare_for_tm(sc, tm, targ, tm->cm_lun);
1524 err = mps_map_command(sc, tm);
1526 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1527 "error %d sending abort for cm %p SMID %u\n",
1528 err, cm, req->TaskMID);
/*
 * Callout handler: a SCSI I/O command has exceeded its CAM timeout.
 *
 * First polls the interrupt handler in case the completion is merely
 * pending.  If the command really is stuck, marks its CCB with
 * CAM_CMD_TIMEOUT, puts it on the target's timed-out list, and starts
 * (or joins) recovery: if the target already has a TM in flight the
 * command just waits its turn; otherwise a TM is allocated and an
 * ABORT TASK is sent for this command.
 */
1533 mpssas_scsiio_timeout(void *data)
1535 sbintime_t elapsed, now;
1537 struct mps_softc *sc;
1538 struct mps_command *cm;
1539 struct mpssas_target *targ;
1541 cm = (struct mps_command *)data;
1547 mtx_assert(&sc->mps_mtx, MA_OWNED);
1549 mps_dprint(sc, MPS_XINFO|MPS_RECOVERY, "Timeout checking cm %p\n", cm);
1552 * Run the interrupt handler to make sure it's not pending. This
1553 * isn't perfect because the command could have already completed
1554 * and been re-used, though this is unlikely.
1556 mps_intr_locked(sc);
1557 if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
1558 mpssas_log_command(cm, MPS_XINFO,
1559 "SCSI command %p almost timed out\n", cm);
1563 if (cm->cm_ccb == NULL) {
1564 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n");
/* qos.sim_data was stamped with sbinuptime() when the I/O was queued. */
1571 elapsed = now - ccb->ccb_h.qos.sim_data;
1572 mpssas_log_command(cm, MPS_INFO|MPS_RECOVERY,
1573 "Command timeout on target %u(0x%04x) %d set, %d.%d elapsed\n",
1574 targ->tid, targ->handle, ccb->ccb_h.timeout,
1575 sbintime_getsec(elapsed), elapsed & 0xffffffff);
1577 /* XXX first, check the firmware state, to see if it's still
1578 * operational. if not, do a diag reset.
1580 mpssas_set_ccbstatus(cm->cm_ccb, CAM_CMD_TIMEOUT);
1581 cm->cm_flags |= MPS_CM_FLAGS_ON_RECOVERY | MPS_CM_FLAGS_TIMEDOUT;
1582 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery);
1584 if (targ->tm != NULL) {
1585 /* target already in recovery, just queue up another
1586 * timedout command to be processed later.
1588 mps_dprint(sc, MPS_RECOVERY,
1589 "queued timedout cm %p for processing by tm %p\n",
1591 } else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) {
1592 mps_dprint(sc, MPS_RECOVERY|MPS_INFO,
1593 "Sending abort to target %u for SMID %d\n", targ->tid,
1594 cm->cm_desc.Default.SMID);
1595 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n",
1598 /* start recovery by aborting the first timedout command */
1599 mpssas_send_abort(sc, targ->tm, cm);
1601 /* XXX queue this target up for recovery once a TM becomes
1602 * available. The firmware only has a limited number of
1603 * HighPriority credits for the high priority requests used
1604 * for task management, and we ran out.
1606 * Isilon: don't worry about this for now, since we have
1607 * more credits than disks in an enclosure, and limit
1608 * ourselves to one TM per target for recovery.
1610 mps_dprint(sc, MPS_ERROR|MPS_RECOVERY,
1611 "timedout cm %p failed to allocate a tm\n", cm);
/*
 * CAM action handler for XPT_SCSI_IO: translate a CAM SCSI I/O CCB into
 * an MPI2 SCSI IO request and queue it to the controller.
 *
 * Rejects or requeues the CCB for a series of states that make the I/O
 * unservicable (no device handle, RAID component, target in removal or
 * reset, driver shutting down, diag reset in progress, no free command).
 * Otherwise builds the request: direction and tagging bits, LUN, CDB,
 * optional EEDP (end-to-end data protection) setup, optional WarpDrive
 * direct-drive translation, then arms the per-command timeout and maps
 * the command for DMA/submission.
 */
1617 mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb)
1619 MPI2_SCSI_IO_REQUEST *req;
1620 struct ccb_scsiio *csio;
1621 struct mps_softc *sc;
1622 struct mpssas_target *targ;
1623 struct mpssas_lun *lun;
1624 struct mps_command *cm;
1625 uint8_t i, lba_byte, *ref_tag_addr;
1626 uint16_t eedp_flags;
1627 uint32_t mpi_control;
1631 mtx_assert(&sc->mps_mtx, MA_OWNED);
1634 KASSERT(csio->ccb_h.target_id < sassc->maxtargets,
1635 ("Target %d out of bounds in XPT_SCSI_IO\n",
1636 csio->ccb_h.target_id));
1637 targ = &sassc->targets[csio->ccb_h.target_id];
1638 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags);
/*
 * Handle 0 means the firmware has no device here.  During a diag reset
 * handles are cleared and rediscovered, so requeue rather than fail.
 */
1639 if (targ->handle == 0x0) {
1640 if (targ->flags & MPSSAS_TARGET_INDIAGRESET) {
1641 mps_dprint(sc, MPS_ERROR,
1642 "%s NULL handle for target %u in diag reset freezing queue\n",
1643 __func__, csio->ccb_h.target_id);
1644 ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
1645 xpt_freeze_devq(ccb->ccb_h.path, 1);
1649 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n",
1650 __func__, csio->ccb_h.target_id);
1651 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
/* RAID member disks are addressed through the volume, not directly. */
1655 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) {
1656 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO "
1657 "supported %u\n", __func__, csio->ccb_h.target_id);
1658 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1663 * Sometimes, it is possible to get a command that is not "In
1664 * Progress" and was actually aborted by the upper layer. Check for
1665 * this here and complete the command without error.
1667 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_INPROG) {
1668 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for "
1669 "target %u\n", __func__, csio->ccb_h.target_id);
1674 * If devinfo is 0 this will be a volume. In that case don't tell CAM
1675 * that the volume has timed out. We want volumes to be enumerated
1676 * until they are deleted/removed, not just failed. In either event,
1677 * we're removing the target due to a firmware event telling us
1678 * the device is now gone (as opposed to some transient event). Since
1679 * we're opting to remove failed devices from the OS's view, we need
1680 * to propagate that status up the stack.
1682 if (targ->flags & MPSSAS_TARGET_INREMOVAL) {
1683 if (targ->devinfo == 0)
1684 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP)
1686 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1691 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) {
1692 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__);
1693 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
1699 * If target has a reset in progress, the devq should be frozen.
1700 * Geting here we likely hit a race, so just requeue.
1702 if (targ->flags & MPSSAS_TARGET_INRESET) {
1703 ccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_DEV_QFRZN;
1704 mps_dprint(sc, MPS_INFO, "%s: Freezing devq for target ID %d\n",
1705 __func__, targ->tid);
1706 xpt_freeze_devq(ccb->ccb_h.path, 1);
/*
 * Out of driver commands (or mid diag reset): freeze the SIM queue and
 * ask CAM to retry the CCB later.
 */
1711 cm = mps_alloc_command(sc);
1712 if (cm == NULL || (sc->mps_flags & MPS_FLAGS_DIAGRESET)) {
1714 mps_free_command(sc, cm);
1716 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
1717 xpt_freeze_simq(sassc->sim, 1);
1718 sassc->flags |= MPSSAS_QUEUE_FROZEN;
1720 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1721 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Build the MPI2 SCSI IO request from the CCB. */
1726 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req;
1727 bzero(req, sizeof(*req));
1728 req->DevHandle = htole16(targ->handle);
1729 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
1731 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr);
1732 req->SenseBufferLength = MPS_SENSE_LEN;
1734 req->ChainOffset = 0;
1735 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */
1740 req->DataLength = htole32(csio->dxfer_len);
1741 req->BidirectionalDataLength = 0;
1742 req->IoFlags = htole16(csio->cdb_len);
1745 /* Note: BiDirectional transfers are not supported */
1746 switch (csio->ccb_h.flags & CAM_DIR_MASK) {
1748 mpi_control = MPI2_SCSIIO_CONTROL_READ;
1749 cm->cm_flags |= MPS_CM_FLAGS_DATAIN;
1752 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
1753 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT;
1757 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
1761 if (csio->cdb_len == 32)
1762 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
1764 * It looks like the hardware doesn't require an explicit tag
1765 * number for each transaction. SAM Task Management not supported
1768 switch (csio->tag_action) {
1769 case MSG_HEAD_OF_Q_TAG:
1770 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
1772 case MSG_ORDERED_Q_TAG:
1773 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
1776 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ;
1778 case CAM_TAG_ACTION_NONE:
1779 case MSG_SIMPLE_Q_TAG:
1781 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
1784 mpi_control |= (csio->priority << MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT) &
1785 MPI2_SCSIIO_CONTROL_TASKPRI_MASK;
/* Carry forward any per-target Transport Layer Retries setting. */
1786 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits;
1787 req->Control = htole32(mpi_control);
1788 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) {
1789 mps_free_command(sc, cm);
1790 mpssas_set_ccbstatus(ccb, CAM_LUN_INVALID);
/* The CDB may arrive by pointer or inline in the CCB. */
1795 if (csio->ccb_h.flags & CAM_CDB_POINTER)
1796 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len);
1798 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len);
1799 req->IoFlags = htole16(csio->cdb_len);
1802 * Check if EEDP is supported and enabled. If it is then check if the
1803 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and
1804 * is formatted for EEDP support. If all of this is true, set CDB up
1805 * for EEDP transfer.
1807 eedp_flags = op_code_prot[req->CDB.CDB32[0]];
1808 if (sc->eedp_enabled && eedp_flags) {
1809 SLIST_FOREACH(lun, &targ->luns, lun_link) {
1810 if (lun->lun_id == csio->ccb_h.target_lun) {
1815 if ((lun != NULL) && (lun->eedp_formatted)) {
1816 req->EEDPBlockSize = htole16(lun->eedp_block_size);
1817 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1818 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1819 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1820 req->EEDPFlags = htole16(eedp_flags);
1823 * If CDB less than 32, fill in Primary Ref Tag with
1824 * low 4 bytes of LBA. If CDB is 32, tag stuff is
1825 * already there. Also, set protection bit. FreeBSD
1826 * currently does not support CDBs bigger than 16, but
1827 * the code doesn't hurt, and will be here for the
1830 if (csio->cdb_len != 32) {
1831 lba_byte = (csio->cdb_len == 16) ? 6 : 2;
1832 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32.
1833 PrimaryReferenceTag;
1834 for (i = 0; i < 4; i++) {
1836 req->CDB.CDB32[lba_byte + i];
1839 req->CDB.EEDP32.PrimaryReferenceTag =
1840 htole32(req->CDB.EEDP32.PrimaryReferenceTag);
1841 req->CDB.EEDP32.PrimaryApplicationTagMask =
1843 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) |
1847 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG;
1848 req->EEDPFlags = htole16(eedp_flags);
1849 req->CDB.CDB32[10] = (req->CDB.CDB32[10] &
1855 cm->cm_length = csio->dxfer_len;
1856 if (cm->cm_length != 0) {
1858 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB;
/* SGL lives at 32-bit-word offset 24 of the 32-word request frame. */
1862 cm->cm_sge = &req->SGL;
1863 cm->cm_sglsize = (32 - 24) * 4;
1864 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1865 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle);
1866 cm->cm_complete = mpssas_scsiio_complete;
1867 cm->cm_complete_data = ccb;
1869 cm->cm_lun = csio->ccb_h.target_lun;
1873 * If HBA is a WD and the command is not for a retry, try to build a
1874 * direct I/O message. If failed, or the command is for a retry, send
1875 * the I/O to the IR volume itself.
1877 if (sc->WD_valid_config) {
1878 if (ccb->ccb_h.sim_priv.entries[0].field == MPS_WD_RETRY) {
1879 mpssas_direct_drive_io(sassc, cm, ccb);
1881 mpssas_set_ccbstatus(ccb, CAM_REQ_INPROG);
1885 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
1886 if (csio->bio != NULL)
1887 biotrack(csio->bio, __func__);
/* Stamp submission time (read back by mpssas_scsiio_timeout). */
1889 csio->ccb_h.qos.sim_data = sbinuptime();
1890 callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
1891 mpssas_scsiio_timeout, cm, 0);
1894 targ->outstanding++;
1895 TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
1896 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1898 mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
1899 __func__, cm, ccb, targ->outstanding);
1901 mps_map_command(sc, cm);
1906 * mps_sc_failed_io_info - log diagnostic detail for a non-successful SCSI_IO request
/*
 * Decode and print (at MPS_XINFO level) the IOC status, SCSI status and
 * SCSI state from a failed SCSI IO reply, plus sense data and response
 * info when the reply flags them as valid.  Purely diagnostic; has no
 * effect on command disposition.
 */
1909 mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
1910 Mpi2SCSIIOReply_t *mpi_reply)
1914 u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
1915 MPI2_IOCSTATUS_MASK;
1916 u8 scsi_state = mpi_reply->SCSIState;
1917 u8 scsi_status = mpi_reply->SCSIStatus;
1918 u32 log_info = le32toh(mpi_reply->IOCLogInfo);
1919 const char *desc_ioc_state, *desc_scsi_status;
/* 0x31170000 is filtered out; presumably a noisy log-info code — TODO confirm. */
1921 if (log_info == 0x31170000)
1924 desc_ioc_state = mps_describe_table(mps_iocstatus_string,
1926 desc_scsi_status = mps_describe_table(mps_scsi_status_string,
1929 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n",
1930 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status);
1933 *We can add more detail about underflow data here
1936 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), "
1937 "scsi_state %b\n", desc_scsi_status, scsi_status,
1938 scsi_state, "\20" "\1AutosenseValid" "\2AutosenseFailed"
1939 "\3NoScsiStatus" "\4Terminated" "\5Response InfoValid");
1941 if (sc->mps_debug & MPS_XINFO &&
1942 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
1943 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n");
1944 scsi_sense_print(csio);
1945 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n");
1948 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
1949 response_info = le32toh(mpi_reply->ResponseInfo);
1950 response_bytes = (u8 *)&response_info;
1951 mps_dprint(sc, MPS_XINFO, "response code(0x%1x): %s\n",
1953 mps_describe_table(mps_scsi_taskmgmt_string,
1954 response_bytes[0]));
1959 mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm)
1961 MPI2_SCSI_IO_REPLY *rep;
1963 struct ccb_scsiio *csio;
1964 struct mpssas_softc *sassc;
1965 struct scsi_vpd_supported_page_list *vpd_list = NULL;
1966 u8 *TLR_bits, TLR_on;
1969 struct mpssas_target *target;
1970 target_id_t target_id;
1973 mps_dprint(sc, MPS_TRACE,
1974 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm,
1975 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply,
1976 cm->cm_targ->outstanding);
1978 callout_stop(&cm->cm_callout);
1979 mtx_assert(&sc->mps_mtx, MA_OWNED);
1982 ccb = cm->cm_complete_data;
1984 target_id = csio->ccb_h.target_id;
1985 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply;
1987 * XXX KDM if the chain allocation fails, does it matter if we do
1988 * the sync and unload here? It is simpler to do it in every case,
1989 * assuming it doesn't cause problems.
1991 if (cm->cm_data != NULL) {
1992 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN)
1993 dir = BUS_DMASYNC_POSTREAD;
1994 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT)
1995 dir = BUS_DMASYNC_POSTWRITE;
1996 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1997 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2000 cm->cm_targ->completed++;
2001 cm->cm_targ->outstanding--;
2002 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
2003 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
2005 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
2006 if (ccb->csio.bio != NULL)
2007 biotrack(ccb->csio.bio, __func__);
2010 if (cm->cm_flags & MPS_CM_FLAGS_ON_RECOVERY) {
2011 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
2012 KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
2013 ("Not busy for CM_FLAGS_TIMEDOUT: %u\n", cm->cm_state));
2014 cm->cm_flags &= ~MPS_CM_FLAGS_ON_RECOVERY;
2015 if (cm->cm_reply != NULL)
2016 mpssas_log_command(cm, MPS_RECOVERY,
2017 "completed timedout cm %p ccb %p during recovery "
2018 "ioc %x scsi %x state %x xfer %u\n",
2019 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2020 rep->SCSIStatus, rep->SCSIState,
2021 le32toh(rep->TransferCount));
2023 mpssas_log_command(cm, MPS_RECOVERY,
2024 "completed timedout cm %p ccb %p during recovery\n",
2026 } else if (cm->cm_targ->tm != NULL) {
2027 if (cm->cm_reply != NULL)
2028 mpssas_log_command(cm, MPS_RECOVERY,
2029 "completed cm %p ccb %p during recovery "
2030 "ioc %x scsi %x state %x xfer %u\n",
2031 cm, cm->cm_ccb, le16toh(rep->IOCStatus),
2032 rep->SCSIStatus, rep->SCSIState,
2033 le32toh(rep->TransferCount));
2035 mpssas_log_command(cm, MPS_RECOVERY,
2036 "completed cm %p ccb %p during recovery\n",
2038 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
2039 mpssas_log_command(cm, MPS_RECOVERY,
2040 "reset completed cm %p ccb %p\n",
2044 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
2046 * We ran into an error after we tried to map the command,
2047 * so we're getting a callback without queueing the command
2048 * to the hardware. So we set the status here, and it will
2049 * be retained below. We'll go through the "fast path",
2050 * because there can be no reply when we haven't actually
2051 * gone out to the hardware.
2053 mpssas_set_ccbstatus(ccb, CAM_REQUEUE_REQ);
2056 * Currently the only error included in the mask is
2057 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of
2058 * chain frames. We need to freeze the queue until we get
2059 * a command that completed without this error, which will
2060 * hopefully have some chain frames attached that we can
2061 * use. If we wanted to get smarter about it, we would
2062 * only unfreeze the queue in this condition when we're
2063 * sure that we're getting some chain frames back. That's
2064 * probably unnecessary.
2066 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) {
2067 xpt_freeze_simq(sassc->sim, 1);
2068 sassc->flags |= MPSSAS_QUEUE_FROZEN;
2069 mps_dprint(sc, MPS_XINFO, "Error sending command, "
2070 "freezing SIM queue\n");
2075 * If this is a Start Stop Unit command and it was issued by the driver
2076 * during shutdown, decrement the refcount to account for all of the
2077 * commands that were sent. All SSU commands should be completed before
2078 * shutdown completes, meaning SSU_refcount will be 0 after SSU_started
2081 if (sc->SSU_started && (csio->cdb_io.cdb_bytes[0] == START_STOP_UNIT)) {
2082 mps_dprint(sc, MPS_INFO, "Decrementing SSU count.\n");
2086 /* Take the fast path to completion */
2087 if (cm->cm_reply == NULL) {
2088 if (mpssas_get_ccbstatus(ccb) == CAM_REQ_INPROG) {
2089 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0)
2090 mpssas_set_ccbstatus(ccb, CAM_SCSI_BUS_RESET);
2092 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2093 ccb->csio.scsi_status = SCSI_STATUS_OK;
2095 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2096 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2097 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2098 mps_dprint(sc, MPS_XINFO,
2099 "Unfreezing SIM queue\n");
2104 * There are two scenarios where the status won't be
2105 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is
2106 * set, the second is in the MPS_FLAGS_DIAGRESET above.
2108 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2110 * Freeze the dev queue so that commands are
2111 * executed in the correct order after error
2114 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2115 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2117 mps_free_command(sc, cm);
2122 mpssas_log_command(cm, MPS_XINFO,
2123 "ioc %x scsi %x state %x xfer %u\n",
2124 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState,
2125 le32toh(rep->TransferCount));
2128 * If this is a Direct Drive I/O, reissue the I/O to the original IR
2129 * Volume if an error occurred (normal I/O retry). Use the original
2130 * CCB, but set a flag that this will be a retry so that it's sent to
2131 * the original volume. Free the command but reuse the CCB.
2133 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) {
2134 mps_free_command(sc, cm);
2135 ccb->ccb_h.sim_priv.entries[0].field = MPS_WD_RETRY;
2136 mpssas_action_scsiio(sassc, ccb);
2139 ccb->ccb_h.sim_priv.entries[0].field = 0;
2141 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) {
2142 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
2143 csio->resid = cm->cm_length - le32toh(rep->TransferCount);
2145 case MPI2_IOCSTATUS_SUCCESS:
2146 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
2148 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) ==
2149 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR)
2150 mpssas_log_command(cm, MPS_XINFO, "recovered error\n");
2152 /* Completion failed at the transport level. */
2153 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS |
2154 MPI2_SCSI_STATE_TERMINATED)) {
2155 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2159 /* In a modern packetized environment, an autosense failure
2160 * implies that there's not much else that can be done to
2161 * recover the command.
2163 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) {
2164 mpssas_set_ccbstatus(ccb, CAM_AUTOSENSE_FAIL);
2169 * CAM doesn't care about SAS Response Info data, but if this is
2170 * the state check if TLR should be done. If not, clear the
2171 * TLR_bits for the target.
2173 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) &&
2174 ((le32toh(rep->ResponseInfo) &
2175 MPI2_SCSI_RI_MASK_REASONCODE) ==
2176 MPS_SCSI_RI_INVALID_FRAME)) {
2177 sc->mapping_table[target_id].TLR_bits =
2178 (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2182 * Intentionally override the normal SCSI status reporting
2183 * for these two cases. These are likely to happen in a
2184 * multi-initiator environment, and we want to make sure that
2185 * CAM retries these commands rather than fail them.
2187 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) ||
2188 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) {
2189 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2193 /* Handle normal status and sense */
2194 csio->scsi_status = rep->SCSIStatus;
2195 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD)
2196 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2198 mpssas_set_ccbstatus(ccb, CAM_SCSI_STATUS_ERROR);
2200 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
2201 int sense_len, returned_sense_len;
2203 returned_sense_len = min(le32toh(rep->SenseCount),
2204 sizeof(struct scsi_sense_data));
2205 if (returned_sense_len < ccb->csio.sense_len)
2206 ccb->csio.sense_resid = ccb->csio.sense_len -
2209 ccb->csio.sense_resid = 0;
2211 sense_len = min(returned_sense_len,
2212 ccb->csio.sense_len - ccb->csio.sense_resid);
2213 bzero(&ccb->csio.sense_data,
2214 sizeof(ccb->csio.sense_data));
2215 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len);
2216 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
2220 * Check if this is an INQUIRY command. If it's a VPD inquiry,
2221 * and it's page code 0 (Supported Page List), and there is
2222 * inquiry data, and this is for a sequential access device, and
2223 * the device is an SSP target, and TLR is supported by the
2224 * controller, turn the TLR_bits value ON if page 0x90 is
2227 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2228 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) &&
2229 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) &&
2230 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) &&
2231 (csio->data_ptr != NULL) &&
2232 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) &&
2233 (sc->control_TLR) &&
2234 (sc->mapping_table[target_id].device_info &
2235 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) {
2236 vpd_list = (struct scsi_vpd_supported_page_list *)
2238 TLR_bits = &sc->mapping_table[target_id].TLR_bits;
2239 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR;
2240 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON;
2241 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) +
2242 csio->cdb_io.cdb_bytes[4];
2243 alloc_len -= csio->resid;
2244 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) {
2245 if (vpd_list->list[i] == 0x90) {
2253 * If this is a SATA direct-access end device, mark it so that
2254 * a SCSI StartStopUnit command will be sent to it when the
2255 * driver is being shutdown.
2257 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) &&
2258 ((csio->data_ptr[0] & 0x1f) == T_DIRECT) &&
2259 (sc->mapping_table[target_id].device_info &
2260 MPI2_SAS_DEVICE_INFO_SATA_DEVICE) &&
2261 ((sc->mapping_table[target_id].device_info &
2262 MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
2263 MPI2_SAS_DEVICE_INFO_END_DEVICE)) {
2264 target = &sassc->targets[target_id];
2265 target->supports_SSU = TRUE;
2266 mps_dprint(sc, MPS_XINFO, "Target %d supports SSU\n",
2270 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
2271 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
2273 * If devinfo is 0 this will be a volume. In that case don't
2274 * tell CAM that the volume is not there. We want volumes to
2275 * be enumerated until they are deleted/removed, not just
2278 if (cm->cm_targ->devinfo == 0)
2279 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
2281 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2283 case MPI2_IOCSTATUS_INVALID_SGL:
2284 mps_print_scsiio_cmd(sc, cm);
2285 mpssas_set_ccbstatus(ccb, CAM_UNREC_HBA_ERROR);
2287 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
2289 * This is one of the responses that comes back when an I/O
2290 * has been aborted. If it is because of a timeout that we
2291 * initiated, just set the status to CAM_CMD_TIMEOUT.
2292 * Otherwise set it to CAM_REQ_ABORTED. The effect on the
2293 * command is the same (it gets retried, subject to the
2294 * retry counter), the only difference is what gets printed
2297 if (cm->cm_flags & MPS_CM_FLAGS_TIMEDOUT)
2298 mpssas_set_ccbstatus(ccb, CAM_CMD_TIMEOUT);
2300 mpssas_set_ccbstatus(ccb, CAM_REQ_ABORTED);
2302 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
2303 /* resid is ignored for this condition */
2305 mpssas_set_ccbstatus(ccb, CAM_DATA_RUN_ERR);
2307 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
2308 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
2310 * These can sometimes be transient transport-related
2311 * errors, and sometimes persistent drive-related errors.
2312 * We used to retry these without decrementing the retry
2313 * count by returning CAM_REQUEUE_REQ. Unfortunately, if
2314 * we hit a persistent drive problem that returns one of
2315 * these error codes, we would retry indefinitely. So,
2316 * return CAM_REQ_CMP_ERROR so that we decrement the retry
2317 * count and avoid infinite retries. We're taking the
2318 * potential risk of flagging false failures in the event
2319 * of a topology-related error (e.g. a SAS expander problem
2320 * causes a command addressed to a drive to fail), but
2321 * avoiding getting into an infinite retry loop. However,
2322 * if we get them while were moving a device, we should
2323 * fail the request as 'not there' because the device
2324 * is effectively gone.
2326 if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL)
2327 mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
2329 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2330 mps_dprint(sc, MPS_INFO,
2331 "Controller reported %s tgt %u SMID %u loginfo %x%s\n",
2332 mps_describe_table(mps_iocstatus_string,
2333 le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK),
2334 target_id, cm->cm_desc.Default.SMID,
2335 le32toh(rep->IOCLogInfo),
2336 (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) ? " departing" : "");
2337 mps_dprint(sc, MPS_XINFO,
2338 "SCSIStatus %x SCSIState %x xfercount %u\n",
2339 rep->SCSIStatus, rep->SCSIState,
2340 le32toh(rep->TransferCount));
2342 case MPI2_IOCSTATUS_INVALID_FUNCTION:
2343 case MPI2_IOCSTATUS_INTERNAL_ERROR:
2344 case MPI2_IOCSTATUS_INVALID_VPID:
2345 case MPI2_IOCSTATUS_INVALID_FIELD:
2346 case MPI2_IOCSTATUS_INVALID_STATE:
2347 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
2348 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
2349 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
2350 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
2351 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
2353 mpssas_log_command(cm, MPS_XINFO,
2354 "completed ioc %x loginfo %x scsi %x state %x xfer %u\n",
2355 le16toh(rep->IOCStatus), le32toh(rep->IOCLogInfo),
2356 rep->SCSIStatus, rep->SCSIState,
2357 le32toh(rep->TransferCount));
2358 csio->resid = cm->cm_length;
2359 mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);
2363 mps_sc_failed_io_info(sc,csio,rep);
2365 if (sassc->flags & MPSSAS_QUEUE_FROZEN) {
2366 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2367 sassc->flags &= ~MPSSAS_QUEUE_FROZEN;
2368 mps_dprint(sc, MPS_XINFO, "Command completed, "
2369 "unfreezing SIM queue\n");
2372 if (mpssas_get_ccbstatus(ccb) != CAM_REQ_CMP) {
2373 ccb->ccb_h.status |= CAM_DEV_QFRZN;
2374 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1);
2378 * Check to see if we're removing the device. If so, and this is the
2379 * last command on the queue, proceed with the deferred removal of the
2380 * device. Note, for removing a volume, this won't trigger because
2381 * pending_remove_tm will be NULL.
2383 if (cm->cm_targ->flags & MPSSAS_TARGET_INREMOVAL) {
2384 if (TAILQ_FIRST(&cm->cm_targ->commands) == NULL &&
2385 cm->cm_targ->pending_remove_tm != NULL) {
2386 mps_dprint(sc, MPS_INFO, "Last pending command complete: starting remove_device\n");
2387 mps_map_command(sc, cm->cm_targ->pending_remove_tm);
2388 cm->cm_targ->pending_remove_tm = NULL;
2392 mps_free_command(sc, cm);
2396 /* All Request reached here are Endian safe */
2398 mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm,
2400 pMpi2SCSIIORequest_t pIO_req;
2401 struct mps_softc *sc = sassc->sc;
2403 uint32_t physLBA, stripe_offset, stripe_unit;
2404 uint32_t io_size, column;
2405 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB;
2408 * If this is a valid SCSI command (Read6, Read10, Read16, Write6,
2409 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O
2410 * will be sent to the IR volume itself. Since Read6 and Write6 are a
2411 * bit different than the 10/16 CDBs, handle them separately.
2413 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req;
2414 CDB = pIO_req->CDB.CDB32;
2417 * Handle 6 byte CDBs.
2419 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) ||
2420 (CDB[0] == WRITE_6))) {
2422 * Get the transfer size in blocks.
2424 io_size = (cm->cm_length >> sc->DD_block_exponent);
2427 * Get virtual LBA given in the CDB.
2429 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) |
2430 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3];
2433 * Check that LBA range for I/O does not exceed volume's
2436 if ((virtLBA + (uint64_t)io_size - 1) <=
2439 * Check if the I/O crosses a stripe boundary. If not,
2440 * translate the virtual LBA to a physical LBA and set
2441 * the DevHandle for the PhysDisk to be used. If it
2442 * does cross a boundary, do normal I/O. To get the
2443 * right DevHandle to use, get the map number for the
2444 * column, then use that map number to look up the
2445 * DevHandle of the PhysDisk.
2447 stripe_offset = (uint32_t)virtLBA &
2448 (sc->DD_stripe_size - 1);
2449 if ((stripe_offset + io_size) <= sc->DD_stripe_size) {
2450 physLBA = (uint32_t)virtLBA >>
2451 sc->DD_stripe_exponent;
2452 stripe_unit = physLBA / sc->DD_num_phys_disks;
2453 column = physLBA % sc->DD_num_phys_disks;
2454 pIO_req->DevHandle =
2455 htole16(sc->DD_column_map[column].dev_handle);
2456 /* ???? Is this endian safe*/
2457 cm->cm_desc.SCSIIO.DevHandle =
2460 physLBA = (stripe_unit <<
2461 sc->DD_stripe_exponent) + stripe_offset;
2462 ptrLBA = &pIO_req->CDB.CDB32[1];
2463 physLBA_byte = (uint8_t)(physLBA >> 16);
2464 *ptrLBA = physLBA_byte;
2465 ptrLBA = &pIO_req->CDB.CDB32[2];
2466 physLBA_byte = (uint8_t)(physLBA >> 8);
2467 *ptrLBA = physLBA_byte;
2468 ptrLBA = &pIO_req->CDB.CDB32[3];
2469 physLBA_byte = (uint8_t)physLBA;
2470 *ptrLBA = physLBA_byte;
2473 * Set flag that Direct Drive I/O is
2476 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2483 * Handle 10, 12 or 16 byte CDBs.
2485 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) ||
2486 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) ||
2487 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) ||
2488 (CDB[0] == WRITE_12))) {
2490 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB
2491 * are 0. If not, this is accessing beyond 2TB so handle it in
2492 * the else section. 10-byte and 12-byte CDB's are OK.
2493 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is
2494 * ready to accept 12byte CDB for Direct IOs.
2496 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) ||
2497 (CDB[0] == READ_12 || CDB[0] == WRITE_12) ||
2498 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) {
2500 * Get the transfer size in blocks.
2502 io_size = (cm->cm_length >> sc->DD_block_exponent);
2505 * Get virtual LBA. Point to correct lower 4 bytes of
2506 * LBA in the CDB depending on command.
2508 lba_idx = ((CDB[0] == READ_12) ||
2509 (CDB[0] == WRITE_12) ||
2510 (CDB[0] == READ_10) ||
2511 (CDB[0] == WRITE_10))? 2 : 6;
2512 virtLBA = ((uint64_t)CDB[lba_idx] << 24) |
2513 ((uint64_t)CDB[lba_idx + 1] << 16) |
2514 ((uint64_t)CDB[lba_idx + 2] << 8) |
2515 (uint64_t)CDB[lba_idx + 3];
2518 * Check that LBA range for I/O does not exceed volume's
2521 if ((virtLBA + (uint64_t)io_size - 1) <=
2524 * Check if the I/O crosses a stripe boundary.
2525 * If not, translate the virtual LBA to a
2526 * physical LBA and set the DevHandle for the
2527 * PhysDisk to be used. If it does cross a
2528 * boundary, do normal I/O. To get the right
2529 * DevHandle to use, get the map number for the
2530 * column, then use that map number to look up
2531 * the DevHandle of the PhysDisk.
2533 stripe_offset = (uint32_t)virtLBA &
2534 (sc->DD_stripe_size - 1);
2535 if ((stripe_offset + io_size) <=
2536 sc->DD_stripe_size) {
2537 physLBA = (uint32_t)virtLBA >>
2538 sc->DD_stripe_exponent;
2539 stripe_unit = physLBA /
2540 sc->DD_num_phys_disks;
2542 sc->DD_num_phys_disks;
2543 pIO_req->DevHandle =
2544 htole16(sc->DD_column_map[column].
2546 cm->cm_desc.SCSIIO.DevHandle =
2549 physLBA = (stripe_unit <<
2550 sc->DD_stripe_exponent) +
2553 &pIO_req->CDB.CDB32[lba_idx];
2554 physLBA_byte = (uint8_t)(physLBA >> 24);
2555 *ptrLBA = physLBA_byte;
2557 &pIO_req->CDB.CDB32[lba_idx + 1];
2558 physLBA_byte = (uint8_t)(physLBA >> 16);
2559 *ptrLBA = physLBA_byte;
2561 &pIO_req->CDB.CDB32[lba_idx + 2];
2562 physLBA_byte = (uint8_t)(physLBA >> 8);
2563 *ptrLBA = physLBA_byte;
2565 &pIO_req->CDB.CDB32[lba_idx + 3];
2566 physLBA_byte = (uint8_t)physLBA;
2567 *ptrLBA = physLBA_byte;
2570 * Set flag that Direct Drive I/O is
2573 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
2578 * 16-byte CDB and the upper 4 bytes of the CDB are not
2579 * 0. Get the transfer size in blocks.
2581 io_size = (cm->cm_length >> sc->DD_block_exponent);
2586 virtLBA = ((uint64_t)CDB[2] << 54) |
2587 ((uint64_t)CDB[3] << 48) |
2588 ((uint64_t)CDB[4] << 40) |
2589 ((uint64_t)CDB[5] << 32) |
2590 ((uint64_t)CDB[6] << 24) |
2591 ((uint64_t)CDB[7] << 16) |
2592 ((uint64_t)CDB[8] << 8) |
2596 * Check that LBA range for I/O does not exceed volume's
2599 if ((virtLBA + (uint64_t)io_size - 1) <=
2602 * Check if the I/O crosses a stripe boundary.
2603 * If not, translate the virtual LBA to a
2604 * physical LBA and set the DevHandle for the
2605 * PhysDisk to be used. If it does cross a
2606 * boundary, do normal I/O. To get the right
2607 * DevHandle to use, get the map number for the
2608 * column, then use that map number to look up
2609 * the DevHandle of the PhysDisk.
2611 stripe_offset = (uint32_t)virtLBA &
2612 (sc->DD_stripe_size - 1);
2613 if ((stripe_offset + io_size) <=
2614 sc->DD_stripe_size) {
2615 physLBA = (uint32_t)(virtLBA >>
2616 sc->DD_stripe_exponent);
2617 stripe_unit = physLBA /
2618 sc->DD_num_phys_disks;
2620 sc->DD_num_phys_disks;
2621 pIO_req->DevHandle =
2622 htole16(sc->DD_column_map[column].
2624 cm->cm_desc.SCSIIO.DevHandle =
2627 physLBA = (stripe_unit <<
2628 sc->DD_stripe_exponent) +
2632 * Set upper 4 bytes of LBA to 0. We
2633 * assume that the phys disks are less
2634 * than 2 TB's in size. Then, set the
2637 pIO_req->CDB.CDB32[2] = 0;
2638 pIO_req->CDB.CDB32[3] = 0;
2639 pIO_req->CDB.CDB32[4] = 0;
2640 pIO_req->CDB.CDB32[5] = 0;
2641 ptrLBA = &pIO_req->CDB.CDB32[6];
2642 physLBA_byte = (uint8_t)(physLBA >> 24);
2643 *ptrLBA = physLBA_byte;
2644 ptrLBA = &pIO_req->CDB.CDB32[7];
2645 physLBA_byte = (uint8_t)(physLBA >> 16);
2646 *ptrLBA = physLBA_byte;
2647 ptrLBA = &pIO_req->CDB.CDB32[8];
2648 physLBA_byte = (uint8_t)(physLBA >> 8);
2649 *ptrLBA = physLBA_byte;
2650 ptrLBA = &pIO_req->CDB.CDB32[9];
2651 physLBA_byte = (uint8_t)physLBA;
2652 *ptrLBA = physLBA_byte;
2655 * Set flag that Direct Drive I/O is
2658 cm->cm_flags |= MPS_CM_FLAGS_DD_IO;
/*
 * Completion handler for an SMP passthrough command issued by
 * mpssas_send_smpcmd().  Translates the IOC/SAS status in the reply into
 * a CAM CCB status, syncs and unloads the DMA map, and frees the command.
 */
mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm)
	MPI2_SMP_PASSTHROUGH_REPLY *rpl;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;

	/* The CCB to complete was stashed by mpssas_send_smpcmd(). */
	ccb = cm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and SMP
	 * commands require two S/G elements only.  That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
		    __func__, cm->cm_flags);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	/* Reassemble the 64-bit SAS address from the little-endian request. */
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
	    "%#jx completed successfully\n", __func__,
	    (uintmax_t)sasaddr);

	/* Byte 2 of an SMP response frame carries the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		mpssas_set_ccbstatus(ccb, CAM_SMP_STATUS_ERROR);

	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
/*
 * Build and map an SMP passthrough request for the device at 'sasaddr'
 * and hand it to the controller.  Completion is handled by
 * mpssas_smpio_complete().  Request and response buffers are mapped in
 * one pass using a two-element uio.
 */
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
		    "%s: physical addresses not supported\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);

		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		  || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: multiple request or response "
			    "buffer segments not supported for SMP\n",
			mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
			response = ccb->smpio.smp_response;

	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		mpssas_set_ccbstatus(ccb, CAM_REQ_INVALID);

	cm = mps_alloc_command(sc);
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	/* iovec 0 is the outbound request, iovec 1 the inbound response. */
	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
	    MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: error %d returned from mps_map_command()\n",

	mps_free_command(sc, cm);
	mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);
/*
 * Handle an XPT_SMP_IO CCB: determine the SAS address of the SMP target
 * (the device itself if it embeds an SMP target, otherwise its parent
 * expander) and dispatch the request via mpssas_send_smpcmd().
 */
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	/*
	 * Make sure the target exists.
	 */
	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_SMP_IO\n", ccb->ccb_h.target_id));
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: target %d does not exist!\n", __func__,
		    ccb->ccb_h.target_id);
		mpssas_set_ccbstatus(ccb, CAM_SEL_TIMEOUT);

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly; otherwise we will have to
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 *
	 * NOTE(review): the comment above says this path is disabled, yet
	 * the line below appears active here -- confirm against the full
	 * source whether it sits inside an #if 0 block.
	 */
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d does not have a valid "
			    "parent handle!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d does not have a valid "
			    "parent target!\n", __func__, targ->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent %d does not "
			    "have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent %d does not "
			    "have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);

		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent handle %d does "
			    "not have a valid SAS address!\n",
			    __func__, targ->handle, targ->parent_handle);
			mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	/* Still no SAS address: nowhere to route the SMP request. */
		mps_dprint(sc, MPS_INFO,
		    "%s: unable to find SAS address for handle %d\n",
		    __func__, targ->handle);
		mpssas_set_ccbstatus(ccb, CAM_DEV_NOT_THERE);

	mpssas_send_smpcmd(sassc, ccb, sasaddr);
/*
 * Handle an XPT_RESET_DEV CCB by issuing a SAS target-reset task
 * management request to the target's device handle.  Completion is
 * handled by mpssas_resetdev_complete().
 */
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	KASSERT(ccb->ccb_h.target_id < sassc->maxtargets,
	    ("Target %d out of bounds in XPT_RESET_DEV\n",
	     ccb->ccb_h.target_id));

	/* TM frames are a limited resource; fail the CCB if none is free. */
	tm = mpssas_alloc_tm(sc);
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		mpssas_set_ccbstatus(ccb, CAM_RESRC_UNAVAIL);

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;

	/* Freeze the devq so no new I/O reaches the target mid-reset. */
	mpssas_prepare_for_tm(sc, tm, targ, CAM_LUN_WILDCARD);
	mps_map_command(sc, tm);
/*
 * Completion handler for the target-reset TM issued by
 * mpssas_action_resetdev().  Reports the result to CAM and, on success,
 * announces the reset (AC_SENT_BDR) to interested consumers.
 */
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;

	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for reset of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP);
		/* Tell CAM consumers a bus device reset was delivered. */
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		mpssas_set_ccbstatus(ccb, CAM_REQ_CMP_ERR);

	mpssas_free_tm(sc, tm);
/*
 * CAM polled-mode entry point (used e.g. during kernel dumps when
 * interrupts are unavailable): service the controller by calling the
 * interrupt handler directly.
 */
mpssas_poll(struct cam_sim *sim)
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	if (sassc->sc->mps_debug & MPS_TRACE) {
		/* frequent debug messages during a panic just slow
		 * everything down too much.
		 */
		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
		sassc->sc->mps_debug &= ~MPS_TRACE;

	mps_intr_locked(sassc->sc);
/*
 * CAM async event callback.  On AC_ADVINFO_CHANGED with RCAPLONG data,
 * re-reads the long read-capacity information for the affected LUN and
 * updates its cached EEDP (protection information) state, creating the
 * per-LUN bookkeeping structure on first sight.
 */
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;

		buftype = (uintptr_t)arg;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)

		/*
		 * We should have a handle for this, but check to make sure.
		 */
		KASSERT(xpt_path_target_id(path) < sassc->maxtargets,
		    ("Target %d out of bounds in mpssas_async\n",
		    xpt_path_target_id(path)));
		target = &sassc->targets[xpt_path_target_id(path)];
		if (target->handle == 0)

		/* Look up the LUN this event refers to; allocate if absent. */
		lunid = xpt_path_lun_id(path);

		SLIST_FOREACH(lun, &target->luns, lun_link) {
			if (lun->lun_id == lunid) {

		if (found_lun == 0) {
			lun = malloc(sizeof(struct mpssas_lun), M_MPT2,
				mps_dprint(sc, MPS_ERROR, "Unable to alloc "
				    "LUN for EEDP support.\n");
			lun->lun_id = lunid;
			SLIST_INSERT_HEAD(&target->luns, lun, lun_link);

		/* Fetch the cached RCAPLONG data via an XPT_DEV_ADVINFO CCB. */
		bzero(&rcap_buf, sizeof(rcap_buf));
		bzero(&cdai, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.ccb_h.flags = CAM_DIR_IN;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_NONE;
		cdai.bufsiz = sizeof(rcap_buf);
		cdai.buf = (uint8_t *)&rcap_buf;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path,

		/*
		 * Record whether the LUN is formatted with protection
		 * information, keyed off the PROT_EN bit and P_TYPE field
		 * of the long read-capacity data.
		 */
		if ((mpssas_get_ccbstatus((union ccb *)&cdai) == CAM_REQ_CMP)
		 && (rcap_buf.prot & SRC16_PROT_EN)) {
			switch (rcap_buf.prot & SRC16_P_TYPE) {
				lun->eedp_formatted = TRUE;
				lun->eedp_block_size =
				    scsi_4btoul(rcap_buf.length);
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
				lun->eedp_formatted = FALSE;
				lun->eedp_block_size = 0;
/*
 * Freeze the devq and set the INRESET flag so that no I/O will be sent to
 * the target until the reset has completed.  The CCB holds the path which
 * is used to release the devq.  The devq is released and the CCB is freed
 * when the TM completes.
 * We only need to do this when we're entering reset, not at each time we
 * need to send an abort (which will happen if multiple commands timeout
 * while we're sending the abort).  We do not release the queue for each
 * command we complete (just at the end when we free the tm), so freezing
 * it each time doesn't make sense.
 */
mpssas_prepare_for_tm(struct mps_softc *sc, struct mps_command *tm,
    struct mpssas_target *target, lun_id_t lun_id)

	/* The CCB only carries the path; it is freed when the TM completes. */
	ccb = xpt_alloc_ccb_nowait();
	path_id = cam_sim_path(sc->sassc->sim);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, path_id,
	    target->tid, lun_id) != CAM_REQ_CMP) {

	tm->cm_targ = target;
	if ((target->flags & MPSSAS_TARGET_INRESET) == 0) {
		mps_dprint(sc, MPS_XINFO | MPS_RECOVERY,
		    "%s: Freezing devq for target ID %d\n",
		    __func__, target->tid);
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		target->flags |= MPSSAS_TARGET_INRESET;
/*
 * Kick off topology discovery after controller initialization by
 * sending the IOC PortEnable request.
 */
mpssas_startup(struct mps_softc *sc)
	/*
	 * Send the port enable message and set the wait_for_port_enable flag.
	 * This flag helps to keep the simq frozen until all discovery events
	 * are processed.
	 */
	sc->wait_for_port_enable = 1;
	mpssas_send_portenable(sc);
/*
 * Build and queue the MPI2 PortEnable request; completion is handled by
 * mpssas_portenable_complete().  If no command frame is available the
 * request is not sent (presumably an error is returned to the caller --
 * the return path is outside this view; confirm against full source).
 */
mpssas_send_portenable(struct mps_softc *sc)
	MPI2_PORT_ENABLE_REQUEST *request;
	struct mps_command *cm;

	if ((cm = mps_alloc_command(sc)) == NULL)
	request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
	request->Function = MPI2_FUNCTION_PORT_ENABLE;
	request->MsgFlags = 0;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_portenable_complete;

	mps_map_command(sc, cm);
	mps_dprint(sc, MPS_XINFO,
	    "mps_send_portenable finished cm %p req %p complete %p\n",
	    cm, cm->cm_req, cm->cm_complete);
3322 mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm)
3324 MPI2_PORT_ENABLE_REPLY *reply;
3325 struct mpssas_softc *sassc;
3331 * Currently there should be no way we can hit this case. It only
3332 * happens when we have a failure to allocate chain frames, and
3333 * port enable commands don't have S/G lists.
3335 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3336 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! "
3337 "This should not happen!\n", __func__, cm->cm_flags);
3340 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
3342 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n");
3343 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) !=
3344 MPI2_IOCSTATUS_SUCCESS)
3345 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
3347 mps_free_command(sc, cm);
3350 * Get WarpDrive info after discovery is complete but before the scan
3351 * starts. At this point, all devices are ready to be exposed to the
3352 * OS. If devices should be hidden instead, take them out of the
3353 * 'targets' array before the scan. The devinfo for a disk will have
3354 * some info and a volume's will be 0. Use that to remove disks.
3356 mps_wd_config_pages(sc);
3359 * Done waiting for port enable to complete. Decrement the refcount.
3360 * If refcount is 0, discovery is complete and a rescan of the bus can
3361 * take place. Since the simq was explicitly frozen before port
3362 * enable, it must be explicitly released here to keep the
3363 * freeze/release count in sync.
3365 sc->wait_for_port_enable = 0;
3366 sc->port_enable_complete = 1;
3367 wakeup(&sc->port_enable_complete);
3368 mpssas_startup_decrement(sassc);
/*
 * Return whether target 'id' appears in the comma-separated exclusion
 * list kept in sc->exclude_ids.
 * NOTE(review): strsep() modifies the string it walks and advances the
 * 'ids' cursor; confirm against the full source that consuming
 * sc->exclude_ids here is intended (repeat calls would see an altered
 * list).
 */
mpssas_check_id(struct mpssas_softc *sassc, int id)
	struct mps_softc *sc = sassc->sc;

	ids = &sc->exclude_ids[0];
	while((name = strsep(&ids, ",")) != NULL) {
		/* Skip empty tokens (e.g. consecutive commas). */
		if (name[0] == '\0')
		if (strtol(name, NULL, 0) == (long)id)
3390 mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
3392 struct mpssas_softc *sassc;
3393 struct mpssas_lun *lun, *lun_tmp;
3394 struct mpssas_target *targ;
3399 * The number of targets is based on IOC Facts, so free all of
3400 * the allocated LUNs for each target and then the target buffer
3403 for (i=0; i< maxtargets; i++) {
3404 targ = &sassc->targets[i];
3405 SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
3409 free(sassc->targets, M_MPT2);
3411 sassc->targets = malloc(sizeof(struct mpssas_target) * maxtargets,
3412 M_MPT2, M_WAITOK|M_ZERO);